diff --git a/Makefile b/Makefile
index 65f91c46ae..52fde4cf41 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ export BIN_TIMESTAMP ?=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
export TIMESTAMP ?=$(shell echo $(BIN_TIMESTAMP) | tr -d ':' | tr 'T' '-' | tr -d 'Z')
SOURCE_GIT_COMMIT_TIMESTAMP ?= $(shell TZ=UTC0 git show --quiet --date='format-local:%Y%m%d%H%M%S' --format="%cd")
-OCP_VERSION := $(shell jq -r '.release.base' ${PROJECT_DIR}/assets/release/release-$(shell uname -i).json)
+include Makefile.version.$(shell uname -i).var
MICROSHIFT_VERSION ?= $(subst -clean,,$(shell echo '${OCP_VERSION}-${SOURCE_GIT_COMMIT_TIMESTAMP}-${SOURCE_GIT_COMMIT}-${SOURCE_GIT_TREE_STATE}'))
# Overload SOURCE_GIT_TAG value set in vendor/github.com/openshift/build-machinery-go/make/lib/golang.mk
@@ -160,9 +160,18 @@ _build_local:
+@GOOS=$(GOOS) GOARCH=$(GOARCH) $(MAKE) --no-print-directory build \
GO_BUILD_PACKAGES:=./cmd/microshift \
GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/$(GOOS)_$(GOARCH)
- +@GOOS=$(GOOS) GOARCH=$(GOARCH) $(MAKE) -C etcd --no-print-directory build \
- GO_BUILD_PACKAGES:=./cmd/microshift-etcd \
- GO_BUILD_BINDIR:=../$(CROSS_BUILD_BINDIR)/$(GOOS)_$(GOARCH)
+ +@GOOS=$(GOOS) GOARCH=$(GOARCH) \
+ GO_LD_FLAGS="$(GC_FLAGS) -ldflags \"\
+ -X main.majorFromGit=$(MAJOR) \
+ -X main.minorFromGit=$(MINOR) \
+ -X main.versionFromGit=$(EMBEDDED_GIT_TAG) \
+ -X main.commitFromGit=$(EMBEDDED_GIT_COMMIT) \
+ -X main.gitTreeState=$(EMBEDDED_GIT_TREE_STATE) \
+ -X main.buildDate=$(BIN_TIMESTAMP) \
+ $(LD_FLAGS)\"" \
+ $(MAKE) -C etcd --no-print-directory build \
+ GO_BUILD_PACKAGES:=./cmd/microshift-etcd \
+ GO_BUILD_BINDIR:=../$(CROSS_BUILD_BINDIR)/$(GOOS)_$(GOARCH)
cross-build-linux-amd64:
+$(MAKE) _build_local GOOS=linux GOARCH=amd64
@@ -231,8 +240,8 @@ bin/lichen: bin vendor/modules.txt
vendor:
go mod vendor
- for p in $(wildcard scripts/auto-rebase/rebase_patches/*.patch); do \
+ for p in $(sort $(wildcard scripts/auto-rebase/rebase_patches/*.patch)); do \
echo "Applying patch $$p"; \
- git mailinfo /dev/null /dev/stderr 2<&1- < $$p | git apply || exit 1; \
+ git mailinfo /dev/null /dev/stderr 2<&1- < $$p | git apply --reject || exit 1; \
done
.PHONY: vendor
diff --git a/Makefile.kube_git.var b/Makefile.kube_git.var
index 567df82f01..b1a2ba54ad 100644
--- a/Makefile.kube_git.var
+++ b/Makefile.kube_git.var
@@ -1,5 +1,5 @@
KUBE_GIT_MAJOR=1
KUBE_GIT_MINOR=26
KUBE_GIT_VERSION=v1.26.0
-KUBE_GIT_COMMIT=89232647de67ea787d339b1bd7c780a0ed97f3f9
+KUBE_GIT_COMMIT=379cd9f22597a7a7f6ea57471f590c1abf01ce92
KUBE_GIT_TREE_STATE=clean
diff --git a/Makefile.version.aarch64.var b/Makefile.version.aarch64.var
new file mode 100644
index 0000000000..a837955acd
--- /dev/null
+++ b/Makefile.version.aarch64.var
@@ -0,0 +1 @@
+OCP_VERSION := 4.13.0-0.nightly-arm64-2023-04-27-232704
diff --git a/Makefile.version.x86_64.var b/Makefile.version.x86_64.var
new file mode 100644
index 0000000000..7041b90272
--- /dev/null
+++ b/Makefile.version.x86_64.var
@@ -0,0 +1 @@
+OCP_VERSION := 4.13.0-0.nightly-2023-04-21-084440
diff --git a/README.md b/README.md
index 79c980279b..bd87d3a505 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ scale testing, and provisioning of lightweight Kubernetes control planes.
To run MicroShift, the minimum system requirements are:
- x86_64 or aarch64 CPU architecture
-- Red Hat Enterprise Linux 8 with Extended Update Support (8.6 or later)
+- Red Hat Enterprise Linux 9 with Extended Update Support (9.2 or later)
- 2 CPU cores
- 2GB of RAM
- 2GB of free system root storage for MicroShift and its container images
@@ -34,7 +34,7 @@ Depending on user workload requirements, it may be necessary to add more resourc
performance, disk space in a root partition for container images, an LVM group for container storage, etc.
## Deploying MicroShift on Edge Devices
-For production deployments, MicroShift can be run on bare metal hardware or hypervisors supported and certified for the Red Hat Enterprise Linux 8 operating system.
+For production deployments, MicroShift can be run on bare metal hardware or hypervisors supported and certified for the Red Hat Enterprise Linux 9 operating system.
- [Edge systems certified for Red Hat Enterprise Linux](https://catalog.redhat.com/hardware/search?c_catalog_channel=Edge%20System&p=1)
- [Hypervisors certified for Red Hat Enterprise Linux](https://access.redhat.com/solutions/certified-hypervisors)
@@ -75,4 +75,4 @@ Community documentation sources are managed at > "${TEMP_FILE}"
+ echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$?
done
done
+ if [[ $rc -ne 0 ]]; then
+ sleep 60 & wait
+ continue
+ fi
+
# TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
# Replace /etc/hosts with our modified version if needed
diff --git a/assets/components/openshift-dns/node-resolver/update-node-resolver.sh b/assets/components/openshift-dns/node-resolver/update-node-resolver.sh
index aedc87198a..327718fef7 100644
--- a/assets/components/openshift-dns/node-resolver/update-node-resolver.sh
+++ b/assets/components/openshift-dns/node-resolver/update-node-resolver.sh
@@ -48,11 +48,17 @@ while true; do
fi
# Append resolver entries for services
+ rc=0
for svc in "${!svc_ips[@]}"; do
for ip in ${svc_ips[${svc}]}; do
- echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}"
+ echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$?
done
done
+ if [[ $rc -ne 0 ]]; then
+ sleep 60 & wait
+ continue
+ fi
+
# TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
# Replace /etc/hosts with our modified version if needed
diff --git a/assets/components/ovn/master/daemonset.yaml b/assets/components/ovn/master/daemonset.yaml
index df8858802e..ed0675fe29 100644
--- a/assets/components/ovn/master/daemonset.yaml
+++ b/assets/components/ovn/master/daemonset.yaml
@@ -44,7 +44,7 @@ spec:
containers:
# ovn-northd: convert network objects in nbdb to flows in sbdb
- name: northd
- image: {{ .ReleaseImage.ovn_kubernetes_microshift }}
+ image: {{ .ReleaseImage.ovn_kubernetes_microshift_rhel_9 }}
command:
- /bin/bash
- -c
@@ -97,7 +97,7 @@ spec:
# nbdb: the northbound, or logical network object DB. In raft mode
- name: nbdb
- image: {{ .ReleaseImage.ovn_kubernetes_microshift }}
+ image: {{ .ReleaseImage.ovn_kubernetes_microshift_rhel_9 }}
command:
- /bin/bash
- -c
@@ -223,7 +223,7 @@ spec:
# sbdb: The southbound, or flow DB. In raft mode
- name: sbdb
- image: {{ .ReleaseImage.ovn_kubernetes_microshift }}
+ image: {{ .ReleaseImage.ovn_kubernetes_microshift_rhel_9 }}
command:
- /bin/bash
- -c
@@ -315,7 +315,7 @@ spec:
# ovnkube master: convert kubernetes objects in to nbdb logical network components
- name: ovnkube-master
- image: {{ .ReleaseImage.ovn_kubernetes_microshift }}
+ image: {{ .ReleaseImage.ovn_kubernetes_microshift_rhel_9 }}
command:
- /bin/bash
- -c
diff --git a/assets/components/ovn/node/daemonset.yaml b/assets/components/ovn/node/daemonset.yaml
index 7e7cf95137..3b7862df36 100644
--- a/assets/components/ovn/node/daemonset.yaml
+++ b/assets/components/ovn/node/daemonset.yaml
@@ -40,7 +40,7 @@ spec:
containers:
# ovn-controller: programs the vswitch with flows from the sbdb
- name: ovn-controller
- image: {{ .ReleaseImage.ovn_kubernetes_microshift }}
+ image: {{ .ReleaseImage.ovn_kubernetes_microshift_rhel_9 }}
command:
- /bin/bash
- -c
diff --git a/assets/controllers/kube-controller-manager/defaultconfig.yaml b/assets/controllers/kube-controller-manager/defaultconfig.yaml
index 7aca755b62..efceaa3c61 100644
--- a/assets/controllers/kube-controller-manager/defaultconfig.yaml
+++ b/assets/controllers/kube-controller-manager/defaultconfig.yaml
@@ -14,9 +14,9 @@ extendedArguments:
leader-elect-retry-period:
- "3s"
leader-elect-resource-lock:
- - "configmapsleases"
+ - "leases"
leader-elect-renew-deadline:
- - "12s" # Increase api call timeout value from default 5s to 6s, required in case primary dns server fail.
+ - "12s" # Increase renew deadline from the default 10s to 12s, required in case the primary DNS server fails.
controllers:
- "*"
- "-ttl" # TODO: this is excluded in kube-core, but not in #21092
diff --git a/assets/release/release-aarch64.json b/assets/release/release-aarch64.json
index 33d8bfd1a7..881bc2f9d1 100644
--- a/assets/release/release-aarch64.json
+++ b/assets/release/release-aarch64.json
@@ -1,16 +1,16 @@
{
"release": {
- "base": "4.13.0-0.nightly-arm64-2023-02-28-171639"
+ "base": "4.13.0-0.nightly-arm64-2023-04-27-232704"
},
"images": {
- "cli": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:14b1c70aa990218a79493b3807b7d1be7db5270bb7d514b7b124bf3b1680e908",
- "coredns": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2b9c82e4386ea324425c877ec0f71ea4716592e2cec4a6e2d8400a8d4d81b5ae",
- "haproxy-router": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:919e017776264b4909b0eb390ce95fa8b93e8e9f3932c906b6e013a371a41174",
- "kube-rbac-proxy": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0c5d03660ff04bdae29c3616992438fb8d5fcd1a8a52ca870cdeb6265d12f36",
+ "cli": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:25b1bb086a4eee276897911075a9034ddbf44d2ccce39c3c4b79cc59bfb6f226",
+ "coredns": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12a7b9e5d6bb21667dcf8e87435f32e3249e8c519793b6e7909e61cd3878c47c",
+ "haproxy-router": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c74f1267e481b9d2838288a0c4c0d56d7326be3791d0de8fe5cb0c30bc83527e",
+ "kube-rbac-proxy": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0b21d44f9b053592278a87f4dcd42ce35de5e66a4a0cbeb8e92cbe5f521285a2",
"openssl": "registry.access.redhat.com/ubi8/openssl@sha256:9e743d947be073808f7f1750a791a3dbd81e694e37161e8c6c6057c2c342d671",
- "ovn-kubernetes-microshift": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad6a1b1a01f928dad3ed9b1d1288c4d5e665868c1713ca54c64ff21ebe4fb8ca",
- "pod": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0a8792c061b5c99e62960592f77d5f97dcce8606017c18dd6988c9b24939c30f",
- "service-ca-operator": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f191e9462d8c54bca1951ed188562e4343b3e70f5ac7f15fa64c3a0ec179e3a1",
+ "ovn-kubernetes-microshift-rhel-9": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:537ea561cc852b5e4967ea59478aaa2d721f4860725c11e635ff38ec9feb90fc",
+ "pod": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d2a89b27563e268eb3c5a16ff555fba061a9904abe5d65f5a39312e816a01776",
+ "service-ca-operator": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a89e4c39eeba38f2f48f8d40fd6cfbf3d2c407d1f6d9ea00be9e1456524e488a",
"topolvm_csi": "registry.redhat.io/lvms4/topolvm-rhel8@sha256:10bffded5317da9de6c45ba74f0bb10e0a08ddb2bfef23b11ac61287a37f10a1",
"topolvm_csi_registrar": "registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:a4319ff7c736ca9fe20500dc3e5862d6bb446f2428ea2eadfb5f042195f4f860",
"topolvm_csi_livenessprobe": "registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:9df24be671271f5ea9414bfd08e58bc2fa3dc4bc68075002f3db0fd020b58be0",
diff --git a/assets/release/release-x86_64.json b/assets/release/release-x86_64.json
index fe6d178d09..a68053faf9 100644
--- a/assets/release/release-x86_64.json
+++ b/assets/release/release-x86_64.json
@@ -1,16 +1,16 @@
{
"release": {
- "base": "4.13.0-0.nightly-2023-02-27-101545"
+ "base": "4.13.0-0.nightly-2023-04-21-084440"
},
"images": {
- "cli": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9af4bd001d30fd00d89c5f199b970b820240d0959312c2fb8ea82597c8da24bb",
- "coredns": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e7a3cf3a4e3a19c02b7cb7136fb96466b04f12f7a0176d1b8778e6991ce55e70",
- "haproxy-router": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a3883746d3a051fc71aafee3c7b958dd65f8c367fdb127dc398ad912dd802ade",
- "kube-rbac-proxy": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:eef3d1894656818ad393df61d3713115dce777e113b781c1d01bc285ee56ca2c",
+ "cli": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:800f0bb464dc9d622c3a670e503bee267670395c9bea0fb6247737b6f826ba7d",
+ "coredns": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ef20b93c7bad79e4fa20cecaf85af5a897342aefd133b5d2c693d74a4813df2c",
+ "haproxy-router": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5460207dedbfc16cc26527f5fc7ccc8143242b1d4ca329476441cce3672a992b",
+ "kube-rbac-proxy": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e2b2c89aedaa44964e4cf003ef94963da2e773ace08e601592078adefa482b52",
"openssl": "registry.access.redhat.com/ubi8/openssl@sha256:9e743d947be073808f7f1750a791a3dbd81e694e37161e8c6c6057c2c342d671",
- "ovn-kubernetes-microshift": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5ab6561dbe5a00a9b96e1c29818d8376c8e871e6757875c9cf7f48e333425065",
- "pod": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8863d0268479214bd3835fe0135e94780e13a15ba00afe55e168346da825628a",
- "service-ca-operator": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:91e31c0e8171d92d991322419c860cde1d4126fa927be2c453fbe14aa22743f7",
+ "ovn-kubernetes-microshift-rhel-9": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8266e902ee5402689563a9bf6d623d39ede6dca9263407612618440d39fbe2",
+ "pod": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9c3c1e09925601bb101aca93ffbf55d49999f55d9952578f5aa45c309cd05c58",
+ "service-ca-operator": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ba8c66a65d8a7d32c7d6aec772d0cc88f65bf54b16664cfedf8e068c00689a4",
"topolvm_csi": "registry.redhat.io/lvms4/topolvm-rhel8@sha256:10bffded5317da9de6c45ba74f0bb10e0a08ddb2bfef23b11ac61287a37f10a1",
"topolvm_csi_registrar": "registry.redhat.io/openshift4/ose-csi-node-driver-registrar@sha256:a4319ff7c736ca9fe20500dc3e5862d6bb446f2428ea2eadfb5f042195f4f860",
"topolvm_csi_livenessprobe": "registry.redhat.io/openshift4/ose-csi-livenessprobe@sha256:9df24be671271f5ea9414bfd08e58bc2fa3dc4bc68075002f3db0fd020b58be0",
diff --git a/docs/config/busybox_running_check.sh b/docs/config/busybox_running_check.sh
new file mode 100644
index 0000000000..9522b99f04
--- /dev/null
+++ b/docs/config/busybox_running_check.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+set -e
+
+SCRIPT_NAME=$(basename $0)
+PODS_NS_LIST=(busybox)
+PODS_CT_LIST=(1)
+
+# Source the MicroShift health check functions library
+source /usr/share/microshift/functions/greenboot.sh
+
+# Set the exit handler to log the exit status
+trap 'script_exit' EXIT
+
+# The script exit handler logging the FAILURE or FINISHED message depending
+# on the exit status of the last command
+#
+# args: None
+# return: None
+function script_exit() {
+ [ "$?" -ne 0 ] && status=FAILURE || status=FINISHED
+ echo $status
+}
+
+#
+# Main
+#
+
+# Exit if the current user is not 'root'
+if [ $(id -u) -ne 0 ] ; then
+ echo "The '${SCRIPT_NAME}' script must be run with the 'root' user privileges"
+ exit 1
+fi
+
+echo "STARTED"
+
+# Exit if the MicroShift service is not enabled
+if [ "$(systemctl is-enabled microshift.service 2>/dev/null)" != "enabled" ] ; then
+ echo "MicroShift service is not enabled. Exiting..."
+ exit 0
+fi
+
+# Set the wait timeout for the current check based on the boot counter
+WAIT_TIMEOUT_SECS=$(get_wait_timeout)
+
+# Wait for pod images to be downloaded
+for i in ${!PODS_NS_LIST[@]}; do
+ CHECK_PODS_NS=${PODS_NS_LIST[$i]}
+
+ echo "Waiting ${WAIT_TIMEOUT_SECS}s for pod image(s) from the '${CHECK_PODS_NS}' namespace to be downloaded"
+ wait_for ${WAIT_TIMEOUT_SECS} namespace_images_downloaded
+done
+
+# Wait for pods to enter ready state
+for i in ${!PODS_NS_LIST[@]}; do
+ CHECK_PODS_NS=${PODS_NS_LIST[$i]}
+ CHECK_PODS_CT=${PODS_CT_LIST[$i]}
+
+ echo "Waiting ${WAIT_TIMEOUT_SECS}s for ${CHECK_PODS_CT} pod(s) from the '${CHECK_PODS_NS}' namespace to be in 'Ready' state"
+ wait_for ${WAIT_TIMEOUT_SECS} namespace_pods_ready
+done
+
+# Verify that pods are not restarting
+for i in ${!PODS_NS_LIST[@]}; do
+ CHECK_PODS_NS=${PODS_NS_LIST[$i]}
+
+ echo "Checking pod restart count in the '${CHECK_PODS_NS}' namespace"
+ namespace_pods_not_restarting ${CHECK_PODS_NS}
+done
diff --git a/docs/config/microshift-starter.ks b/docs/config/microshift-starter.ks
index 3697a1e453..2589f49644 100644
--- a/docs/config/microshift-starter.ks
+++ b/docs/config/microshift-starter.ks
@@ -5,7 +5,7 @@ text
reboot
# Configure network to use DHCP and activate on boot
-network --bootproto=dhcp --device=link --activate --onboot=on --hostname=microshift-starter.local --noipv6
+network --bootproto=dhcp --device=link --activate --onboot=on --hostname=microshift-starter --noipv6
# Partition disk with a 1GB boot XFS partition and a 10GB LVM volume containing system root
# The remainder of the volume will be used by the CSI driver for storing data
@@ -53,9 +53,12 @@ if ! subscription-manager status >& /dev/null ; then
fi
# Configure systemd journal service to persist logs between boots and limit their size to 1G
-mkdir -p /var/log/journal/
-sed -i 's/.*Storage=.*/Storage=auto/g' /etc/systemd/journald.conf
-sed -i 's/.*SystemMaxUse=.*/SystemMaxUse=1G/g' /etc/systemd/journald.conf
-sed -i 's/.*RuntimeMaxUse=.*/RuntimeMaxUse=1G/g' /etc/systemd/journald.conf
+sudo mkdir -p /etc/systemd/journald.conf.d
+cat <<EOF | sudo tee /etc/systemd/journald.conf.d/microshift.conf &>/dev/null
+[Journal]
+Storage=persistent
+SystemMaxUse=1G
+RuntimeMaxUse=1G
+EOF
%end
diff --git a/docs/default_csi_plugin.md b/docs/default_csi_plugin.md
index c8ff733034..4bd2682f62 100644
--- a/docs/default_csi_plugin.md
+++ b/docs/default_csi_plugin.md
@@ -3,7 +3,7 @@
> **IMPORTANT!** The default LVMS configuration is intended to match the developer environment described in [MicroShift Development Environment](./devenv_setup.md). See section **[Configuring LVMS](#Configuring-LVMS)** for guidance on configuring LVMS for your environment.
MicroShift enables dynamic storage provisioning out of the box with the LVMS CSI plugin. This plugin is a downstream
-Red Hat fork of TopoLVM. This provisioner will create a new LVM logical volume in the `rhel` volume group for each
+Red Hat build of TopoLVM. This provisioner will create a new LVM logical volume in the `rhel` volume group for each
PersistenVolumeClaim(PVC), and make these volumes available to pods. For more information on LVMS, visit the repo's
[README](https://github.com/red-hat-storage/topolvm).
@@ -31,20 +31,22 @@ Full documentation of the config spec can be found at [github.com/red-hat-storag
#### Path
-The user provided lvmd config should be written to the same directory as the MicroShift config. If a MicroShift config
-doesn't exist, MicroShift will assume default lvmd values. These paths will be checked for the config, depending on the user MicroShift
-is run as.
-
-1. User config dir: `~/.microshift/lvmd.yaml`
-2. Global config dir: `/etc/microshift/lvmd.yaml`
+The user provided lvmd config should be written to the same directory as the MicroShift config. If an lvmd configuration file
+does not exist in `/etc/microshift/lvmd.yaml`, MicroShift will use default values.
## System Requirements
-### Volume Group Name
+### Default Volume Group
+
+If there is only one volume group on the system, LVMS uses it by
+default. If there are multiple volume groups, and no configuration
+file, LVMS looks for a volume group named `microshift`. If there is no
+volume group named `microshift`, LVMS is disabled.
-The default integration of LVMS assumes a volume-group named `rhel`. LVMS's node-controller expects that volume
-group to exist prior to launching the service. If the volume group does not exist, the node-controller will fail to
-start and enter a CrashLoopBackoff state.
+LVMS expects all volume groups to exist prior to launching the
+service. If LVMS is configured to use a volume group that does not
+exist, the node-controller Pod will fail and enter a CrashLoopBackoff
+state.
### Volume Size Increments
diff --git a/docs/devenv_cloud.md b/docs/devenv_cloud.md
index 533acdfe63..1be2ad41fd 100644
--- a/docs/devenv_cloud.md
+++ b/docs/devenv_cloud.md
@@ -49,8 +49,7 @@ Proceed by creating a virtual instance using the following parameters.
* Use a descriptive name prefixed by your user name if the account is shared with others (i.e. `myuser-ushift-dev`)
* Select the Red Hat Linux operating system family
* Select the `64-bit (x86)` or `64 bit (Arm)` architecture
-* Select Red Hat Enterprise Linux 8 (HVM) operating system.
-> Do not attempt to use RHEL 9 as it is not supported for the development environment. The operating system will be upgraded to the 8.7 version in the configuration stage.
+* Select Red Hat Enterprise Linux 9 (HVM) operating system.
* Select the instance type depending on your architecture of choice:
* e.g. `c5.metal` for `x86_64`
* e.g. `c6g.metal` for `aarch64`
@@ -60,7 +59,7 @@ Proceed by creating a virtual instance using the following parameters.
* 90 GiB root volume on `gp3` storage type
* 10 GiB data volume on `gp3` storage type
-Review your selections and lauch the cloud instance.
+Review your selections and launch the cloud instance.
> Unless noted otherwise, all further configuration should be performed on the cloud instance created in this section.
### Other Cloud Environments
@@ -84,7 +83,7 @@ sudo useradd -m -s /bin/bash -p "$(openssl passwd -1 ${PASSWORD})" microshift
echo -e 'microshift\tALL=(ALL)\tNOPASSWD: ALL' | sudo tee /etc/sudoers.d/microshift
sudo sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/g' /etc/ssh/sshd_config
-sudo systemctl reload sshd.service
+sudo systemctl reload sshd.service
```
> Choose a strong password and make sure you configure the security group to limit inbound connections on the SSH port 22 **only** from your IP address.
@@ -132,7 +131,7 @@ Follow the instructions in the [Create Virtual Machine](./devenv_setup_auto.md#c
```bash
./scripts/devenv-builder/create-vm.sh microshift-bench \
/var/lib/libvirt/images \
- /var/lib/libvirt/images/rhel-8.7-$(uname -i)-dvd.iso \
+ /var/lib/libvirt/images/rhel-baseos-9.*-$(uname -i)-dvd.iso \
2 2 10 0 1
```
@@ -140,7 +139,7 @@ Follow the instructions in the [Create Virtual Machine](./devenv_setup_auto.md#c
### Virtual Machine Management
-To manage a virtual machine remotely, it is possible to use `virsh` command line or `Cockpit` Web interface running on the MicroShift development host.
+To manage a virtual machine remotely, it is possible to use `virsh` command line or `Cockpit` Web interface running on the MicroShift development host.
However, it is recommended to use the Virtual Machine Manager application to connect to the host remotely via the `File > Add Connection` menu option.
diff --git a/docs/devenv_setup.md b/docs/devenv_setup.md
index a06a60dc98..dfbc0ff0d5 100644
--- a/docs/devenv_setup.md
+++ b/docs/devenv_setup.md
@@ -4,7 +4,7 @@ It is recommended to review the current document and use the automation instruct
## Create Development Virtual Machine
Start by downloading one of the supported boot images for the `x86_64` or `aarch64` architecture:
-* RHEL 9.1 from https://developers.redhat.com/products/rhel/download
+* RHEL 9.2 from https://developers.redhat.com/products/rhel/download
* CentOS 9 Stream from https://www.centos.org/download
### Creating VM
@@ -41,7 +41,7 @@ In the OS installation wizard, set the following options:
- Select "Installation Destination"
- Under "Storage Configuration" sub-section, select "Custom" radial button
- Select "Done" to open a window for configuring partitions
- - Under "New Red Hat Enterprise Linux 8.x Installation", click "Click here to create them automatically"
+ - Under "New Red Hat Enterprise Linux 9.x Installation", click "Click here to create them automatically"
- Select the root partition (`/`)
- On the right side of the menu, set "Desired Capacity" to `40 GiB`
- On the right side of the menu, verify the volume group is `rhel`.
@@ -50,7 +50,7 @@ In the OS installation wizard, set the following options:
- Click "Done" button.
- At the "Summary of Changes" window, select "Accept Changes"
-- Connect network card and set the hostname (i.e. `microshift-dev.localdomain`)
+- Connect network card and set the hostname (i.e. `microshift-dev`)
- Register the system with Red Hat using your credentials (toggle off Red Hat Insights connection)
- In the Software Selection, select Minimal Install base environment and toggle on Headless Management to enable Cockpit
@@ -66,19 +66,6 @@ sudo dnf clean all -y
sudo dnf update -y
sudo dnf install -y git cockpit make golang selinux-policy-devel rpm-build jq bash-completion
sudo systemctl enable --now cockpit.socket
-
-# Install go1.19
-# This is installed into different location (/usr/local/bin/go) from dnf installed Go (/usr/bin/go) so it doesn't conflict
-# /usr/local/bin is before /usr/bin in $PATH so newer one is picked up
-GO_VER=1.19.4
-GO_ARCH=$([ "$(uname -i)" == "x86_64" ] && echo "amd64" || echo "arm64")
-curl -L -o "go${GO_VER}.linux-${GO_ARCH}.tar.gz" "https://go.dev/dl/go${GO_VER}.linux-${GO_ARCH}.tar.gz" &&
- sudo rm -rf "/usr/local/go${GO_VER}" && \
- sudo mkdir -p "/usr/local/go${GO_VER}" && \
- sudo tar -C "/usr/local/go${GO_VER}" -xzf "go${GO_VER}.linux-${GO_ARCH}.tar.gz" --strip-components 1 && \
- sudo rm -rfv /usr/local/bin/{go,gofmt} && \
- sudo ln --symbolic /usr/local/go${GO_VER}/bin/{go,gofmt} /usr/local/bin/ && \
- rm -rfv "go${GO_VER}.linux-${GO_ARCH}.tar.gz"
```
You should now be able to access the VM Cockpit console using `https://:9090` URL.
@@ -107,7 +94,7 @@ make srpm
The artifacts of the build are located in the `_output/rpmbuild` directory.
```bash
-$ cd ~/microshift/_output/rpmbuild && find . -name \*.rpm
+$ cd ~/microshift/_output/rpmbuild && find . -name \*.rpm
./RPMS/x86_64/microshift-4.13.0_0.nightly_2023_01_17_152326_20230124054037_b67f6bc3_dirty-1.el8.x86_64.rpm
./RPMS/x86_64/microshift-networking-4.13.0_0.nightly_2023_01_17_152326_20230124054037_b67f6bc3_dirty-1.el8.x86_64.rpm
./RPMS/noarch/microshift-release-info-4.13.0_0.nightly_2023_01_17_152326_20230124054037_b67f6bc3_dirty-1.el8.noarch.rpm
@@ -128,7 +115,7 @@ Enable the repositories required for installing MicroShift dependencies.
RHEL
-When working with MicroShift based on a pre-release _minor_ version `Y` of OpenShift, the corresponding RPM repository `rhocp-4.$Y-for-rhel-8-$ARCH-rpms` may not be available yet. In that case, use the `Y-1` released version or a `Y-beta` version from the public `https://mirror.openshift.com/pub/openshift-v4/$ARCH/dependencies/rpms/` OpenShift mirror repository.
+When working with MicroShift based on a pre-release _minor_ version `Y` of OpenShift, the corresponding RPM repository `rhocp-4.$Y-for-rhel-9-$ARCH-rpms` may not be available yet. In that case, use the `Y-1` released version or a `Y-beta` version from the public `https://mirror.openshift.com/pub/openshift-v4/$ARCH/dependencies/rpms/` OpenShift mirror repository.
```bash
OSVERSION=$(awk -F: '{print $5}' /etc/system-release-cpe)
@@ -232,7 +219,7 @@ Examine the `/tmp/microshift.log` log file to ensure a successful startup.
> An alternative way of running MicroShift is to update `/usr/bin/microshift` file and restart the service. The logs would then be accessible by running the `journalctl -xu microshift` command.
> ```bash
-> sudo cp -f ~/microshift/_output_/bin/microshift /usr/bin/microshift
+> sudo cp -f ~/microshift/_output/bin/microshift /usr/bin/microshift
> sudo systemctl restart microshift
> ```
@@ -314,7 +301,7 @@ To view all the available profiles, run `oc get --raw /debug/pprof`.
The following error message may be encountered when enabling the OpenShift RPM repositories.
```
-Error: 'fast-datapath-for-rhel-8-x86_64-rpms' does not match a valid repository ID.
+Error: 'fast-datapath-for-rhel-9-x86_64-rpms' does not match a valid repository ID.
Use "subscription-manager repos --list" to see valid repositories.
```
@@ -325,18 +312,18 @@ $ sudo subscription-manager repos --list-enabled
+----------------------------------------------------------+
Available Repositories in /etc/yum.repos.d/redhat.repo
+----------------------------------------------------------+
-Repo ID: rhel-8-for-x86_64-appstream-rpms
-Repo Name: Red Hat Enterprise Linux 8 for x86_64 - AppStream (RPMs)
-Repo URL: https://cdn.redhat.com/content/dist/rhel8/$releasever/x86_64/appstream/os
+Repo ID: rhel-9-for-x86_64-baseos-rpms
+Repo Name: Red Hat Enterprise Linux 9 for x86_64 - BaseOS (RPMs)
+Repo URL: https://cdn.redhat.com/content/dist/rhel9/$releasever/x86_64/baseos/os
Enabled: 1
-Repo ID: fast-datapath-for-rhel-8-x86_64-rpms
-Repo Name: Fast Datapath for RHEL 8 x86_64 (RPMs)
-Repo URL: https://cdn.redhat.com/content/dist/layered/rhel8/x86_64/fast-datapath/os
+Repo ID: fast-datapath-for-rhel-9-x86_64-rpms
+Repo Name: Fast Datapath for RHEL 9 x86_64 (RPMs)
+Repo URL: https://cdn.redhat.com/content/dist/layered/rhel9/x86_64/fast-datapath/os
Enabled: 1
-Repo ID: rhel-8-for-x86_64-baseos-rpms
-Repo Name: Red Hat Enterprise Linux 8 for x86_64 - BaseOS (RPMs)
-Repo URL: https://cdn.redhat.com/content/dist/rhel8/$releasever/x86_64/baseos/os
+Repo ID: rhel-9-for-x86_64-appstream-rpms
+Repo Name: Red Hat Enterprise Linux 9 for x86_64 - AppStream (RPMs)
+Repo URL: https://cdn.redhat.com/content/dist/rhel9/$releasever/x86_64/appstream/os
Enabled: 1
```
diff --git a/docs/greenboot.md b/docs/greenboot.md
index 80c4a876fd..c0285c40d9 100644
--- a/docs/greenboot.md
+++ b/docs/greenboot.md
@@ -4,7 +4,7 @@
Serviceability of Edge Devices is often limited or non-existent, which makes it
challenging to troubleshoot device problems following a failed software or
-operating system upgrade.
+operating system upgrade.
To mitigate these problems, MicroShift uses [greenboot](https://github.com/fedora-iot/greenboot),
the Generic Health Check Framework for `systemd` on `rpm-ostree` based systems.
@@ -105,8 +105,11 @@ and setting limits on the maximal journal data size.
Run the following commands to configure the journal data persistency and limits.
```bash
-sudo mkdir -p /var/log/journal/
-sudo sed -i 's/.*Storage=.*/Storage=auto/g' /etc/systemd/journald.conf
-sudo sed -i 's/.*SystemMaxUse=.*/SystemMaxUse=1G/g' /etc/systemd/journald.conf
-sudo sed -i 's/.*RuntimeMaxUse=.*/RuntimeMaxUse=1G/g' /etc/systemd/journald.conf
+sudo mkdir -p /etc/systemd/journald.conf.d
+cat <<EOF | sudo tee /etc/systemd/journald.conf.d/microshift.conf &>/dev/null
+[Journal]
+Storage=persistent
+SystemMaxUse=1G
+RuntimeMaxUse=1G
+EOF
```
diff --git a/docs/greenboot_dev.md b/docs/greenboot_dev.md
index 3eda4dc1ad..75a6fc64ec 100644
--- a/docs/greenboot_dev.md
+++ b/docs/greenboot_dev.md
@@ -3,10 +3,68 @@
## Motivation
[Integrating MicroShift with Greenboot](./greenboot.md) allows for automatic
-software upgrade rollbacks in case of a failure. The current document describes
-a few techniques for simulating software upgrade failures in a development
-environment. These guidelines can be used by developers for implementing CI/CD
-pipelines testing MicroShift integration with Greenboot.
+software upgrade rollbacks in case of a failure.
+
+The current document describes a few techniques for:
+* Adding user workload health check procedures in a production environment
+* Simulating software upgrade failures in a development environment
+
+These guidelines can be used by developers for implementing user workload
+health check using Greenboot facilities, as well as simulating failures for
+testing MicroShift integration with Greenboot in CI/CD pipelines.
+
+## User Workload Health
+
+### Installation
+
+Follow the instructions in [Auto-applying Manifests](./howto_config.md#auto-applying-manifests)
+section to install a dummy user workload, without restarting the MicroShift service
+at this time.
+
+Proceed by creating a health check script in the `/etc/greenboot/check/required.d`
+directory.
+> The name prefix of the user script should be chosen to make sure it runs after
+> the `40_microshift_running_check.sh` script, which implements the MicroShift
+> health check procedure for its core services.
+
+```
+SCRIPT_FILE=/etc/greenboot/check/required.d/50_busybox_running_check.sh
+sudo curl -s https://raw.githubusercontent.com/openshift/microshift/main/docs/config/busybox_running_check.sh \
+ -o ${SCRIPT_FILE} && echo SUCCESS || echo ERROR
+sudo chmod 755 ${SCRIPT_FILE}
+```
+
+### Testing
+
+Reboot the system and run the following command to examine the output of the
+Greenboot health checks. Note that the MicroShift core service health checks
+are running before the user workload health checks.
+
+```bash
+sudo journalctl -o cat -u greenboot-healthcheck.service
+```
+
+### Health Check Implementation
+
+The script utilizes the MicroShift health check functions that are available
+in the `/usr/share/microshift/functions/greenboot.sh` file to reuse procedures
+already implemented for the MicroShift core services. These functions need a
+definition of the user workload namespaces and the expected count of pods.
+
+```bash
+PODS_NS_LIST=(busybox)
+PODS_CT_LIST=(1 )
+```
+
+The script starts by running sanity checks to verify that it is executed from
+the `root` account and that the MicroShift service is enabled.
+
+Finally, the MicroShift health check functions are called to perform the
+following actions:
+- Get a wait timeout of the current boot cycle for the `wait_for` function
+- Call the `namespace_images_downloaded` function to wait until pod images are available
+- Call the `namespace_pods_ready` function to wait until pods are ready
+- Call the `namespace_pods_not_restarting` function to verify pods are not restarting
## MicroShift Service Failure
@@ -51,13 +109,13 @@ previous state. Use the `rpm-ostree` command to verify the current deployment.
$ rpm-ostree status
State: idle
Deployments:
- edge:rhel/8/x86_64/edge
- Version: 8.7 (2022-12-26T10:28:32Z)
+ edge:rhel/9/x86_64/edge
+ Version: 9.1 (2022-12-26T10:28:32Z)
Diff: 1 removed
RemovedBasePackages: hostname 3.20-6.el8
-* edge:rhel/8/x86_64/edge
- Version: 8.7 (2022-12-26T10:28:32Z)
+* edge:rhel/9/x86_64/edge
+ Version: 9.1 (2022-12-26T10:28:32Z)
```
Finish by checking that all MicroShift pods run normally and cleaning up
@@ -69,7 +127,7 @@ sudo rpm-ostree cleanup -b -r
## MicroShift Pod Failure
-To simulate a situation with the MicroShift pod failure after an upgrade,
+To simulate a situation with the MicroShift pod failure after an upgrade,
one can set the `network.serviceNetwork` MicroShift configuration option to a
non-default `10.66.0.0/16` value without resetting the MicroShift data at the
`/var/lib/microshift` directory.
@@ -139,8 +197,8 @@ previous state. Use the `rpm-ostree` command to verify the current deployment.
$ rpm-ostree status
State: idle
Deployments:
-* edge:rhel/8/x86_64/edge
- Version: 8.7 (2022-12-28T16:50:54Z)
+* edge:rhel/9/x86_64/edge
+ Version: 9.1 (2022-12-28T16:50:54Z)
edge:eae8486a204bd72eb56ac35ca9c911a46aff3c68e83855f377ae36a3ea4e87ef
Timestamp: 2022-12-29T14:44:48Z
diff --git a/docs/howto_config.md b/docs/howto_config.md
index 5d5f63d637..36b85feabc 100644
--- a/docs/howto_config.md
+++ b/docs/howto_config.md
@@ -20,6 +20,8 @@ apiServer:
- ""
debugging:
logLevel: ""
+etcd:
+ memoryLimitMB: 0
```
## Default Settings
@@ -42,6 +44,8 @@ apiServer:
subjectAltNames: []
debugging:
logLevel: "Normal"
+etcd:
+ memoryLimitMB: 0
```
## Service NodePort range
@@ -78,6 +82,14 @@ List of ports that you must avoid:
| 10259/tcp | kube scheduler
|---------------|-----------------------------------------------------------------|
+## Etcd Memory Limit
+
+By default, etcd will be allowed to use as much memory as it needs to handle the load on the system; however, in memory constrained systems, it may be preferred or necessary to limit the amount of memory etcd is allowed to use at a given time.
+
+Setting the `memoryLimitMB` to a value greater than 0 will result in a soft memory limit being applied to etcd; etcd will be allowed to go over this value during operation, but memory will be more aggressively reclaimed from it if it does. A value of `128` megabytes is the configuration floor - attempting to set the limit below 128 megabytes will result in the configuration being 128 megabytes.
+
+Please note that values close to the floor may be more likely to impact etcd performance - the memory limit is a trade-off of memory footprint and etcd performance. The lower the limit, the more time etcd will spend on paging memory to disk and will take longer to respond to queries or even timing requests out if the limit is low and the etcd usage is high.
+
# Auto-applying Manifests
MicroShift leverages `kustomize` for Kubernetes-native templating and declarative management of resource objects. Upon start-up, it searches `/etc/microshift/manifests` and `/usr/lib/microshift/manifests` directories for a `kustomization.yaml` file. If it finds one, it automatically runs `kubectl apply -k` command to apply that manifest.
diff --git a/docs/howto_firewall.md b/docs/howto_firewall.md
index ce4fb70dd3..7b63339fcc 100644
--- a/docs/howto_firewall.md
+++ b/docs/howto_firewall.md
@@ -8,6 +8,7 @@ It is mandatory to allow MicroShift pods the access to the internal CoreDNS and
|:-------------|:---------------------|:-----------|
|10.42.0.0/16 |Yes |Pod network access to other pods |
|10.43.0.0/16 |No |ClusterIP service network, used by pods to access services (like CoreDNS and MicroShift API) |
+|10.44.0.0/32 |No |Next available subnet from ClusterIP service network, used by pods to access apiserver |
|169.254.169.1 |Yes |Special IP to access services backed by host endpoints, like MicroShift API Server |
The following ports are optional and they should be considered for MicroShift if a firewall is enabled.
diff --git a/docs/network/host_networking.md b/docs/network/host_networking.md
index 8b8372c1b7..2ca054ccd6 100644
--- a/docs/network/host_networking.md
+++ b/docs/network/host_networking.md
@@ -34,6 +34,8 @@ Not all IP addresses are attached to specific physical interfaces, some of them
**NOTE:** These IPs shall be reserved for MicroShift.
+**NOTE:** There is another special IP reserved for MicroShift's apiserver. In order to allow external access using host IPs a new local IP must be allocated for the apiserver. It defaults to the first IP in the next available subnet from the service CIDR. If service CIDR is 10.43.0.0/16, then the new IP will be 10.44.0.0/32. This default IP is added to the loopback interface to allow connectivity without ovnk.
+
## Interfaces
The following physical network interfaces are created or modified by ovn-kubernetes:
diff --git a/docs/network/ovn_kubernetes_traffic_flows.md b/docs/network/ovn_kubernetes_traffic_flows.md
index 5ed0efcd59..5a63fac5f4 100644
--- a/docs/network/ovn_kubernetes_traffic_flows.md
+++ b/docs/network/ovn_kubernetes_traffic_flows.md
@@ -29,7 +29,7 @@ Below is the node/service/pod information used for the traffic flow examples in
```text
(host)$ oc get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE
-microshift-dev Ready control-plane,master,worker 6d6h v1.25.0 192.168.122.14 Red Hat Enterprise Linux 8.6 (Ootpa)
+microshift-dev Ready control-plane,master,worker 6d6h v1.26.0 192.168.122.14 Red Hat Enterprise Linux 9.1 (Plow)
(host)$ oc get pods -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE
@@ -242,12 +242,12 @@ What traffic uses this flow?
Example: from `pod-1` to kubernetes APIServer service `10.43.0.1:443`.
-|step|where |srcIP:port |dstIP:port |comment |
-|:---|:------------------------|:---------------|:-------------------|:---------------------------------------------------------------------------------------|
-|1 |ovn node switch |10.42.0.12 |10.43.0.1:443 |a loadbalancer rule DNAT’s the service IP 10.43.0.1:443 to endpoint 192.168.122.14:6443 |
-|2 |ovn cluster router |10.42.0.12 |192.168.122.14:6443 |a router policy rule routes the packet to ovn-k8s-mp0 via ovn node switch |
-|3 |ovn node switch |10.42.0.12 |192.168.122.14:6443 |forward to ovn-k8s-mp0 |
-|4 |ovn-k8s-mp0 |10.42.0.12 |192.168.122.14:6443 |packet is received by the host |
+|step|where |srcIP:port |dstIP:port |comment |
+|:---|:------------------------|:---------------|:--------------|:----------------------------------------------------------------------------------|
+|1 |ovn node switch |10.42.0.12 |10.43.0.1:443 |a loadbalancer rule DNAT’s the service IP 10.43.0.1:443 to endpoint 10.44.0.0:6443 |
+|2 |ovn cluster router |10.42.0.12 |10.44.0.0:6443 |a router policy rule routes the packet to ovn-k8s-mp0 via ovn node switch |
+|3 |ovn node switch |10.42.0.12 |10.44.0.0:6443 |forward to ovn-k8s-mp0 |
+|4 |ovn-k8s-mp0 |10.42.0.12 |10.44.0.0:6443 |packet is received by the host |
Step 1, load balancer in ovn node switch:
@@ -265,7 +265,7 @@ name : "Service_default/kubernetes_TCP_node_switch_microshift-dev
options : {event="false", reject="true", skip_snat="false"}
protocol : tcp
selection_fields : []
-vips : {"10.43.0.1:443"="192.168.122.14:6443"}
+vips : {"10.43.0.1:443"="10.44.0.0:6443"}
```
Step 2, static policy in ovn cluster router:
@@ -344,15 +344,13 @@ What traffic uses this flow?
Example: from host `192.168.122.14` to kubernetes APIServer service `10.43.0.1:443`.
-|step|where |srcIP:port |dstIP:port |comment |
-|:---|:------------------------|:---------------|:-------------------|:------------------------------------------------------------------------------------|
-|1 |br-ex |192.168.122.14 |10.43.0.1:443 |a static openflow rule SNAT's the node IP to 169.254.169.2 |
-|2 |ovn gateway router |169.254.169.2 |10.43.0.1:443 |a loadbalancer rule DNAT’s the service IP 10.43.0.1:43 to endpoint 169.254.169.2:6443|
-|3 |ovn gateway router |169.254.169.2 |169.254.169.2:6443 |a static route sets nexthop to the node gateway 192.168.122.1 |
-|4 |ovn gateway router |169.254.169.2 |169.254.169.2:6443 |a router port rule SNAT’s 169.254.169.2 to the node IP address 192.168.122.14 |
-|5 |br-ex |192.168.122.14 |169.254.169.2:6443 |a static openflow rule DNAT's the 169.254.169.2 to 192.168.122.14 |
-|6 |br-ex |192.168.122.14 |192.168.122.14:6443 |a static openflow rule SNAT's the node IP to 169.254.169.1 |
-|7 |br-ex |169.254.169.1 |192.168.122.14:6443 |forward to br-ex |
+|step|where |srcIP:port |dstIP:port |comment |
+|:---|:------------------------|:---------------|:-------------------|:---------------------------------------------------------------------------------|
+|1 |br-ex |192.168.122.14 |10.43.0.1:443 |a static openflow rule SNAT's the node IP to 169.254.169.2 |
+|2   |ovn gateway router       |169.254.169.2   |10.43.0.1:443       |a loadbalancer rule DNAT’s the service IP 10.43.0.1:443 to endpoint 10.44.0.0:6443|
+|3 |ovn gateway router |169.254.169.2 |10.44.0.0:6443 |a static route sets nexthop to the node gateway 192.168.122.1 |
+|4 |ovn gateway router |169.254.169.2 |10.44.0.0:6443 |a router port rule SNAT’s 169.254.169.2 to the node IP address 192.168.122.14 |
+|5 |br-ex |192.168.122.14 |10.44.0.0:6443 |forward to loopback interface |
Step 1, static openflow rule in br-ex:
@@ -370,18 +368,18 @@ c531f4a6-170e-4a4a-9d73-4363e3c1e609 (ovn_cluster_router)
(northd)$ ovn-nbctl lr-lb-list GR_microshift-dev
UUID LB PROTO VIP IPs
-be7decc8-1a90-464a-8d10-10b3752955e4 Service_default/ tcp 10.43.0.1:443 169.254.169.2:6443
+be7decc8-1a90-464a-8d10-10b3752955e4 Service_default/ tcp 10.43.0.1:443 10.44.0.0:6443
(northd)$ ovn-nbctl list load_balancer be7decc8-1a90-464a-8d10-10b3752955e4
_uuid : be7decc8-1a90-464a-8d10-10b3752955e4
external_ids : {"k8s.ovn.org/kind"=Service, "k8s.ovn.org/owner"="default/kubernetes"}
health_check : []
ip_port_mappings : {}
-name : "Service_default/kubernetes_TCP_node_router_microshift-dev"
-options : {event="false", reject="true", skip_snat="false"}
+name : "Service_default/kubernetes_TCP_node_router+switch_microshift-dev"
+options : {event="false", hairpin_snat_ip="169.254.169.5 fd69::5", neighbor_responder=none, reject="true", skip_snat="false"}
protocol : tcp
selection_fields : []
-vips : {"10.43.0.1:443"="169.254.169.2:6443"}
+vips : {"10.43.0.1:443"="10.44.0.0:6443"}
```
Step 3, static route in ovn gateway router:
@@ -390,6 +388,8 @@ Step 3, static route in ovn gateway router:
(northd)$ ovn-nbctl lr-route-list GR_microshift-dev
IPv4 Routes
Route Table :
+ 169.254.169.0/29 169.254.169.4 dst-ip rtoe-GR_microshift-dev
+ 10.42.0.0/16 100.64.0.1 dst-ip
0.0.0.0/0 192.168.122.1 dst-ip rtoe-GR_microshift-dev
```
@@ -411,14 +411,7 @@ Step 5, static openflow rule in br-ex:
```text
(host)$ ovs-appctl bridge/dump-flows br-ex
-duration=538658s, n_packets=90, n_bytes=12756, priority=500,ip,in_port=2,nw_src=192.168.122.14,nw_dst=169.254.169.2,actions=ct(commit,table=4,zone=64001,nat(dst=192.168.122.14))
-```
-
-Step 6, static openflow rule in br-ex:
-
-```text
-(host)$ ovs-appctl bridge/dump-flows br-ex
-table_id=4, duration=522559s, n_packets=90, n_bytes=12756, ip,actions=ct(commit,table=3,zone=64002,nat(src=169.254.169.1))
+duration=538658s, n_packets=90, n_bytes=12756, priority=500,ip,in_port=2,nw_src=192.168.122.14,nw_dst=10.44.0.0,actions=ct(commit,table=4,zone=64001)
```
### pod to nodePortService
diff --git a/docs/openshift_ci.md b/docs/openshift_ci.md
index 89db4b72ab..c348a73c95 100644
--- a/docs/openshift_ci.md
+++ b/docs/openshift_ci.md
@@ -31,8 +31,6 @@ MicroShift generates a set of kubeconfig files in default configuration:
# tree /var/lib/microshift/resources/kubeadmin/
/var/lib/microshift/resources/kubeadmin/
├── kubeconfig
-├── localhost
-│ └── kubeconfig
├── microshift-dev
│ └── kubeconfig
└── microshift-dev.localdomain
@@ -40,7 +38,7 @@ MicroShift generates a set of kubeconfig files in default configuration:
3 directories, 4 files
```
-Using default configuration there is a kubeconfig for each of the subject alternative names, localhost, and the one at the root directory which is using the cluster URL. If cluster URL is not using localhost then all these files are not generated.
+Using default configuration there is a kubeconfig for each of the subject alternative names and the one at the root directory which is using the cluster URL, which defaults to localhost.
Having a DNS (or simply changing `/etc/hosts`) we have to select which of the kubeconfig files we need to use according to it. In this case we may copy the `microshift-dev` kubeconfig to our local environment and we will be able to use `oc`:
```
diff --git a/docs/rhel4edge_iso.md b/docs/rhel4edge_iso.md
index a537f2b72c..f91dc711b8 100644
--- a/docs/rhel4edge_iso.md
+++ b/docs/rhel4edge_iso.md
@@ -132,6 +132,17 @@ auth_file_path = "/etc/osbuild-worker/pull-secret.json"
EOF
```
+> **NOTE**
+> Embedding container images in the generated ISO requires the functionality from the latest version of the `osbuild` and `osbuild-composer` packages.
+> This functionality will be available in the future releases of the RHEL 9 operating system.
+
+To install the necessary functionality, run the following command to upgrade your system with the up-to-date software from the `copr` repository.
+```bash
+~/microshift/hack/osbuild2copr.sh copr
+```
+
+> If necessary, rerun the `hack/osbuild2copr.sh` script with the `appstream` argument to revert to the standard `osbuild` and `osbuild-composer` packages.
+
Proceed by running the build script with the `-embed_containers` argument to include the dependent container images into the generated ISO.
```bash
~/microshift/scripts/image-builder/build.sh -pull_secret_file ~/.pull-secret.json -embed_containers
@@ -220,12 +231,17 @@ sudo virsh net-autostart isolated
Follow the instruction in the [Install MicroShift for Edge](#install-microshift-for-edge) section to install a new virtual machine using the `isolated` network configuration.
> When running the `virt-install` command, specify the `--network network=isolated,model=virtio` option to select the `isolated` network configuration.
-After the virtual machine is created, log into the system and verify that the Internet is not accessible.
+After the virtual machine is created, log into the system using the Virtual Machine Manager console and verify that the Internet is not accessible.
```bash
$ curl -I redhat.com
curl: (6) Could not resolve host: redhat.com
```
+> **NOTE**
+> It may be more convenient to connect to the virtual machine using its serial console.
+> * Run the `sudo systemctl enable --now serial-getty@ttyS0.service` command on the virtual machine to enable the serial console service.
+> * Run the `sudo virsh console microshift-edge` command on the hypervisor to connect to the serial console.
+
Make sure that `CRI-O` has access to all the container images required by MicroShift.
```bash
$ sudo crictl images
diff --git a/etcd/cmd/microshift-etcd/run.go b/etcd/cmd/microshift-etcd/run.go
index 451403a0ae..0ea60ef6ec 100644
--- a/etcd/cmd/microshift-etcd/run.go
+++ b/etcd/cmd/microshift-etcd/run.go
@@ -106,6 +106,10 @@ func (s *EtcdService) configure(cfg *config.MicroshiftConfig) {
}
func (s *EtcdService) Run() error {
+ if os.Geteuid() > 0 {
+ klog.Fatalf("microshift-etcd must be run privileged")
+ }
+
e, err := etcd.StartEtcd(s.etcdCfg)
if err != nil {
return fmt.Errorf("microshift-etcd failed to start: %v", err)
@@ -186,6 +190,9 @@ func setURL(hostnames []string, port string) []url.URL {
return urls
}
+// The following 'fragmented' logic is copied from the Openshift Cluster Etcd Operator.
+//
+// https://github.com/openshift/cluster-etcd-operator/blob/0584b0d1c8868535baf889d8c199f605aef4a3ae/pkg/operator/defragcontroller/defragcontroller.go#L282
func isBackendFragmented(b backend.Backend, maxFragmentedPercentage float64, minDefragBytes int64) bool {
fragmentedPercentage := checkFragmentationPercentage(b.Size(), b.SizeInUse())
if fragmentedPercentage > 0.00 {
diff --git a/etcd/go.mod b/etcd/go.mod
index c4a7f7b8fe..4ea17a5b60 100644
--- a/etcd/go.mod
+++ b/etcd/go.mod
@@ -23,43 +23,12 @@ require (
)
require (
- github.com/NYTimes/gziphandler v1.1.1 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/coreos/go-oidc v2.1.0+incompatible // indirect
- github.com/docker/distribution v2.8.1+incompatible // indirect
- github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/cel-go v0.12.6 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
- github.com/moby/sys/mountinfo v0.6.2 // indirect
- github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
- github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/selinux v1.10.0 // indirect
- github.com/openshift/library-go v0.0.0-20230130232623-47904dd9ff5a // indirect
- github.com/pquerna/cachecontrol v0.1.0 // indirect
- github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/stoewer/go-strcase v1.2.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
- go.opentelemetry.io/otel/metric v0.31.0 // indirect
- golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
- golang.org/x/tools v0.2.0 // indirect
- gopkg.in/square/go-jose.v2 v2.2.2 // indirect
- k8s.io/apiserver v0.26.1 // indirect
- k8s.io/cloud-provider v0.0.0 // indirect
- k8s.io/cluster-bootstrap v0.0.0 // indirect
- k8s.io/component-helpers v0.26.1 // indirect
- k8s.io/kms v0.26.1 // indirect
- k8s.io/kubelet v0.0.0 // indirect
- k8s.io/kubernetes v1.26.1 // indirect
- k8s.io/mount-utils v0.0.0 // indirect
- k8s.io/pod-security-admission v0.25.0 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect
)
require (
@@ -142,11 +111,11 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
- golang.org/x/net v0.5.0 // indirect
+ golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
- golang.org/x/sys v0.4.0 // indirect
- golang.org/x/term v0.4.0 // indirect
- golang.org/x/text v0.6.0 // indirect
+ golang.org/x/sys v0.6.0 // indirect
+ golang.org/x/term v0.6.0 // indirect
+ golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
@@ -168,39 +137,39 @@ require (
replace (
github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.4.1-0.20221214150008-e73634cb3870 // from kubernetes
- go.etcd.io/etcd/api/v3 => github.com/openshift/etcd/api/v3 v3.5.1-0.20230125165349-13c18c444a8c // from etcd
- go.etcd.io/etcd/client/pkg/v3 => github.com/openshift/etcd/client/pkg/v3 v3.5.1-0.20230125165349-13c18c444a8c // from etcd
- go.etcd.io/etcd/client/v3 => github.com/openshift/etcd/client/v3 v3.5.1-0.20230125165349-13c18c444a8c // from etcd
- go.etcd.io/etcd/pkg/v3 => github.com/openshift/etcd/pkg/v3 v3.5.1-0.20230125165349-13c18c444a8c // from etcd
- go.etcd.io/etcd/raft/v3 => github.com/openshift/etcd/raft/v3 v3.5.1-0.20230125165349-13c18c444a8c // from etcd
- go.etcd.io/etcd/server/v3 => github.com/openshift/etcd/server/v3 v3.5.1-0.20230125165349-13c18c444a8c // from etcd
- k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20230223163248-89232647de67 // from kubernetes
- k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/kms => github.com/openshift/kubernetes/staging/src/k8s.io/kms v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20230223163248-89232647de67 // from kubernetes
- k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20230223163248-89232647de67 // staging kubernetes
- k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20230223163248-89232647de67 // staging kubernetes
+ go.etcd.io/etcd/api/v3 => github.com/openshift/etcd/api/v3 v3.5.1-0.20230322155524-f70da9d78221 // from etcd
+ go.etcd.io/etcd/client/pkg/v3 => github.com/openshift/etcd/client/pkg/v3 v3.5.1-0.20230322155524-f70da9d78221 // from etcd
+ go.etcd.io/etcd/client/v3 => github.com/openshift/etcd/client/v3 v3.5.1-0.20230322155524-f70da9d78221 // from etcd
+ go.etcd.io/etcd/pkg/v3 => github.com/openshift/etcd/pkg/v3 v3.5.1-0.20230322155524-f70da9d78221 // from etcd
+ go.etcd.io/etcd/raft/v3 => github.com/openshift/etcd/raft/v3 v3.5.1-0.20230322155524-f70da9d78221 // from etcd
+ go.etcd.io/etcd/server/v3 => github.com/openshift/etcd/server/v3 v3.5.1-0.20230322155524-f70da9d78221 // from etcd
+ k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20230417131116-379cd9f22597 // from kubernetes
+ k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/kms => github.com/openshift/kubernetes/staging/src/k8s.io/kms v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20230417131116-379cd9f22597 // from kubernetes
+ k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
+ k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20230417131116-379cd9f22597 // staging kubernetes
)
diff --git a/etcd/go.sum b/etcd/go.sum
index c31c3580b3..28ce71045a 100644
--- a/etcd/go.sum
+++ b/etcd/go.sum
@@ -40,8 +40,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -49,8 +47,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
-github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -100,8 +96,6 @@ github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOi
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -118,8 +112,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -140,11 +132,7 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -191,7 +179,6 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -222,8 +209,6 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
-github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -353,8 +338,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
-github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -370,60 +353,36 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
-github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
-github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/openshift/build-machinery-go v0.0.0-20220913142420-e25cf57ea46d h1:RR4ah7FfaPR1WePizm0jlrsbmPu91xQZnAsVVreQV1k=
github.com/openshift/build-machinery-go v0.0.0-20220913142420-e25cf57ea46d/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/etcd/api/v3 v3.5.1-0.20230125165349-13c18c444a8c h1:V681KQQqIHNe46Cx+EIbw04kBeK73fVmjU17/bLSd48=
-github.com/openshift/etcd/api/v3 v3.5.1-0.20230125165349-13c18c444a8c/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
-github.com/openshift/etcd/client/pkg/v3 v3.5.1-0.20230125165349-13c18c444a8c h1:CznQ70pOj1YwdXyPprLgAILh4TZPcI8IDML6OOXhg2o=
-github.com/openshift/etcd/client/pkg/v3 v3.5.1-0.20230125165349-13c18c444a8c/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
-github.com/openshift/etcd/client/v3 v3.5.1-0.20230125165349-13c18c444a8c h1:+1seo/rJ92p92SGlTNJshSvmfY497KLj35J4MzDulJA=
-github.com/openshift/etcd/client/v3 v3.5.1-0.20230125165349-13c18c444a8c/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
-github.com/openshift/etcd/pkg/v3 v3.5.1-0.20230125165349-13c18c444a8c h1:qvRmgvbB+XYn4xlw8hPDNJpecPuQRcHaQmcggIn3uOI=
-github.com/openshift/etcd/pkg/v3 v3.5.1-0.20230125165349-13c18c444a8c/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k=
-github.com/openshift/etcd/raft/v3 v3.5.1-0.20230125165349-13c18c444a8c h1:F2zw47hfDpS5Q+bSD8zN3MCssTDibKsD2nOAqHxINc0=
-github.com/openshift/etcd/raft/v3 v3.5.1-0.20230125165349-13c18c444a8c/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0=
-github.com/openshift/etcd/server/v3 v3.5.1-0.20230125165349-13c18c444a8c h1:dlpgsW1KSYbbI8pV2T1n38SvXGbxsJwwpHqJ8NHqH4w=
-github.com/openshift/etcd/server/v3 v3.5.1-0.20230125165349-13c18c444a8c/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo=
-github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20230223163248-89232647de67 h1:jodZbyQh9GPxPR97CE430QxolCRK0mk7oZ70sulRxgI=
-github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20230223163248-89232647de67/go.mod h1:SVFZVmcfr/QnDkKqmBvlVlhurEsml57BUNDH+R/CPqk=
-github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20230223163248-89232647de67 h1:kO4mM0qG9nkleOgVM+P8j++CWIigGdBkaiOKAjeHxMs=
-github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20230223163248-89232647de67/go.mod h1:fcaZu5DOMCvnjHsoJrKV5iQLvaiOTv50bj8okFxwYFw=
-github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20230223163248-89232647de67 h1:g8TIpCLp5/kZi6BBGDnY4Wqaw0Rj49QEX9RINciIeD8=
-github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20230223163248-89232647de67/go.mod h1:CO72fYg4kCwaSLKgV6oMT9N1k5jhe8ZfxN1Rv7i9aL0=
-github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20230223163248-89232647de67 h1:B9MD7YTmV1H6YCyH/xQMBOt92xUvEUr2AIqSPNBKOW0=
-github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20230223163248-89232647de67/go.mod h1:salqUyIbJbHU1a9StwrX18tkxIoZejk8BQB9aYNkIPk=
-github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20230223163248-89232647de67 h1:XBWzd4WZ5WWElSF5NQIAc0amOIOPdF/1OLERO1fKwjg=
-github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20230223163248-89232647de67/go.mod h1:g5iGehpfJ/sjrrPZotg2ndVBM1qrl/xlKDEC6CJwoRo=
-github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20230223163248-89232647de67 h1:oI+hUXmAZs5/DhIfBpD2IRZfR47eiKkjYaibFMja+44=
-github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20230223163248-89232647de67/go.mod h1:y/nn+aWz2wp+2mklRHKwQb5C36SqiS4mCgw2/C1HRpM=
-github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20230223163248-89232647de67 h1:eni4EViEO+I0Kfxyq0NW0C5AxQjGFKfakFsN1TJU1n4=
-github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20230223163248-89232647de67/go.mod h1:pfu0i4DFeQXwdlNs9TQpFEUK+8y4I3HSCzqbmQDobJ8=
-github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20230223163248-89232647de67 h1:3o/vN/o1/BX0pSXnzxSBFAfs8xO+7YZkfA5x28p1TDI=
-github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20230223163248-89232647de67/go.mod h1:+K4RuDOLxtVWiPZfUefwPoYThb9BEmWYp5zId0KFgU0=
-github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20230223163248-89232647de67 h1:4RoVjs10TFjwM1hKjMt1mRN/oVInSh6SfAqPLLUG1wM=
-github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20230223163248-89232647de67/go.mod h1:IrwH1whPmwqMMBuqU2Gkb2cdmdqTg5zpbYsPC9KF8hY=
-github.com/openshift/kubernetes/staging/src/k8s.io/kms v0.0.0-20230223163248-89232647de67 h1:ClQHFBI/h6g75rl75DYTd87lm3cqkWCcLz7ge5lpTRQ=
-github.com/openshift/kubernetes/staging/src/k8s.io/kms v0.0.0-20230223163248-89232647de67/go.mod h1:NXsaH8tQ1U2J3w6IW8hNaC4ZQFJvkC2lVoXOXZQ8ii0=
-github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20230223163248-89232647de67 h1:bsS++odMO5hvu7njKc3sTt+QqR9mYszQSzvN+uaLLgQ=
-github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20230223163248-89232647de67/go.mod h1:6p383itqHHhUKvZADAr+LDVXOJ4ZWo0tBKVxTjqc9UY=
-github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20230223163248-89232647de67 h1:9bgKIabZ59hgFivSWZb+0kPm/MbFcGD3f9JhF7HIbyw=
-github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20230223163248-89232647de67/go.mod h1:7VSG6sD9u3yCsNAz3+TYBN6oNXmBmfX32o9piXgdUHQ=
-github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20230223163248-89232647de67 h1:vmW1Jb6zsFOuYbCfDsMN5dDS2b6AI+4tjv5wCIaAXvY=
-github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20230223163248-89232647de67/go.mod h1:n49VMDfRrPr3RdbpZRjo4yIF65v6ryFoeflGeAFItyc=
-github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20230223163248-89232647de67 h1:Bc5AtKJhmjgGUF4Q5NX3LEP2HmU6EnnzvB0o5jnhEA4=
-github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20230223163248-89232647de67/go.mod h1:y8fO9+mZJPj9OK26w+MwxW4kUf+bJh1CHsPYvRjwYoA=
-github.com/openshift/library-go v0.0.0-20230130232623-47904dd9ff5a h1:OzF7I7mAzO4SBo5eO5CWoCTgMDydN/Tf2/Rq8YbMpT0=
-github.com/openshift/library-go v0.0.0-20230130232623-47904dd9ff5a/go.mod h1:xO4nAf0qa56dgvEJWVD1WuwSJ8JWPU1TYLBQrlutWnE=
+github.com/openshift/etcd/api/v3 v3.5.1-0.20230322155524-f70da9d78221 h1:6iHSb9/2IO+KNysJe8f6uaOlgOlottjpWdNmqU1fOD0=
+github.com/openshift/etcd/api/v3 v3.5.1-0.20230322155524-f70da9d78221/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
+github.com/openshift/etcd/client/pkg/v3 v3.5.1-0.20230322155524-f70da9d78221 h1:GSVuFn24PRFJiteHhfZRuzHIuknW6YYF1GQbpFdiRwo=
+github.com/openshift/etcd/client/pkg/v3 v3.5.1-0.20230322155524-f70da9d78221/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
+github.com/openshift/etcd/client/v3 v3.5.1-0.20230322155524-f70da9d78221 h1:QobW0Rx7CQCRtKrdx96mzeQ6dFr/bt9w7PUXSRX5rFw=
+github.com/openshift/etcd/client/v3 v3.5.1-0.20230322155524-f70da9d78221/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
+github.com/openshift/etcd/pkg/v3 v3.5.1-0.20230322155524-f70da9d78221 h1:t4Ou0Z3dAToKlmoi/e7M3a/NIakbPmrxufgUeg5rrcA=
+github.com/openshift/etcd/pkg/v3 v3.5.1-0.20230322155524-f70da9d78221/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k=
+github.com/openshift/etcd/raft/v3 v3.5.1-0.20230322155524-f70da9d78221 h1:Sjmgmr4KVEXb1Z0Vv0yiq4uD33lz/FxzhVhz/8XCJ7Q=
+github.com/openshift/etcd/raft/v3 v3.5.1-0.20230322155524-f70da9d78221/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0=
+github.com/openshift/etcd/server/v3 v3.5.1-0.20230322155524-f70da9d78221 h1:oY9dmUpbeBrOE/0QAN0gL6Lz80E9J9KrXY1iz6a3ae8=
+github.com/openshift/etcd/server/v3 v3.5.1-0.20230322155524-f70da9d78221/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo=
+github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20230417131116-379cd9f22597 h1:+tIqhG89txE3h0H6QpGTYd2oDruIBP6NQ4z410VilwY=
+github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20230417131116-379cd9f22597/go.mod h1:uLYjAyw1JyCS9EUj6oUhl4eRy4XthcFpSodl6cOokQI=
+github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20230417131116-379cd9f22597 h1:NDsBsSXNz829Mqa+qrASIhRb4j+kPLPRTYtQngeLq88=
+github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20230417131116-379cd9f22597/go.mod h1:ApuQzVQOyTrgHIGrmVljD8zZ+ZoHmXYbsFwLvSelf84=
+github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20230417131116-379cd9f22597 h1:s7GgvBoZe3ezzATYRU7HCRNpAD2+wsFs4jy4U9l08Io=
+github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20230417131116-379cd9f22597/go.mod h1:LumLfFU84tK2qax1WpUviAosYlqlUaSJTIEtYjYpfxw=
+github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20230417131116-379cd9f22597 h1:eto1R2umIDZygj6cwmv8kxBo/FEYhMIODhIv8j72TYY=
+github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20230417131116-379cd9f22597/go.mod h1:0QH/+sNaHFjTGTSyuwBXuHHhyBRa2r6ndYXUxchMPKI=
+github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20230417131116-379cd9f22597 h1:pF/q1vjooEBGg4IlToc/XC0uI2qN3STVizoBZaHVFZE=
+github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20230417131116-379cd9f22597/go.mod h1:4bzeXuIaKw5yabxEcNwxVYHKi0wLgcjl5naBxe4N1cw=
+github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20230417131116-379cd9f22597 h1:3sYFRM6LtqV+NL2ZBdGFzSZez7nl129JTjHygIDqvaE=
+github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20230417131116-379cd9f22597/go.mod h1:plN+kBQozEYwSUD3aB9oFwrpkVeIN5xVpQ6sLRbSiNs=
github.com/openshift/onsi-ginkgo/v2 v2.4.1-0.20221214150008-e73634cb3870 h1:YH3Z3ZWCDWjkAGdZpK5rCm5pRZ4wt0uEx1GwvCiO3+I=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -437,8 +396,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
-github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -471,8 +428,6 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
-github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -505,7 +460,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -543,8 +497,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
@@ -556,8 +508,6 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
@@ -662,8 +612,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -685,8 +635,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -705,7 +653,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -739,14 +686,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -755,8 +700,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -812,8 +757,6 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -929,8 +872,6 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -959,15 +900,11 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
-k8s.io/kubernetes v1.26.1 h1:N+qxlptxpSU/VSLvqBGWyyw/kNhJRpEn1b5YP57+5rk=
-k8s.io/kubernetes v1.26.1/go.mod h1:dEfAfGVZBOr2uZLeVazLPj/8E+t8jYFbQqCiBudkB8o=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/.gitignore b/etcd/vendor/github.com/NYTimes/gziphandler/.gitignore
deleted file mode 100644
index 1377554ebe..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.swp
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/.travis.yml b/etcd/vendor/github.com/NYTimes/gziphandler/.travis.yml
deleted file mode 100644
index 94dfae362d..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go:
- - 1.x
- - tip
-env:
- - GO111MODULE=on
-install:
- - go mod download
-script:
- - go test -race -v
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md b/etcd/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md
deleted file mode 100644
index cdbca194c3..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: code-of-conduct
-version: v1.0
----
-
-This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community.
-
-Our open source community strives to:
-
-* **Be friendly and patient.**
-* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
-* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
-* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one.
-* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable.
-* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.
-
-## Definitions
-
-Harassment includes, but is not limited to:
-
-- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation
-- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
-- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle
-- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop
-- Threats of violence, both physical and psychological
-- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
-- Deliberate intimidation
-- Stalking or following
-- Harassing photography or recording, including logging online activity for harassment purposes
-- Sustained disruption of discussion
-- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour
-- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others
-- Continued one-on-one communication after requests to cease
-- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse
-- Publication of non-harassing private communication
-
-Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:
-
-- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
-- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
-- Refusal to explain or debate social justice concepts
-- Communicating in a ‘tone’ you don’t find congenial
-- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions
-
-
-### Diversity Statement
-
-We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong.
-
-Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected
-characteristics above, including participants with disabilities.
-
-### Reporting Issues
-
-If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include:
-
-- Your contact information.
-- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please
-include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link.
-- Any additional information that may be helpful.
-
-After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse.
-
-### Attribution & Acknowledgements
-
-We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration:
-
-* [Django](https://www.djangoproject.com/conduct/reporting/)
-* [Python](https://www.python.org/community/diversity/)
-* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct)
-* [Contributor Covenant](http://contributor-covenant.org/)
-* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/)
-* [Citizen Code of Conduct](http://citizencodeofconduct.org/)
-
-This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md b/etcd/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md
deleted file mode 100644
index b89a9eb4fb..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Contributing to NYTimes/gziphandler
-
-This is an open source project started by handful of developers at The New York Times and open to the entire Go community.
-
-We really appreciate your help!
-
-## Filing issues
-
-When filing an issue, make sure to answer these five questions:
-
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
-
-## Contributing code
-
-Before submitting changes, please follow these guidelines:
-
-1. Check the open issues and pull requests for existing discussions.
-2. Open an issue to discuss a new feature.
-3. Write tests.
-4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments).
-5. Make sure your changes pass `go test`.
-6. Make sure the entire test suite passes locally and on Travis CI.
-7. Open a Pull Request.
-8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
-
-Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE.md file.
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/LICENSE b/etcd/vendor/github.com/NYTimes/gziphandler/LICENSE
deleted file mode 100644
index df6192d36f..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2016-2017 The New York Times Company
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/README.md b/etcd/vendor/github.com/NYTimes/gziphandler/README.md
deleted file mode 100644
index 6259acaca7..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-Gzip Handler
-============
-
-This is a tiny Go package which wraps HTTP handlers to transparently gzip the
-response body, for clients which support it. Although it's usually simpler to
-leave that to a reverse proxy (like nginx or Varnish), this package is useful
-when that's undesirable.
-
-## Install
-```bash
-go get -u github.com/NYTimes/gziphandler
-```
-
-## Usage
-
-Call `GzipHandler` with any handler (an object which implements the
-`http.Handler` interface), and it'll return a new handler which gzips the
-response. For example:
-
-```go
-package main
-
-import (
- "io"
- "net/http"
- "github.com/NYTimes/gziphandler"
-)
-
-func main() {
- withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain")
- io.WriteString(w, "Hello, World")
- })
-
- withGz := gziphandler.GzipHandler(withoutGz)
-
- http.Handle("/", withGz)
- http.ListenAndServe("0.0.0.0:8000", nil)
-}
-```
-
-
-## Documentation
-
-The docs can be found at [godoc.org][docs], as usual.
-
-
-## License
-
-[Apache 2.0][license].
-
-
-
-
-[docs]: https://godoc.org/github.com/NYTimes/gziphandler
-[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/gzip.go b/etcd/vendor/github.com/NYTimes/gziphandler/gzip.go
deleted file mode 100644
index c112bbdf81..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/gzip.go
+++ /dev/null
@@ -1,532 +0,0 @@
-package gziphandler // import "github.com/NYTimes/gziphandler"
-
-import (
- "bufio"
- "compress/gzip"
- "fmt"
- "io"
- "mime"
- "net"
- "net/http"
- "strconv"
- "strings"
- "sync"
-)
-
-const (
- vary = "Vary"
- acceptEncoding = "Accept-Encoding"
- contentEncoding = "Content-Encoding"
- contentType = "Content-Type"
- contentLength = "Content-Length"
-)
-
-type codings map[string]float64
-
-const (
- // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
- // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
- // The examples seem to indicate that it is.
- DefaultQValue = 1.0
-
- // DefaultMinSize is the default minimum size until we enable gzip compression.
- // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
- // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing.
- // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
- DefaultMinSize = 1400
-)
-
-// gzipWriterPools stores a sync.Pool for each compression level for reuse of
-// gzip.Writers. Use poolIndex to covert a compression level to an index into
-// gzipWriterPools.
-var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool
-
-func init() {
- for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
- addLevelPool(i)
- }
- addLevelPool(gzip.DefaultCompression)
-}
-
-// poolIndex maps a compression level to its index into gzipWriterPools. It
-// assumes that level is a valid gzip compression level.
-func poolIndex(level int) int {
- // gzip.DefaultCompression == -1, so we need to treat it special.
- if level == gzip.DefaultCompression {
- return gzip.BestCompression - gzip.BestSpeed + 1
- }
- return level - gzip.BestSpeed
-}
-
-func addLevelPool(level int) {
- gzipWriterPools[poolIndex(level)] = &sync.Pool{
- New: func() interface{} {
- // NewWriterLevel only returns error on a bad level, we are guaranteeing
- // that this will be a valid level so it is okay to ignore the returned
- // error.
- w, _ := gzip.NewWriterLevel(nil, level)
- return w
- },
- }
-}
-
-// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
-// bytes before writing them to the underlying response. This doesn't close the
-// writers, so don't forget to do that.
-// It can be configured to skip response smaller than minSize.
-type GzipResponseWriter struct {
- http.ResponseWriter
- index int // Index for gzipWriterPools.
- gw *gzip.Writer
-
- code int // Saves the WriteHeader value.
-
- minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
- buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
- ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter.
-
- contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
-}
-
-type GzipResponseWriterWithCloseNotify struct {
- *GzipResponseWriter
-}
-
-func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
- return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-// Write appends data to the gzip writer.
-func (w *GzipResponseWriter) Write(b []byte) (int, error) {
- // GZIP responseWriter is initialized. Use the GZIP responseWriter.
- if w.gw != nil {
- return w.gw.Write(b)
- }
-
- // If we have already decided not to use GZIP, immediately passthrough.
- if w.ignore {
- return w.ResponseWriter.Write(b)
- }
-
- // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
- // On the first write, w.buf changes from nil to a valid slice
- w.buf = append(w.buf, b...)
-
- var (
- cl, _ = strconv.Atoi(w.Header().Get(contentLength))
- ct = w.Header().Get(contentType)
- ce = w.Header().Get(contentEncoding)
- )
- // Only continue if they didn't already choose an encoding or a known unhandled content length or type.
- if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
- // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
- if len(w.buf) < w.minSize && cl == 0 {
- return len(b), nil
- }
- // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
- if cl >= w.minSize || len(w.buf) >= w.minSize {
- // If a Content-Type wasn't specified, infer it from the current buffer.
- if ct == "" {
- ct = http.DetectContentType(w.buf)
- w.Header().Set(contentType, ct)
- }
- // If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
- if handleContentType(w.contentTypes, ct) {
- if err := w.startGzip(); err != nil {
- return 0, err
- }
- return len(b), nil
- }
- }
- }
- // If we got here, we should not GZIP this response.
- if err := w.startPlain(); err != nil {
- return 0, err
- }
- return len(b), nil
-}
-
-// startGzip initializes a GZIP writer and writes the buffer.
-func (w *GzipResponseWriter) startGzip() error {
- // Set the GZIP header.
- w.Header().Set(contentEncoding, "gzip")
-
- // if the Content-Length is already set, then calls to Write on gzip
- // will fail to set the Content-Length header since its already set
- // See: https://github.com/golang/go/issues/14975.
- w.Header().Del(contentLength)
-
- // Write the header to gzip response.
- if w.code != 0 {
- w.ResponseWriter.WriteHeader(w.code)
- // Ensure that no other WriteHeader's happen
- w.code = 0
- }
-
- // Initialize and flush the buffer into the gzip response if there are any bytes.
- // If there aren't any, we shouldn't initialize it yet because on Close it will
- // write the gzip header even if nothing was ever written.
- if len(w.buf) > 0 {
- // Initialize the GZIP response.
- w.init()
- n, err := w.gw.Write(w.buf)
-
- // This should never happen (per io.Writer docs), but if the write didn't
- // accept the entire buffer but returned no specific error, we have no clue
- // what's going on, so abort just to be safe.
- if err == nil && n < len(w.buf) {
- err = io.ErrShortWrite
- }
- return err
- }
- return nil
-}
-
-// startPlain writes to sent bytes and buffer the underlying ResponseWriter without gzip.
-func (w *GzipResponseWriter) startPlain() error {
- if w.code != 0 {
- w.ResponseWriter.WriteHeader(w.code)
- // Ensure that no other WriteHeader's happen
- w.code = 0
- }
- w.ignore = true
- // If Write was never called then don't call Write on the underlying ResponseWriter.
- if w.buf == nil {
- return nil
- }
- n, err := w.ResponseWriter.Write(w.buf)
- w.buf = nil
- // This should never happen (per io.Writer docs), but if the write didn't
- // accept the entire buffer but returned no specific error, we have no clue
- // what's going on, so abort just to be safe.
- if err == nil && n < len(w.buf) {
- err = io.ErrShortWrite
- }
- return err
-}
-
-// WriteHeader just saves the response code until close or GZIP effective writes.
-func (w *GzipResponseWriter) WriteHeader(code int) {
- if w.code == 0 {
- w.code = code
- }
-}
-
-// init graps a new gzip writer from the gzipWriterPool and writes the correct
-// content encoding header.
-func (w *GzipResponseWriter) init() {
- // Bytes written during ServeHTTP are redirected to this gzip writer
- // before being written to the underlying response.
- gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
- gzw.Reset(w.ResponseWriter)
- w.gw = gzw
-}
-
-// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
-func (w *GzipResponseWriter) Close() error {
- if w.ignore {
- return nil
- }
-
- if w.gw == nil {
- // GZIP not triggered yet, write out regular response.
- err := w.startPlain()
- // Returns the error if any at write.
- if err != nil {
- err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
- }
- return err
- }
-
- err := w.gw.Close()
- gzipWriterPools[w.index].Put(w.gw)
- w.gw = nil
- return err
-}
-
-// Flush flushes the underlying *gzip.Writer and then the underlying
-// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
-// an http.Flusher.
-func (w *GzipResponseWriter) Flush() {
- if w.gw == nil && !w.ignore {
- // Only flush once startGzip or startPlain has been called.
- //
- // Flush is thus a no-op until we're certain whether a plain
- // or gzipped response will be served.
- return
- }
-
- if w.gw != nil {
- w.gw.Flush()
- }
-
- if fw, ok := w.ResponseWriter.(http.Flusher); ok {
- fw.Flush()
- }
-}
-
-// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
-// Hijacker, its Hijack method is returned. Otherwise an error is returned.
-func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
- return hj.Hijack()
- }
- return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
-}
-
-// verify Hijacker interface implementation
-var _ http.Hijacker = &GzipResponseWriter{}
-
-// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
-// an error case it panics rather than returning an error.
-func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
- wrap, err := NewGzipLevelHandler(level)
- if err != nil {
- panic(err)
- }
- return wrap
-}
-
-// NewGzipLevelHandler returns a wrapper function (often known as middleware)
-// which can be used to wrap an HTTP handler to transparently gzip the response
-// body if the client supports it (via the Accept-Encoding header). Responses will
-// be encoded at the given gzip compression level. An error will be returned only
-// if an invalid gzip compression level is given, so if one can ensure the level
-// is valid, the returned error can be safely ignored.
-func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
- return NewGzipLevelAndMinSize(level, DefaultMinSize)
-}
-
-// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller
-// specify the minimum size before compression.
-func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
- return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
-}
-
-func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
- c := &config{
- level: gzip.DefaultCompression,
- minSize: DefaultMinSize,
- }
-
- for _, o := range opts {
- o(c)
- }
-
- if err := c.validate(); err != nil {
- return nil, err
- }
-
- return func(h http.Handler) http.Handler {
- index := poolIndex(c.level)
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add(vary, acceptEncoding)
- if acceptsGzip(r) {
- gw := &GzipResponseWriter{
- ResponseWriter: w,
- index: index,
- minSize: c.minSize,
- contentTypes: c.contentTypes,
- }
- defer gw.Close()
-
- if _, ok := w.(http.CloseNotifier); ok {
- gwcn := GzipResponseWriterWithCloseNotify{gw}
- h.ServeHTTP(gwcn, r)
- } else {
- h.ServeHTTP(gw, r)
- }
-
- } else {
- h.ServeHTTP(w, r)
- }
- })
- }, nil
-}
-
-// Parsed representation of one of the inputs to ContentTypes.
-// See https://golang.org/pkg/mime/#ParseMediaType
-type parsedContentType struct {
- mediaType string
- params map[string]string
-}
-
-// equals returns whether this content type matches another content type.
-func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
- if pct.mediaType != mediaType {
- return false
- }
- // if pct has no params, don't care about other's params
- if len(pct.params) == 0 {
- return true
- }
-
- // if pct has any params, they must be identical to other's.
- if len(pct.params) != len(params) {
- return false
- }
- for k, v := range pct.params {
- if w, ok := params[k]; !ok || v != w {
- return false
- }
- }
- return true
-}
-
-// Used for functional configuration.
-type config struct {
- minSize int
- level int
- contentTypes []parsedContentType
-}
-
-func (c *config) validate() error {
- if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
- return fmt.Errorf("invalid compression level requested: %d", c.level)
- }
-
- if c.minSize < 0 {
- return fmt.Errorf("minimum size must be more than zero")
- }
-
- return nil
-}
-
-type option func(c *config)
-
-func MinSize(size int) option {
- return func(c *config) {
- c.minSize = size
- }
-}
-
-func CompressionLevel(level int) option {
- return func(c *config) {
- c.level = level
- }
-}
-
-// ContentTypes specifies a list of content types to compare
-// the Content-Type header to before compressing. If none
-// match, the response will be returned as-is.
-//
-// Content types are compared in a case-insensitive, whitespace-ignored
-// manner.
-//
-// A MIME type without any other directive will match a content type
-// that has the same MIME type, regardless of that content type's other
-// directives. I.e., "text/html" will match both "text/html" and
-// "text/html; charset=utf-8".
-//
-// A MIME type with any other directive will only match a content type
-// that has the same MIME type and other directives. I.e.,
-// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
-//
-// By default, responses are gzipped regardless of
-// Content-Type.
-func ContentTypes(types []string) option {
- return func(c *config) {
- c.contentTypes = []parsedContentType{}
- for _, v := range types {
- mediaType, params, err := mime.ParseMediaType(v)
- if err == nil {
- c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
- }
- }
- }
-}
-
-// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
-// the client supports it (via the Accept-Encoding header). This will compress at
-// the default compression level.
-func GzipHandler(h http.Handler) http.Handler {
- wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
- return wrapper(h)
-}
-
-// acceptsGzip returns true if the given HTTP request indicates that it will
-// accept a gzipped response.
-func acceptsGzip(r *http.Request) bool {
- acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
- return acceptedEncodings["gzip"] > 0.0
-}
-
-// returns true if we've been configured to compress the specific content type.
-func handleContentType(contentTypes []parsedContentType, ct string) bool {
- // If contentTypes is empty we handle all content types.
- if len(contentTypes) == 0 {
- return true
- }
-
- mediaType, params, err := mime.ParseMediaType(ct)
- if err != nil {
- return false
- }
-
- for _, c := range contentTypes {
- if c.equals(mediaType, params) {
- return true
- }
- }
-
- return false
-}
-
-// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
-// appear in an Accept-Encoding header. It returns a map of content-codings to
-// quality values, and an error containing the errors encountered. It's probably
-// safe to ignore those, because silently ignoring errors is how the internet
-// works.
-//
-// See: http://tools.ietf.org/html/rfc2616#section-14.3.
-func parseEncodings(s string) (codings, error) {
- c := make(codings)
- var e []string
-
- for _, ss := range strings.Split(s, ",") {
- coding, qvalue, err := parseCoding(ss)
-
- if err != nil {
- e = append(e, err.Error())
- } else {
- c[coding] = qvalue
- }
- }
-
- // TODO (adammck): Use a proper multi-error struct, so the individual errors
- // can be extracted if anyone cares.
- if len(e) > 0 {
- return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
- }
-
- return c, nil
-}
-
-// parseCoding parses a single conding (content-coding with an optional qvalue),
-// as might appear in an Accept-Encoding header. It attempts to forgive minor
-// formatting errors.
-func parseCoding(s string) (coding string, qvalue float64, err error) {
- for n, part := range strings.Split(s, ";") {
- part = strings.TrimSpace(part)
- qvalue = DefaultQValue
-
- if n == 0 {
- coding = strings.ToLower(part)
- } else if strings.HasPrefix(part, "q=") {
- qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)
-
- if qvalue < 0.0 {
- qvalue = 0.0
- } else if qvalue > 1.0 {
- qvalue = 1.0
- }
- }
- }
-
- if coding == "" {
- err = fmt.Errorf("empty content-coding")
- }
-
- return
-}
diff --git a/etcd/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/etcd/vendor/github.com/NYTimes/gziphandler/gzip_go18.go
deleted file mode 100644
index fa9665b7e8..0000000000
--- a/etcd/vendor/github.com/NYTimes/gziphandler/gzip_go18.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// +build go1.8
-
-package gziphandler
-
-import "net/http"
-
-// Push initiates an HTTP/2 server push.
-// Push returns ErrNotSupported if the client has disabled push or if push
-// is not supported on the underlying connection.
-func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
- pusher, ok := w.ResponseWriter.(http.Pusher)
- if ok && pusher != nil {
- return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
- }
- return http.ErrNotSupported
-}
-
-// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
-func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
-
- if opts == nil {
- opts = &http.PushOptions{
- Header: http.Header{
- acceptEncoding: []string{"gzip"},
- },
- }
- return opts
- }
-
- if opts.Header == nil {
- opts.Header = http.Header{
- acceptEncoding: []string{"gzip"},
- }
- return opts
- }
-
- if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
- opts.Header.Add(acceptEncoding, "gzip")
- return opts
- }
-
- return opts
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
deleted file mode 100644
index 52cf18e425..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright 2021 The ANTLR Project
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- 3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
deleted file mode 100644
index a4e2079e65..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "sync"
-
-var ATNInvalidAltNumber int
-
-type ATN struct {
- // DecisionToState is the decision points for all rules, subrules, optional
- // blocks, ()+, ()*, etc. Used to build DFA predictors for them.
- DecisionToState []DecisionState
-
- // grammarType is the ATN type and is used for deserializing ATNs from strings.
- grammarType int
-
- // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
- lexerActions []LexerAction
-
- // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
- maxTokenType int
-
- modeNameToStartState map[string]*TokensStartState
-
- modeToStartState []*TokensStartState
-
- // ruleToStartState maps from rule index to starting state number.
- ruleToStartState []*RuleStartState
-
- // ruleToStopState maps from rule index to stop state number.
- ruleToStopState []*RuleStopState
-
- // ruleToTokenType maps the rule index to the resulting token type for lexer
- // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
- // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
- // specified, and otherwise is nil.
- ruleToTokenType []int
-
- states []ATNState
-
- mu sync.Mutex
- stateMu sync.RWMutex
- edgeMu sync.RWMutex
-}
-
-func NewATN(grammarType int, maxTokenType int) *ATN {
- return &ATN{
- grammarType: grammarType,
- maxTokenType: maxTokenType,
- modeNameToStartState: make(map[string]*TokensStartState),
- }
-}
-
-// NextTokensInContext computes the set of valid tokens that can occur starting
-// in state s. If ctx is nil, the set of tokens will not include what can follow
-// the rule surrounding s. In other words, the set will be restricted to tokens
-// reachable staying within the rule of s.
-func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
- return NewLL1Analyzer(a).Look(s, nil, ctx)
-}
-
-// NextTokensNoContext computes the set of valid tokens that can occur starting
-// in s and staying in same rule. Token.EPSILON is in set if we reach end of
-// rule.
-func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
- a.mu.Lock()
- defer a.mu.Unlock()
- iset := s.GetNextTokenWithinRule()
- if iset == nil {
- iset = a.NextTokensInContext(s, nil)
- iset.readOnly = true
- s.SetNextTokenWithinRule(iset)
- }
- return iset
-}
-
-func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
- if ctx == nil {
- return a.NextTokensNoContext(s)
- }
-
- return a.NextTokensInContext(s, ctx)
-}
-
-func (a *ATN) addState(state ATNState) {
- if state != nil {
- state.SetATN(a)
- state.SetStateNumber(len(a.states))
- }
-
- a.states = append(a.states, state)
-}
-
-func (a *ATN) removeState(state ATNState) {
- a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
-}
-
-func (a *ATN) defineDecisionState(s DecisionState) int {
- a.DecisionToState = append(a.DecisionToState, s)
- s.setDecision(len(a.DecisionToState) - 1)
-
- return s.getDecision()
-}
-
-func (a *ATN) getDecisionState(decision int) DecisionState {
- if len(a.DecisionToState) == 0 {
- return nil
- }
-
- return a.DecisionToState[decision]
-}
-
-// getExpectedTokens computes the set of input symbols which could follow ATN
-// state number stateNumber in the specified full parse context ctx and returns
-// the set of potentially valid input symbols which could follow the specified
-// state in the specified context. This method considers the complete parser
-// context, but does not evaluate semantic predicates (i.e. all predicates
-// encountered during the calculation are assumed true). If a path in the ATN
-// exists from the starting state to the RuleStopState of the outermost context
-// without Matching any symbols, Token.EOF is added to the returned set.
-//
-// A nil ctx defaults to ParserRuleContext.EMPTY.
-//
-// It panics if the ATN does not contain state stateNumber.
-func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
- if stateNumber < 0 || stateNumber >= len(a.states) {
- panic("Invalid state number.")
- }
-
- s := a.states[stateNumber]
- following := a.NextTokens(s, nil)
-
- if !following.contains(TokenEpsilon) {
- return following
- }
-
- expected := NewIntervalSet()
-
- expected.addSet(following)
- expected.removeOne(TokenEpsilon)
-
- for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
- invokingState := a.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
-
- following = a.NextTokens(rt.(*RuleTransition).followState, nil)
- expected.addSet(following)
- expected.removeOne(TokenEpsilon)
- ctx = ctx.GetParent().(RuleContext)
- }
-
- if following.contains(TokenEpsilon) {
- expected.addOne(TokenEOF)
- }
-
- return expected
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
deleted file mode 100644
index 97ba417f74..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-type comparable interface {
- equals(other interface{}) bool
-}
-
-// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
-// context). The syntactic context is a graph-structured stack node whose
-// path(s) to the root is the rule invocation(s) chain used to arrive at the
-// state. The semantic context is the tree of semantic predicates encountered
-// before reaching an ATN state.
-type ATNConfig interface {
- comparable
-
- hash() int
-
- GetState() ATNState
- GetAlt() int
- GetSemanticContext() SemanticContext
-
- GetContext() PredictionContext
- SetContext(PredictionContext)
-
- GetReachesIntoOuterContext() int
- SetReachesIntoOuterContext(int)
-
- String() string
-
- getPrecedenceFilterSuppressed() bool
- setPrecedenceFilterSuppressed(bool)
-}
-
-type BaseATNConfig struct {
- precedenceFilterSuppressed bool
- state ATNState
- alt int
- context PredictionContext
- semanticContext SemanticContext
- reachesIntoOuterContext int
-}
-
-func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
- return &BaseATNConfig{
- state: old.state,
- alt: old.alt,
- context: old.context,
- semanticContext: old.semanticContext,
- reachesIntoOuterContext: old.reachesIntoOuterContext,
- }
-}
-
-func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
- return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
-}
-
-func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil") // TODO: Necessary?
- }
-
- return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
-}
-
-func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
- return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
-}
-
-func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
- return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
-}
-
-func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
- return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
-}
-
-func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
- return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
-}
-
-func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil")
- }
-
- return &BaseATNConfig{
- state: state,
- alt: c.GetAlt(),
- context: context,
- semanticContext: semanticContext,
- reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
- precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
- }
-}
-
-func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
- return b.precedenceFilterSuppressed
-}
-
-func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
- b.precedenceFilterSuppressed = v
-}
-
-func (b *BaseATNConfig) GetState() ATNState {
- return b.state
-}
-
-func (b *BaseATNConfig) GetAlt() int {
- return b.alt
-}
-
-func (b *BaseATNConfig) SetContext(v PredictionContext) {
- b.context = v
-}
-func (b *BaseATNConfig) GetContext() PredictionContext {
- return b.context
-}
-
-func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
- return b.semanticContext
-}
-
-func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
- return b.reachesIntoOuterContext
-}
-
-func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
- b.reachesIntoOuterContext = v
-}
-
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (b *BaseATNConfig) equals(o interface{}) bool {
- if b == o {
- return true
- }
-
- var other, ok = o.(*BaseATNConfig)
-
- if !ok {
- return false
- }
-
- var equal bool
-
- if b.context == nil {
- equal = other.context == nil
- } else {
- equal = b.context.equals(other.context)
- }
-
- var (
- nums = b.state.GetStateNumber() == other.state.GetStateNumber()
- alts = b.alt == other.alt
- cons = b.semanticContext.equals(other.semanticContext)
- sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
- )
-
- return nums && alts && cons && sups && equal
-}
-
-func (b *BaseATNConfig) hash() int {
- var c int
- if b.context != nil {
- c = b.context.hash()
- }
-
- h := murmurInit(7)
- h = murmurUpdate(h, b.state.GetStateNumber())
- h = murmurUpdate(h, b.alt)
- h = murmurUpdate(h, c)
- h = murmurUpdate(h, b.semanticContext.hash())
- return murmurFinish(h, 4)
-}
-
-func (b *BaseATNConfig) String() string {
- var s1, s2, s3 string
-
- if b.context != nil {
- s1 = ",[" + fmt.Sprint(b.context) + "]"
- }
-
- if b.semanticContext != SemanticContextNone {
- s2 = "," + fmt.Sprint(b.semanticContext)
- }
-
- if b.reachesIntoOuterContext > 0 {
- s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
- }
-
- return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
-}
-
-type LexerATNConfig struct {
- *BaseATNConfig
- lexerActionExecutor *LexerActionExecutor
- passedThroughNonGreedyDecision bool
-}
-
-func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
-}
-
-func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
- lexerActionExecutor: lexerActionExecutor,
- }
-}
-
-func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
- lexerActionExecutor: c.lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
- lexerActionExecutor: lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
- lexerActionExecutor: c.lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
-}
-
-func (l *LexerATNConfig) hash() int {
- var f int
- if l.passedThroughNonGreedyDecision {
- f = 1
- } else {
- f = 0
- }
- h := murmurInit(7)
- h = murmurUpdate(h, l.state.GetStateNumber())
- h = murmurUpdate(h, l.alt)
- h = murmurUpdate(h, l.context.hash())
- h = murmurUpdate(h, l.semanticContext.hash())
- h = murmurUpdate(h, f)
- h = murmurUpdate(h, l.lexerActionExecutor.hash())
- h = murmurFinish(h, 6)
- return h
-}
-
-func (l *LexerATNConfig) equals(other interface{}) bool {
- var othert, ok = other.(*LexerATNConfig)
-
- if l == other {
- return true
- } else if !ok {
- return false
- } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
- return false
- }
-
- var b bool
-
- if l.lexerActionExecutor != nil {
- b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
- } else {
- b = othert.lexerActionExecutor != nil
- }
-
- if b {
- return false
- }
-
- return l.BaseATNConfig.equals(othert.BaseATNConfig)
-}
-
-
-func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
- var ds, ok = target.(DecisionState)
-
- return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
deleted file mode 100644
index 49ad4a7197..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "fmt"
-
-type ATNConfigSet interface {
- hash() int
- Add(ATNConfig, *DoubleDict) bool
- AddAll([]ATNConfig) bool
-
- GetStates() Set
- GetPredicates() []SemanticContext
- GetItems() []ATNConfig
-
- OptimizeConfigs(interpreter *BaseATNSimulator)
-
- Equals(other interface{}) bool
-
- Length() int
- IsEmpty() bool
- Contains(ATNConfig) bool
- ContainsFast(ATNConfig) bool
- Clear()
- String() string
-
- HasSemanticContext() bool
- SetHasSemanticContext(v bool)
-
- ReadOnly() bool
- SetReadOnly(bool)
-
- GetConflictingAlts() *BitSet
- SetConflictingAlts(*BitSet)
-
- Alts() *BitSet
-
- FullContext() bool
-
- GetUniqueAlt() int
- SetUniqueAlt(int)
-
- GetDipsIntoOuterContext() bool
- SetDipsIntoOuterContext(bool)
-}
-
-// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
-// about its elements and can combine similar configurations using a
-// graph-structured stack.
-type BaseATNConfigSet struct {
- cachedHash int
-
- // configLookup is used to determine whether two BaseATNConfigSets are equal. We
- // need all configurations with the same (s, i, _, semctx) to be equal. A key
- // effectively doubles the number of objects associated with ATNConfigs. All
- // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
- // read-only because a set becomes a DFA state.
- configLookup Set
-
- // configs is the added elements.
- configs []ATNConfig
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves recomputation. Can we track conflicts as they
- // are added to save scanning configs later?
- conflictingAlts *BitSet
-
- // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
- // we hit a pred while computing a closure operation. Do not make a DFA state
- // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
- dipsIntoOuterContext bool
-
- // fullCtx is whether it is part of a full context LL prediction. Used to
- // determine how to merge $. It is a wildcard with SLL, but not for an LL
- // context merge.
- fullCtx bool
-
- // Used in parser and lexer. In lexer, it indicates we hit a pred
- // while computing a closure operation. Don't make a DFA state from a.
- hasSemanticContext bool
-
- // readOnly is whether it is read-only. Do not
- // allow any code to manipulate the set if true because DFA states will point at
- // sets and those must not change. It not protect other fields; conflictingAlts
- // in particular, which is assigned after readOnly.
- readOnly bool
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves recomputation. Can we track conflicts as they
- // are added to save scanning configs later?
- uniqueAlt int
-}
-
-func (b *BaseATNConfigSet) Alts() *BitSet {
- alts := NewBitSet()
- for _, it := range b.configs {
- alts.add(it.GetAlt())
- }
- return alts
-}
-
-func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
- return &BaseATNConfigSet{
- cachedHash: -1,
- configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
- fullCtx: fullCtx,
- }
-}
-
-// Add merges contexts with existing configs for (s, i, pi, _), where s is the
-// ATNConfig.state, i is the ATNConfig.alt, and pi is the
-// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
-// dipsIntoOuterContext and hasSemanticContext when necessary.
-func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if config.GetSemanticContext() != SemanticContextNone {
- b.hasSemanticContext = true
- }
-
- if config.GetReachesIntoOuterContext() > 0 {
- b.dipsIntoOuterContext = true
- }
-
- existing := b.configLookup.Add(config).(ATNConfig)
-
- if existing == config {
- b.cachedHash = -1
- b.configs = append(b.configs, config) // Track order here
- return true
- }
-
- // Merge a previous (s, i, pi, _) with it and save the result
- rootIsWildcard := !b.fullCtx
- merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
-
- // No need to check for existing.context because config.context is in the cache,
- // since the only way to create new graphs is the "call rule" and here. We cache
- // at both places.
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
-
- // Preserve the precedence filter suppression during the merge
- if config.getPrecedenceFilterSuppressed() {
- existing.setPrecedenceFilterSuppressed(true)
- }
-
- // Replace the context because there is no need to do alt mapping
- existing.SetContext(merged)
-
- return true
-}
-
-func (b *BaseATNConfigSet) GetStates() Set {
- states := newArray2DHashSet(nil, nil)
-
- for i := 0; i < len(b.configs); i++ {
- states.Add(b.configs[i].GetState())
- }
-
- return states
-}
-
-func (b *BaseATNConfigSet) HasSemanticContext() bool {
- return b.hasSemanticContext
-}
-
-func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
- b.hasSemanticContext = v
-}
-
-func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
- preds := make([]SemanticContext, 0)
-
- for i := 0; i < len(b.configs); i++ {
- c := b.configs[i].GetSemanticContext()
-
- if c != SemanticContextNone {
- preds = append(preds, c)
- }
- }
-
- return preds
-}
-
-func (b *BaseATNConfigSet) GetItems() []ATNConfig {
- return b.configs
-}
-
-func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if b.configLookup.Len() == 0 {
- return
- }
-
- for i := 0; i < len(b.configs); i++ {
- config := b.configs[i]
-
- config.SetContext(interpreter.getCachedContext(config.GetContext()))
- }
-}
-
-func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
- for i := 0; i < len(coll); i++ {
- b.Add(coll[i], nil)
- }
-
- return false
-}
-
-func (b *BaseATNConfigSet) Equals(other interface{}) bool {
- if b == other {
- return true
- } else if _, ok := other.(*BaseATNConfigSet); !ok {
- return false
- }
-
- other2 := other.(*BaseATNConfigSet)
-
- return b.configs != nil &&
- // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
- b.fullCtx == other2.fullCtx &&
- b.uniqueAlt == other2.uniqueAlt &&
- b.conflictingAlts == other2.conflictingAlts &&
- b.hasSemanticContext == other2.hasSemanticContext &&
- b.dipsIntoOuterContext == other2.dipsIntoOuterContext
-}
-
-func (b *BaseATNConfigSet) hash() int {
- if b.readOnly {
- if b.cachedHash == -1 {
- b.cachedHash = b.hashCodeConfigs()
- }
-
- return b.cachedHash
- }
-
- return b.hashCodeConfigs()
-}
-
-func (b *BaseATNConfigSet) hashCodeConfigs() int {
- h := 1
- for _, config := range b.configs {
- h = 31*h + config.hash()
- }
- return h
-}
-
-func (b *BaseATNConfigSet) Length() int {
- return len(b.configs)
-}
-
-func (b *BaseATNConfigSet) IsEmpty() bool {
- return len(b.configs) == 0
-}
-
-func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
- if b.configLookup == nil {
- panic("not implemented for read-only sets")
- }
-
- return b.configLookup.Contains(item)
-}
-
-func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
- if b.configLookup == nil {
- panic("not implemented for read-only sets")
- }
-
- return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
-}
-
-func (b *BaseATNConfigSet) Clear() {
- if b.readOnly {
- panic("set is read-only")
- }
-
- b.configs = make([]ATNConfig, 0)
- b.cachedHash = -1
- b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
-}
-
-func (b *BaseATNConfigSet) FullContext() bool {
- return b.fullCtx
-}
-
-func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
- return b.dipsIntoOuterContext
-}
-
-func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
- b.dipsIntoOuterContext = v
-}
-
-func (b *BaseATNConfigSet) GetUniqueAlt() int {
- return b.uniqueAlt
-}
-
-func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
- b.uniqueAlt = v
-}
-
-func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
- return b.conflictingAlts
-}
-
-func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
- b.conflictingAlts = v
-}
-
-func (b *BaseATNConfigSet) ReadOnly() bool {
- return b.readOnly
-}
-
-func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
- b.readOnly = readOnly
-
- if readOnly {
- b.configLookup = nil // Read only, so no need for the lookup cache
- }
-}
-
-func (b *BaseATNConfigSet) String() string {
- s := "["
-
- for i, c := range b.configs {
- s += c.String()
-
- if i != len(b.configs)-1 {
- s += ", "
- }
- }
-
- s += "]"
-
- if b.hasSemanticContext {
- s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
- }
-
- if b.uniqueAlt != ATNInvalidAltNumber {
- s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
- }
-
- if b.conflictingAlts != nil {
- s += ",conflictingAlts=" + b.conflictingAlts.String()
- }
-
- if b.dipsIntoOuterContext {
- s += ",dipsIntoOuterContext"
- }
-
- return s
-}
-
-type OrderedATNConfigSet struct {
- *BaseATNConfigSet
-}
-
-func NewOrderedATNConfigSet() *OrderedATNConfigSet {
- b := NewBaseATNConfigSet(false)
-
- b.configLookup = newArray2DHashSet(nil, nil)
-
- return &OrderedATNConfigSet{BaseATNConfigSet: b}
-}
-
-func hashATNConfig(i interface{}) int {
- o := i.(ATNConfig)
- hash := 7
- hash = 31*hash + o.GetState().GetStateNumber()
- hash = 31*hash + o.GetAlt()
- hash = 31*hash + o.GetSemanticContext().hash()
- return hash
-}
-
-func equalATNConfigs(a, b interface{}) bool {
- if a == nil || b == nil {
- return false
- }
-
- if a == b {
- return true
- }
-
- var ai, ok = a.(ATNConfig)
- var bi, ok1 = b.(ATNConfig)
-
- if !ok || !ok1 {
- return false
- }
-
- if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
- return false
- }
-
- if ai.GetAlt() != bi.GetAlt() {
- return false
- }
-
- return ai.GetSemanticContext().equals(bi.GetSemanticContext())
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
deleted file mode 100644
index cb8eafb0b2..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "errors"
-
-var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}
-
-type ATNDeserializationOptions struct {
- readOnly bool
- verifyATN bool
- generateRuleBypassTransitions bool
-}
-
-func (opts *ATNDeserializationOptions) ReadOnly() bool {
- return opts.readOnly
-}
-
-func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
- if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
- }
- opts.readOnly = readOnly
-}
-
-func (opts *ATNDeserializationOptions) VerifyATN() bool {
- return opts.verifyATN
-}
-
-func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
- if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
- }
- opts.verifyATN = verifyATN
-}
-
-func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
- return opts.generateRuleBypassTransitions
-}
-
-func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
- if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
- }
- opts.generateRuleBypassTransitions = generateRuleBypassTransitions
-}
-
-func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
- return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
-}
-
-func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
- o := new(ATNDeserializationOptions)
- if other != nil {
- *o = *other
- o.readOnly = false
- }
- return o
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
deleted file mode 100644
index aea9bbfa93..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
+++ /dev/null
@@ -1,683 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-const serializedVersion = 4
-
-type loopEndStateIntPair struct {
- item0 *LoopEndState
- item1 int
-}
-
-type blockStartStateIntPair struct {
- item0 BlockStartState
- item1 int
-}
-
-type ATNDeserializer struct {
- options *ATNDeserializationOptions
- data []int32
- pos int
-}
-
-func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
- if options == nil {
- options = &defaultATNDeserializationOptions
- }
-
- return &ATNDeserializer{options: options}
-}
-
-func stringInSlice(a string, list []string) int {
- for i, b := range list {
- if b == a {
- return i
- }
- }
-
- return -1
-}
-
-func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
- a.data = data
- a.pos = 0
- a.checkVersion()
-
- atn := a.readATN()
-
- a.readStates(atn)
- a.readRules(atn)
- a.readModes(atn)
-
- sets := a.readSets(atn, nil)
-
- a.readEdges(atn, sets)
- a.readDecisions(atn)
- a.readLexerActions(atn)
- a.markPrecedenceDecisions(atn)
- a.verifyATN(atn)
-
- if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
- a.generateRuleBypassTransitions(atn)
- // Re-verify after modification
- a.verifyATN(atn)
- }
-
- return atn
-
-}
-
-func (a *ATNDeserializer) checkVersion() {
- version := a.readInt()
-
- if version != serializedVersion {
- panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
- }
-}
-
-func (a *ATNDeserializer) readATN() *ATN {
- grammarType := a.readInt()
- maxTokenType := a.readInt()
-
- return NewATN(grammarType, maxTokenType)
-}
-
-func (a *ATNDeserializer) readStates(atn *ATN) {
- nstates := a.readInt()
-
- // Allocate worst case size.
- loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
- endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
-
- // Preallocate states slice.
- atn.states = make([]ATNState, 0, nstates)
-
- for i := 0; i < nstates; i++ {
- stype := a.readInt()
-
- // Ignore bad types of states
- if stype == ATNStateInvalidType {
- atn.addState(nil)
- continue
- }
-
- ruleIndex := a.readInt()
-
- s := a.stateFactory(stype, ruleIndex)
-
- if stype == ATNStateLoopEnd {
- loopBackStateNumber := a.readInt()
-
- loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
- } else if s2, ok := s.(BlockStartState); ok {
- endStateNumber := a.readInt()
-
- endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
- }
-
- atn.addState(s)
- }
-
- // Delay the assignment of loop back and end states until we know all the state
- // instances have been initialized
- for _, pair := range loopBackStateNumbers {
- pair.item0.loopBackState = atn.states[pair.item1]
- }
-
- for _, pair := range endStateNumbers {
- pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
- }
-
- numNonGreedyStates := a.readInt()
- for j := 0; j < numNonGreedyStates; j++ {
- stateNumber := a.readInt()
-
- atn.states[stateNumber].(DecisionState).setNonGreedy(true)
- }
-
- numPrecedenceStates := a.readInt()
- for j := 0; j < numPrecedenceStates; j++ {
- stateNumber := a.readInt()
-
- atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
- }
-}
-
-func (a *ATNDeserializer) readRules(atn *ATN) {
- nrules := a.readInt()
-
- if atn.grammarType == ATNTypeLexer {
- atn.ruleToTokenType = make([]int, nrules)
- }
-
- atn.ruleToStartState = make([]*RuleStartState, nrules)
-
- for i := range atn.ruleToStartState {
- s := a.readInt()
- startState := atn.states[s].(*RuleStartState)
-
- atn.ruleToStartState[i] = startState
-
- if atn.grammarType == ATNTypeLexer {
- tokenType := a.readInt()
-
- atn.ruleToTokenType[i] = tokenType
- }
- }
-
- atn.ruleToStopState = make([]*RuleStopState, nrules)
-
- for _, state := range atn.states {
- if s2, ok := state.(*RuleStopState); ok {
- atn.ruleToStopState[s2.ruleIndex] = s2
- atn.ruleToStartState[s2.ruleIndex].stopState = s2
- }
- }
-}
-
-func (a *ATNDeserializer) readModes(atn *ATN) {
- nmodes := a.readInt()
- atn.modeToStartState = make([]*TokensStartState, nmodes)
-
- for i := range atn.modeToStartState {
- s := a.readInt()
-
- atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
- }
-}
-
-func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
- m := a.readInt()
-
- // Preallocate the needed capacity.
- if cap(sets)-len(sets) < m {
- isets := make([]*IntervalSet, len(sets), len(sets)+m)
- copy(isets, sets)
- sets = isets
- }
-
- for i := 0; i < m; i++ {
- iset := NewIntervalSet()
-
- sets = append(sets, iset)
-
- n := a.readInt()
- containsEOF := a.readInt()
-
- if containsEOF != 0 {
- iset.addOne(-1)
- }
-
- for j := 0; j < n; j++ {
- i1 := a.readInt()
- i2 := a.readInt()
-
- iset.addRange(i1, i2)
- }
- }
-
- return sets
-}
-
-func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
- nedges := a.readInt()
-
- for i := 0; i < nedges; i++ {
- var (
- src = a.readInt()
- trg = a.readInt()
- ttype = a.readInt()
- arg1 = a.readInt()
- arg2 = a.readInt()
- arg3 = a.readInt()
- trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
- srcState = atn.states[src]
- )
-
- srcState.AddTransition(trans, -1)
- }
-
- // Edges for rule stop states can be derived, so they are not serialized
- for _, state := range atn.states {
- for _, t := range state.GetTransitions() {
- var rt, ok = t.(*RuleTransition)
-
- if !ok {
- continue
- }
-
- outermostPrecedenceReturn := -1
-
- if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
- if rt.precedence == 0 {
- outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
- }
- }
-
- trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
-
- atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
- }
- }
-
- for _, state := range atn.states {
- if s2, ok := state.(BlockStartState); ok {
- // We need to know the end state to set its start state
- if s2.getEndState() == nil {
- panic("IllegalState")
- }
-
- // Block end states can only be associated to a single block start state
- if s2.getEndState().startState != nil {
- panic("IllegalState")
- }
-
- s2.getEndState().startState = state
- }
-
- if s2, ok := state.(*PlusLoopbackState); ok {
- for _, t := range s2.GetTransitions() {
- if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
- t2.loopBackState = state
- }
- }
- } else if s2, ok := state.(*StarLoopbackState); ok {
- for _, t := range s2.GetTransitions() {
- if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
- t2.loopBackState = state
- }
- }
- }
- }
-}
-
-func (a *ATNDeserializer) readDecisions(atn *ATN) {
- ndecisions := a.readInt()
-
- for i := 0; i < ndecisions; i++ {
- s := a.readInt()
- decState := atn.states[s].(DecisionState)
-
- atn.DecisionToState = append(atn.DecisionToState, decState)
- decState.setDecision(i)
- }
-}
-
-func (a *ATNDeserializer) readLexerActions(atn *ATN) {
- if atn.grammarType == ATNTypeLexer {
- count := a.readInt()
-
- atn.lexerActions = make([]LexerAction, count)
-
- for i := range atn.lexerActions {
- actionType := a.readInt()
- data1 := a.readInt()
- data2 := a.readInt()
- atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
- }
- }
-}
-
-func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
- count := len(atn.ruleToStartState)
-
- for i := 0; i < count; i++ {
- atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
- }
-
- for i := 0; i < count; i++ {
- a.generateRuleBypassTransition(atn, i)
- }
-}
-
-func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
- bypassStart := NewBasicBlockStartState()
-
- bypassStart.ruleIndex = idx
- atn.addState(bypassStart)
-
- bypassStop := NewBlockEndState()
-
- bypassStop.ruleIndex = idx
- atn.addState(bypassStop)
-
- bypassStart.endState = bypassStop
-
- atn.defineDecisionState(bypassStart.BaseDecisionState)
-
- bypassStop.startState = bypassStart
-
- var excludeTransition Transition
- var endState ATNState
-
- if atn.ruleToStartState[idx].isPrecedenceRule {
- // Wrap from the beginning of the rule to the StarLoopEntryState
- endState = nil
-
- for i := 0; i < len(atn.states); i++ {
- state := atn.states[i]
-
- if a.stateIsEndStateFor(state, idx) != nil {
- endState = state
- excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
-
- break
- }
- }
-
- if excludeTransition == nil {
- panic("Couldn't identify final state of the precedence rule prefix section.")
- }
- } else {
- endState = atn.ruleToStopState[idx]
- }
-
- // All non-excluded transitions that currently target end state need to target
- // blockEnd instead
- for i := 0; i < len(atn.states); i++ {
- state := atn.states[i]
-
- for j := 0; j < len(state.GetTransitions()); j++ {
- transition := state.GetTransitions()[j]
-
- if transition == excludeTransition {
- continue
- }
-
- if transition.getTarget() == endState {
- transition.setTarget(bypassStop)
- }
- }
- }
-
- // All transitions leaving the rule start state need to leave blockStart instead
- ruleToStartState := atn.ruleToStartState[idx]
- count := len(ruleToStartState.GetTransitions())
-
- for count > 0 {
- bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
- ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]})
- }
-
- // Link the new states
- atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
- bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
-
- MatchState := NewBasicState()
-
- atn.addState(MatchState)
- MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
- bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
-}
-
-func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
- if state.GetRuleIndex() != idx {
- return nil
- }
-
- if _, ok := state.(*StarLoopEntryState); !ok {
- return nil
- }
-
- maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
-
- if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
- return nil
- }
-
- var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
-
- if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
- return state
- }
-
- return nil
-}
-
-// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
-// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
-// the correct value.
-func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
- for _, state := range atn.states {
- if _, ok := state.(*StarLoopEntryState); !ok {
- continue
- }
-
- // We analyze the ATN to determine if a ATN decision state is the
- // decision for the closure block that determines whether a
- // precedence rule should continue or complete.
- if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
- maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
-
- if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
- var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
-
- if s3.epsilonOnlyTransitions && ok2 {
- state.(*StarLoopEntryState).precedenceRuleDecision = true
- }
- }
- }
- }
-}
-
-func (a *ATNDeserializer) verifyATN(atn *ATN) {
- if !a.options.VerifyATN() {
- return
- }
-
- // Verify assumptions
- for _, state := range atn.states {
- if state == nil {
- continue
- }
-
- a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
-
- switch s2 := state.(type) {
- case *PlusBlockStartState:
- a.checkCondition(s2.loopBackState != nil, "")
-
- case *StarLoopEntryState:
- a.checkCondition(s2.loopBackState != nil, "")
- a.checkCondition(len(s2.GetTransitions()) == 2, "")
-
- switch s2.transitions[0].getTarget().(type) {
- case *StarBlockStartState:
- _, ok := s2.transitions[1].getTarget().(*LoopEndState)
-
- a.checkCondition(ok, "")
- a.checkCondition(!s2.nonGreedy, "")
-
- case *LoopEndState:
- var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
-
- a.checkCondition(ok, "")
- a.checkCondition(s2.nonGreedy, "")
-
- default:
- panic("IllegalState")
- }
-
- case *StarLoopbackState:
- a.checkCondition(len(state.GetTransitions()) == 1, "")
-
- var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
-
- a.checkCondition(ok, "")
-
- case *LoopEndState:
- a.checkCondition(s2.loopBackState != nil, "")
-
- case *RuleStartState:
- a.checkCondition(s2.stopState != nil, "")
-
- case BlockStartState:
- a.checkCondition(s2.getEndState() != nil, "")
-
- case *BlockEndState:
- a.checkCondition(s2.startState != nil, "")
-
- case DecisionState:
- a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
-
- default:
- var _, ok = s2.(*RuleStopState)
-
- a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
- }
- }
-}
-
-func (a *ATNDeserializer) checkCondition(condition bool, message string) {
- if !condition {
- if message == "" {
- message = "IllegalState"
- }
-
- panic(message)
- }
-}
-
-func (a *ATNDeserializer) readInt() int {
- v := a.data[a.pos]
-
- a.pos++
-
- return int(v) // data is 32 bits but int is at least that big
-}
-
-func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
- target := atn.states[trg]
-
- switch typeIndex {
- case TransitionEPSILON:
- return NewEpsilonTransition(target, -1)
-
- case TransitionRANGE:
- if arg3 != 0 {
- return NewRangeTransition(target, TokenEOF, arg2)
- }
-
- return NewRangeTransition(target, arg1, arg2)
-
- case TransitionRULE:
- return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
-
- case TransitionPREDICATE:
- return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
-
- case TransitionPRECEDENCE:
- return NewPrecedencePredicateTransition(target, arg1)
-
- case TransitionATOM:
- if arg3 != 0 {
- return NewAtomTransition(target, TokenEOF)
- }
-
- return NewAtomTransition(target, arg1)
-
- case TransitionACTION:
- return NewActionTransition(target, arg1, arg2, arg3 != 0)
-
- case TransitionSET:
- return NewSetTransition(target, sets[arg1])
-
- case TransitionNOTSET:
- return NewNotSetTransition(target, sets[arg1])
-
- case TransitionWILDCARD:
- return NewWildcardTransition(target)
- }
-
- panic("The specified transition type is not valid.")
-}
-
-func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
- var s ATNState
-
- switch typeIndex {
- case ATNStateInvalidType:
- return nil
-
- case ATNStateBasic:
- s = NewBasicState()
-
- case ATNStateRuleStart:
- s = NewRuleStartState()
-
- case ATNStateBlockStart:
- s = NewBasicBlockStartState()
-
- case ATNStatePlusBlockStart:
- s = NewPlusBlockStartState()
-
- case ATNStateStarBlockStart:
- s = NewStarBlockStartState()
-
- case ATNStateTokenStart:
- s = NewTokensStartState()
-
- case ATNStateRuleStop:
- s = NewRuleStopState()
-
- case ATNStateBlockEnd:
- s = NewBlockEndState()
-
- case ATNStateStarLoopBack:
- s = NewStarLoopbackState()
-
- case ATNStateStarLoopEntry:
- s = NewStarLoopEntryState()
-
- case ATNStatePlusLoopBack:
- s = NewPlusLoopbackState()
-
- case ATNStateLoopEnd:
- s = NewLoopEndState()
-
- default:
- panic(fmt.Sprintf("state type %d is invalid", typeIndex))
- }
-
- s.SetRuleIndex(ruleIndex)
-
- return s
-}
-
-func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
- switch typeIndex {
- case LexerActionTypeChannel:
- return NewLexerChannelAction(data1)
-
- case LexerActionTypeCustom:
- return NewLexerCustomAction(data1, data2)
-
- case LexerActionTypeMode:
- return NewLexerModeAction(data1)
-
- case LexerActionTypeMore:
- return LexerMoreActionINSTANCE
-
- case LexerActionTypePopMode:
- return LexerPopModeActionINSTANCE
-
- case LexerActionTypePushMode:
- return NewLexerPushModeAction(data1)
-
- case LexerActionTypeSkip:
- return LexerSkipActionINSTANCE
-
- case LexerActionTypeType:
- return NewLexerTypeAction(data1)
-
- default:
- panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
deleted file mode 100644
index d5454d6d5d..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
-
-type IATNSimulator interface {
- SharedContextCache() *PredictionContextCache
- ATN() *ATN
- DecisionToDFA() []*DFA
-}
-
-type BaseATNSimulator struct {
- atn *ATN
- sharedContextCache *PredictionContextCache
- decisionToDFA []*DFA
-}
-
-func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
- b := new(BaseATNSimulator)
-
- b.atn = atn
- b.sharedContextCache = sharedContextCache
-
- return b
-}
-
-func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
- if b.sharedContextCache == nil {
- return context
- }
-
- visited := make(map[PredictionContext]PredictionContext)
-
- return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
-}
-
-func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
- return b.sharedContextCache
-}
-
-func (b *BaseATNSimulator) ATN() *ATN {
- return b.atn
-}
-
-func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
- return b.decisionToDFA
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
deleted file mode 100644
index 3835bb2e93..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "strconv"
-
-// Constants for serialization.
-const (
- ATNStateInvalidType = 0
- ATNStateBasic = 1
- ATNStateRuleStart = 2
- ATNStateBlockStart = 3
- ATNStatePlusBlockStart = 4
- ATNStateStarBlockStart = 5
- ATNStateTokenStart = 6
- ATNStateRuleStop = 7
- ATNStateBlockEnd = 8
- ATNStateStarLoopBack = 9
- ATNStateStarLoopEntry = 10
- ATNStatePlusLoopBack = 11
- ATNStateLoopEnd = 12
-
- ATNStateInvalidStateNumber = -1
-)
-
-var ATNStateInitialNumTransitions = 4
-
-type ATNState interface {
- GetEpsilonOnlyTransitions() bool
-
- GetRuleIndex() int
- SetRuleIndex(int)
-
- GetNextTokenWithinRule() *IntervalSet
- SetNextTokenWithinRule(*IntervalSet)
-
- GetATN() *ATN
- SetATN(*ATN)
-
- GetStateType() int
-
- GetStateNumber() int
- SetStateNumber(int)
-
- GetTransitions() []Transition
- SetTransitions([]Transition)
- AddTransition(Transition, int)
-
- String() string
- hash() int
-}
-
-type BaseATNState struct {
- // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
- NextTokenWithinRule *IntervalSet
-
- // atn is the current ATN.
- atn *ATN
-
- epsilonOnlyTransitions bool
-
- // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
- ruleIndex int
-
- stateNumber int
-
- stateType int
-
- // Track the transitions emanating from this ATN state.
- transitions []Transition
-}
-
-func NewBaseATNState() *BaseATNState {
- return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
-}
-
-func (as *BaseATNState) GetRuleIndex() int {
- return as.ruleIndex
-}
-
-func (as *BaseATNState) SetRuleIndex(v int) {
- as.ruleIndex = v
-}
-func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
- return as.epsilonOnlyTransitions
-}
-
-func (as *BaseATNState) GetATN() *ATN {
- return as.atn
-}
-
-func (as *BaseATNState) SetATN(atn *ATN) {
- as.atn = atn
-}
-
-func (as *BaseATNState) GetTransitions() []Transition {
- return as.transitions
-}
-
-func (as *BaseATNState) SetTransitions(t []Transition) {
- as.transitions = t
-}
-
-func (as *BaseATNState) GetStateType() int {
- return as.stateType
-}
-
-func (as *BaseATNState) GetStateNumber() int {
- return as.stateNumber
-}
-
-func (as *BaseATNState) SetStateNumber(stateNumber int) {
- as.stateNumber = stateNumber
-}
-
-func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
- return as.NextTokenWithinRule
-}
-
-func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
- as.NextTokenWithinRule = v
-}
-
-func (as *BaseATNState) hash() int {
- return as.stateNumber
-}
-
-func (as *BaseATNState) String() string {
- return strconv.Itoa(as.stateNumber)
-}
-
-func (as *BaseATNState) equals(other interface{}) bool {
- if ot, ok := other.(ATNState); ok {
- return as.stateNumber == ot.GetStateNumber()
- }
-
- return false
-}
-
-func (as *BaseATNState) isNonGreedyExitState() bool {
- return false
-}
-
-func (as *BaseATNState) AddTransition(trans Transition, index int) {
- if len(as.transitions) == 0 {
- as.epsilonOnlyTransitions = trans.getIsEpsilon()
- } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
- as.epsilonOnlyTransitions = false
- }
-
- if index == -1 {
- as.transitions = append(as.transitions, trans)
- } else {
- as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
- // TODO: as.transitions.splice(index, 1, trans)
- }
-}
-
-type BasicState struct {
- *BaseATNState
-}
-
-func NewBasicState() *BasicState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateBasic
-
- return &BasicState{BaseATNState: b}
-}
-
-type DecisionState interface {
- ATNState
-
- getDecision() int
- setDecision(int)
-
- getNonGreedy() bool
- setNonGreedy(bool)
-}
-
-type BaseDecisionState struct {
- *BaseATNState
- decision int
- nonGreedy bool
-}
-
-func NewBaseDecisionState() *BaseDecisionState {
- return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
-}
-
-func (s *BaseDecisionState) getDecision() int {
- return s.decision
-}
-
-func (s *BaseDecisionState) setDecision(b int) {
- s.decision = b
-}
-
-func (s *BaseDecisionState) getNonGreedy() bool {
- return s.nonGreedy
-}
-
-func (s *BaseDecisionState) setNonGreedy(b bool) {
- s.nonGreedy = b
-}
-
-type BlockStartState interface {
- DecisionState
-
- getEndState() *BlockEndState
- setEndState(*BlockEndState)
-}
-
-// BaseBlockStartState is the start of a regular (...) block.
-type BaseBlockStartState struct {
- *BaseDecisionState
- endState *BlockEndState
-}
-
-func NewBlockStartState() *BaseBlockStartState {
- return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
-}
-
-func (s *BaseBlockStartState) getEndState() *BlockEndState {
- return s.endState
-}
-
-func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
- s.endState = b
-}
-
-type BasicBlockStartState struct {
- *BaseBlockStartState
-}
-
-func NewBasicBlockStartState() *BasicBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStateBlockStart
-
- return &BasicBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &BasicBlockStartState{}
-
-// BlockEndState is a terminal node of a simple (a|b|c) block.
-type BlockEndState struct {
- *BaseATNState
- startState ATNState
-}
-
-func NewBlockEndState() *BlockEndState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateBlockEnd
-
- return &BlockEndState{BaseATNState: b}
-}
-
-// RuleStopState is the last node in the ATN for a rule, unless that rule is the
-// start symbol. In that case, there is one transition to EOF. Later, we might
-// encode references to all calls to this rule to compute FOLLOW sets for error
-// handling.
-type RuleStopState struct {
- *BaseATNState
-}
-
-func NewRuleStopState() *RuleStopState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateRuleStop
-
- return &RuleStopState{BaseATNState: b}
-}
-
-type RuleStartState struct {
- *BaseATNState
- stopState ATNState
- isPrecedenceRule bool
-}
-
-func NewRuleStartState() *RuleStartState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateRuleStart
-
- return &RuleStartState{BaseATNState: b}
-}
-
-// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
-// transitions: one to the loop back to start of the block, and one to exit.
-type PlusLoopbackState struct {
- *BaseDecisionState
-}
-
-func NewPlusLoopbackState() *PlusLoopbackState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStatePlusLoopBack
-
- return &PlusLoopbackState{BaseDecisionState: b}
-}
-
-// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
-// decision state; we don't use it for code generation. Somebody might need it,
-// it is included for completeness. In reality, PlusLoopbackState is the real
-// decision-making node for A+.
-type PlusBlockStartState struct {
- *BaseBlockStartState
- loopBackState ATNState
-}
-
-func NewPlusBlockStartState() *PlusBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStatePlusBlockStart
-
- return &PlusBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &PlusBlockStartState{}
-
-// StarBlockStartState is the block that begins a closure loop.
-type StarBlockStartState struct {
- *BaseBlockStartState
-}
-
-func NewStarBlockStartState() *StarBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStateStarBlockStart
-
- return &StarBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &StarBlockStartState{}
-
-type StarLoopbackState struct {
- *BaseATNState
-}
-
-func NewStarLoopbackState() *StarLoopbackState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateStarLoopBack
-
- return &StarLoopbackState{BaseATNState: b}
-}
-
-type StarLoopEntryState struct {
- *BaseDecisionState
- loopBackState ATNState
- precedenceRuleDecision bool
-}
-
-func NewStarLoopEntryState() *StarLoopEntryState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStateStarLoopEntry
-
- // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
- return &StarLoopEntryState{BaseDecisionState: b}
-}
-
-// LoopEndState marks the end of a * or + loop.
-type LoopEndState struct {
- *BaseATNState
- loopBackState ATNState
-}
-
-func NewLoopEndState() *LoopEndState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateLoopEnd
-
- return &LoopEndState{BaseATNState: b}
-}
-
-// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
-type TokensStartState struct {
- *BaseDecisionState
-}
-
-func NewTokensStartState() *TokensStartState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStateTokenStart
-
- return &TokensStartState{BaseDecisionState: b}
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
deleted file mode 100644
index a7b48976b3..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// Represent the type of recognizer an ATN applies to.
-const (
- ATNTypeLexer = 0
- ATNTypeParser = 1
-)
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
deleted file mode 100644
index 70c1207f7f..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type CharStream interface {
- IntStream
- GetText(int, int) string
- GetTextFromTokens(start, end Token) string
- GetTextFromInterval(*Interval) string
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
deleted file mode 100644
index 330ff8f31f..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// TokenFactory creates CommonToken objects.
-type TokenFactory interface {
- Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
-}
-
-// CommonTokenFactory is the default TokenFactory implementation.
-type CommonTokenFactory struct {
- // copyText indicates whether CommonToken.setText should be called after
- // constructing tokens to explicitly set the text. This is useful for cases
- // where the input stream might not be able to provide arbitrary substrings of
- // text from the input after the lexer creates a token (e.g. the
- // implementation of CharStream.GetText in UnbufferedCharStream panics an
- // UnsupportedOperationException). Explicitly setting the token text allows
- // Token.GetText to be called at any time regardless of the input stream
- // implementation.
- //
- // The default value is false to avoid the performance and memory overhead of
- // copying text for every token unless explicitly requested.
- copyText bool
-}
-
-func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
- return &CommonTokenFactory{copyText: copyText}
-}
-
-// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
-// explicitly copy token text when constructing tokens.
-var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
-
-func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
- t := NewCommonToken(source, ttype, channel, start, stop)
-
- t.line = line
- t.column = column
-
- if text != "" {
- t.SetText(text)
- } else if c.copyText && source.charStream != nil {
- t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
- }
-
- return t
-}
-
-func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
- t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
- t.SetText(text)
-
- return t
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
deleted file mode 100644
index c90e9b8904..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "strconv"
-)
-
-// CommonTokenStream is an implementation of TokenStream that loads tokens from
-// a TokenSource on-demand and places the tokens in a buffer to provide access
-// to any previous token by index. This token stream ignores the value of
-// Token.getChannel. If your parser requires the token stream filter tokens to
-// only those on a particular channel, such as Token.DEFAULT_CHANNEL or
-// Token.HIDDEN_CHANNEL, use a filtering token stream such a CommonTokenStream.
-type CommonTokenStream struct {
- channel int
-
- // fetchedEOF indicates whether the Token.EOF token has been fetched from
- // tokenSource and added to tokens. This field improves performance for the
- // following cases:
- //
- // consume: The lookahead check in consume to preven consuming the EOF symbol is
- // optimized by checking the values of fetchedEOF and p instead of calling LA.
- //
- // fetch: The check to prevent adding multiple EOF symbols into tokens is
- // trivial with bt field.
- fetchedEOF bool
-
- // index indexs into tokens of the current token (next token to consume).
- // tokens[p] should be LT(1). It is set to -1 when the stream is first
- // constructed or when SetTokenSource is called, indicating that the first token
- // has not yet been fetched from the token source. For additional information,
- // see the documentation of IntStream for a description of initializing methods.
- index int
-
- // tokenSource is the TokenSource from which tokens for the bt stream are
- // fetched.
- tokenSource TokenSource
-
- // tokens is all tokens fetched from the token source. The list is considered a
- // complete view of the input once fetchedEOF is set to true.
- tokens []Token
-}
-
-func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
- return &CommonTokenStream{
- channel: channel,
- index: -1,
- tokenSource: lexer,
- tokens: make([]Token, 0),
- }
-}
-
-func (c *CommonTokenStream) GetAllTokens() []Token {
- return c.tokens
-}
-
-func (c *CommonTokenStream) Mark() int {
- return 0
-}
-
-func (c *CommonTokenStream) Release(marker int) {}
-
-func (c *CommonTokenStream) reset() {
- c.Seek(0)
-}
-
-func (c *CommonTokenStream) Seek(index int) {
- c.lazyInit()
- c.index = c.adjustSeekIndex(index)
-}
-
-func (c *CommonTokenStream) Get(index int) Token {
- c.lazyInit()
-
- return c.tokens[index]
-}
-
-func (c *CommonTokenStream) Consume() {
- SkipEOFCheck := false
-
- if c.index >= 0 {
- if c.fetchedEOF {
- // The last token in tokens is EOF. Skip the check if p indexes any fetched.
- // token except the last.
- SkipEOFCheck = c.index < len(c.tokens)-1
- } else {
- // No EOF token in tokens. Skip the check if p indexes a fetched token.
- SkipEOFCheck = c.index < len(c.tokens)
- }
- } else {
- // Not yet initialized
- SkipEOFCheck = false
- }
-
- if !SkipEOFCheck && c.LA(1) == TokenEOF {
- panic("cannot consume EOF")
- }
-
- if c.Sync(c.index + 1) {
- c.index = c.adjustSeekIndex(c.index + 1)
- }
-}
-
-// Sync makes sure index i in tokens has a token and returns true if a token is
-// located at index i and otherwise false.
-func (c *CommonTokenStream) Sync(i int) bool {
- n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
-
- if n > 0 {
- fetched := c.fetch(n)
- return fetched >= n
- }
-
- return true
-}
-
-// fetch adds n elements to buffer and returns the actual number of elements
-// added to the buffer.
-func (c *CommonTokenStream) fetch(n int) int {
- if c.fetchedEOF {
- return 0
- }
-
- for i := 0; i < n; i++ {
- t := c.tokenSource.NextToken()
-
- t.SetTokenIndex(len(c.tokens))
- c.tokens = append(c.tokens, t)
-
- if t.GetTokenType() == TokenEOF {
- c.fetchedEOF = true
-
- return i + 1
- }
- }
-
- return n
-}
-
-// GetTokens gets all tokens from start to stop inclusive.
-func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
- if start < 0 || stop < 0 {
- return nil
- }
-
- c.lazyInit()
-
- subset := make([]Token, 0)
-
- if stop >= len(c.tokens) {
- stop = len(c.tokens) - 1
- }
-
- for i := start; i < stop; i++ {
- t := c.tokens[i]
-
- if t.GetTokenType() == TokenEOF {
- break
- }
-
- if types == nil || types.contains(t.GetTokenType()) {
- subset = append(subset, t)
- }
- }
-
- return subset
-}
-
-func (c *CommonTokenStream) LA(i int) int {
- return c.LT(i).GetTokenType()
-}
-
-func (c *CommonTokenStream) lazyInit() {
- if c.index == -1 {
- c.setup()
- }
-}
-
-func (c *CommonTokenStream) setup() {
- c.Sync(0)
- c.index = c.adjustSeekIndex(0)
-}
-
-func (c *CommonTokenStream) GetTokenSource() TokenSource {
- return c.tokenSource
-}
-
-// SetTokenSource resets the c token stream by setting its token source.
-func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
- c.tokenSource = tokenSource
- c.tokens = make([]Token, 0)
- c.index = -1
-}
-
-// NextTokenOnChannel returns the index of the next token on channel given a
-// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
-// no tokens on channel between i and EOF.
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
- c.Sync(i)
-
- if i >= len(c.tokens) {
- return -1
- }
-
- token := c.tokens[i]
-
- for token.GetChannel() != c.channel {
- if token.GetTokenType() == TokenEOF {
- return -1
- }
-
- i++
- c.Sync(i)
- token = c.tokens[i]
- }
-
- return i
-}
-
-// previousTokenOnChannel returns the index of the previous token on channel
-// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
-// there are no tokens on channel between i and 0.
-func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
- for i >= 0 && c.tokens[i].GetChannel() != channel {
- i--
- }
-
- return i
-}
-
-// GetHiddenTokensToRight collects all tokens on a specified channel to the
-// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
-// or EOF. If channel is -1, it finds any non-default channel token.
-func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
- c.lazyInit()
-
- if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
- panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
- }
-
- nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
- from := tokenIndex + 1
-
- // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
- var to int
-
- if nextOnChannel == -1 {
- to = len(c.tokens) - 1
- } else {
- to = nextOnChannel
- }
-
- return c.filterForChannel(from, to, channel)
-}
-
-// GetHiddenTokensToLeft collects all tokens on channel to the left of the
-// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
-// -1, it finds any non default channel token.
-func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
- c.lazyInit()
-
- if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
- panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
- }
-
- prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
-
- if prevOnChannel == tokenIndex-1 {
- return nil
- }
-
- // If there are none on channel to the left and prevOnChannel == -1 then from = 0
- from := prevOnChannel + 1
- to := tokenIndex - 1
-
- return c.filterForChannel(from, to, channel)
-}
-
-func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
- hidden := make([]Token, 0)
-
- for i := left; i < right+1; i++ {
- t := c.tokens[i]
-
- if channel == -1 {
- if t.GetChannel() != LexerDefaultTokenChannel {
- hidden = append(hidden, t)
- }
- } else if t.GetChannel() == channel {
- hidden = append(hidden, t)
- }
- }
-
- if len(hidden) == 0 {
- return nil
- }
-
- return hidden
-}
-
-func (c *CommonTokenStream) GetSourceName() string {
- return c.tokenSource.GetSourceName()
-}
-
-func (c *CommonTokenStream) Size() int {
- return len(c.tokens)
-}
-
-func (c *CommonTokenStream) Index() int {
- return c.index
-}
-
-func (c *CommonTokenStream) GetAllText() string {
- return c.GetTextFromInterval(nil)
-}
-
-func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
- if start == nil || end == nil {
- return ""
- }
-
- return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
-}
-
-func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
- return c.GetTextFromInterval(interval.GetSourceInterval())
-}
-
-func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
- c.lazyInit()
- c.Fill()
-
- if interval == nil {
- interval = NewInterval(0, len(c.tokens)-1)
- }
-
- start := interval.Start
- stop := interval.Stop
-
- if start < 0 || stop < 0 {
- return ""
- }
-
- if stop >= len(c.tokens) {
- stop = len(c.tokens) - 1
- }
-
- s := ""
-
- for i := start; i < stop+1; i++ {
- t := c.tokens[i]
-
- if t.GetTokenType() == TokenEOF {
- break
- }
-
- s += t.GetText()
- }
-
- return s
-}
-
-// Fill gets all tokens from the lexer until EOF.
-func (c *CommonTokenStream) Fill() {
- c.lazyInit()
-
- for c.fetch(1000) == 1000 {
- continue
- }
-}
-
-func (c *CommonTokenStream) adjustSeekIndex(i int) int {
- return c.NextTokenOnChannel(i, c.channel)
-}
-
-func (c *CommonTokenStream) LB(k int) Token {
- if k == 0 || c.index-k < 0 {
- return nil
- }
-
- i := c.index
- n := 1
-
- // Find k good tokens looking backward
- for n <= k {
- // Skip off-channel tokens
- i = c.previousTokenOnChannel(i-1, c.channel)
- n++
- }
-
- if i < 0 {
- return nil
- }
-
- return c.tokens[i]
-}
-
-func (c *CommonTokenStream) LT(k int) Token {
- c.lazyInit()
-
- if k == 0 {
- return nil
- }
-
- if k < 0 {
- return c.LB(-k)
- }
-
- i := c.index
- n := 1 // We know tokens[n] is valid
-
- // Find k good tokens
- for n < k {
- // Skip off-channel tokens, but make sure to not look past EOF
- if c.Sync(i + 1) {
- i = c.NextTokenOnChannel(i+1, c.channel)
- }
-
- n++
- }
-
- return c.tokens[i]
-}
-
-// getNumberOfOnChannelTokens counts EOF once.
-func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
- var n int
-
- c.Fill()
-
- for i := 0; i < len(c.tokens); i++ {
- t := c.tokens[i]
-
- if t.GetChannel() == c.channel {
- n++
- }
-
- if t.GetTokenType() == TokenEOF {
- break
- }
- }
-
- return n
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
deleted file mode 100644
index d55a2a87d5..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "sort"
-)
-
-type DFA struct {
- // atnStartState is the ATN state in which this was created
- atnStartState DecisionState
-
- decision int
-
- // states is all the DFA states. Use Map to get the old state back; Set can only
- // indicate whether it is there.
- states map[int]*DFAState
-
- s0 *DFAState
-
- // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
- // True if the DFA is for a precedence decision and false otherwise.
- precedenceDfa bool
-}
-
-func NewDFA(atnStartState DecisionState, decision int) *DFA {
- dfa := &DFA{
- atnStartState: atnStartState,
- decision: decision,
- states: make(map[int]*DFAState),
- }
- if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
- dfa.precedenceDfa = true
- dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
- dfa.s0.isAcceptState = false
- dfa.s0.requiresFullContext = false
- }
- return dfa
-}
-
-// getPrecedenceStartState gets the start state for the current precedence and
-// returns the start state corresponding to the specified precedence if a start
-// state exists for the specified precedence and nil otherwise. d must be a
-// precedence DFA. See also isPrecedenceDfa.
-func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
- if !d.getPrecedenceDfa() {
- panic("only precedence DFAs may contain a precedence start state")
- }
-
- // s0.edges is never nil for a precedence DFA
- if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
- return nil
- }
-
- return d.getS0().getIthEdge(precedence)
-}
-
-// setPrecedenceStartState sets the start state for the current precedence. d
-// must be a precedence DFA. See also isPrecedenceDfa.
-func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
- if !d.getPrecedenceDfa() {
- panic("only precedence DFAs may contain a precedence start state")
- }
-
- if precedence < 0 {
- return
- }
-
- // Synchronization on s0 here is ok. When the DFA is turned into a
- // precedence DFA, s0 will be initialized once and not updated again. s0.edges
- // is never nil for a precedence DFA.
- s0 := d.getS0()
- if precedence >= s0.numEdges() {
- edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
- s0.setEdges(edges)
- d.setS0(s0)
- }
-
- s0.setIthEdge(precedence, startState)
-}
-
-func (d *DFA) getPrecedenceDfa() bool {
- return d.precedenceDfa
-}
-
-// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
-// from the current DFA configuration, then d.states is cleared, the initial
-// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
-// store the start states for individual precedence values if precedenceDfa is
-// true or nil otherwise, and d.precedenceDfa is updated.
-func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
- if d.getPrecedenceDfa() != precedenceDfa {
- d.setStates(make(map[int]*DFAState))
-
- if precedenceDfa {
- precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
-
- precedenceState.setEdges(make([]*DFAState, 0))
- precedenceState.isAcceptState = false
- precedenceState.requiresFullContext = false
- d.setS0(precedenceState)
- } else {
- d.setS0(nil)
- }
-
- d.precedenceDfa = precedenceDfa
- }
-}
-
-func (d *DFA) getS0() *DFAState {
- return d.s0
-}
-
-func (d *DFA) setS0(s *DFAState) {
- d.s0 = s
-}
-
-func (d *DFA) getState(hash int) (*DFAState, bool) {
- s, ok := d.states[hash]
- return s, ok
-}
-
-func (d *DFA) setStates(states map[int]*DFAState) {
- d.states = states
-}
-
-func (d *DFA) setState(hash int, state *DFAState) {
- d.states[hash] = state
-}
-
-func (d *DFA) numStates() int {
- return len(d.states)
-}
-
-type dfaStateList []*DFAState
-
-func (d dfaStateList) Len() int { return len(d) }
-func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
-func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
-
-// sortedStates returns the states in d sorted by their state number.
-func (d *DFA) sortedStates() []*DFAState {
- vs := make([]*DFAState, 0, len(d.states))
-
- for _, v := range d.states {
- vs = append(vs, v)
- }
-
- sort.Sort(dfaStateList(vs))
-
- return vs
-}
-
-func (d *DFA) String(literalNames []string, symbolicNames []string) string {
- if d.getS0() == nil {
- return ""
- }
-
- return NewDFASerializer(d, literalNames, symbolicNames).String()
-}
-
-func (d *DFA) ToLexerString() string {
- if d.getS0() == nil {
- return ""
- }
-
- return NewLexerDFASerializer(d).String()
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
deleted file mode 100644
index bf2ccc06cd..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// DFASerializer is a DFA walker that knows how to dump them to serialized
-// strings.
-type DFASerializer struct {
- dfa *DFA
- literalNames []string
- symbolicNames []string
-}
-
-func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
- if literalNames == nil {
- literalNames = make([]string, 0)
- }
-
- if symbolicNames == nil {
- symbolicNames = make([]string, 0)
- }
-
- return &DFASerializer{
- dfa: dfa,
- literalNames: literalNames,
- symbolicNames: symbolicNames,
- }
-}
-
-func (d *DFASerializer) String() string {
- if d.dfa.getS0() == nil {
- return ""
- }
-
- buf := ""
- states := d.dfa.sortedStates()
-
- for _, s := range states {
- if s.edges != nil {
- n := len(s.edges)
-
- for j := 0; j < n; j++ {
- t := s.edges[j]
-
- if t != nil && t.stateNumber != 0x7FFFFFFF {
- buf += d.GetStateString(s)
- buf += "-"
- buf += d.getEdgeLabel(j)
- buf += "->"
- buf += d.GetStateString(t)
- buf += "\n"
- }
- }
- }
- }
-
- if len(buf) == 0 {
- return ""
- }
-
- return buf
-}
-
-func (d *DFASerializer) getEdgeLabel(i int) string {
- if i == 0 {
- return "EOF"
- } else if d.literalNames != nil && i-1 < len(d.literalNames) {
- return d.literalNames[i-1]
- } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
- return d.symbolicNames[i-1]
- }
-
- return strconv.Itoa(i - 1)
-}
-
-func (d *DFASerializer) GetStateString(s *DFAState) string {
- var a, b string
-
- if s.isAcceptState {
- a = ":"
- }
-
- if s.requiresFullContext {
- b = "^"
- }
-
- baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
-
- if s.isAcceptState {
- if s.predicates != nil {
- return baseStateStr + "=>" + fmt.Sprint(s.predicates)
- }
-
- return baseStateStr + "=>" + fmt.Sprint(s.prediction)
- }
-
- return baseStateStr
-}
-
-type LexerDFASerializer struct {
- *DFASerializer
-}
-
-func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
- return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
-}
-
-func (l *LexerDFASerializer) getEdgeLabel(i int) string {
- var sb strings.Builder
- sb.Grow(6)
- sb.WriteByte('\'')
- sb.WriteRune(rune(i))
- sb.WriteByte('\'')
- return sb.String()
-}
-
-func (l *LexerDFASerializer) String() string {
- if l.dfa.getS0() == nil {
- return ""
- }
-
- buf := ""
- states := l.dfa.sortedStates()
-
- for i := 0; i < len(states); i++ {
- s := states[i]
-
- if s.edges != nil {
- n := len(s.edges)
-
- for j := 0; j < n; j++ {
- t := s.edges[j]
-
- if t != nil && t.stateNumber != 0x7FFFFFFF {
- buf += l.GetStateString(s)
- buf += "-"
- buf += l.getEdgeLabel(j)
- buf += "->"
- buf += l.GetStateString(t)
- buf += "\n"
- }
- }
- }
- }
-
- if len(buf) == 0 {
- return ""
- }
-
- return buf
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
deleted file mode 100644
index 970ed19865..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-// PredPrediction maps a predicate to a predicted alternative.
-type PredPrediction struct {
- alt int
- pred SemanticContext
-}
-
-func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
- return &PredPrediction{alt: alt, pred: pred}
-}
-
-func (p *PredPrediction) String() string {
- return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
-}
-
-// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
-// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
-// states the ATN can be in after reading each input symbol. That is to say,
-// after reading input a1a2..an, the DFA is in a state that represents the
-// subset T of the states of the ATN that are reachable from the ATN's start
-// state along some path labeled a1a2..an." In conventional NFA-to-DFA
-// conversion, therefore, the subset T would be a bitset representing the set of
-// states the ATN could be in. We need to track the alt predicted by each state
-// as well, however. More importantly, we need to maintain a stack of states,
-// tracking the closure operations as they jump from rule to rule, emulating
-// rule invocations (method calls). I have to add a stack to simulate the proper
-// lookahead sequences for the underlying LL grammar from which the ATN was
-// derived.
-//
-// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
-// state (ala normal conversion) and a RuleContext describing the chain of rules
-// (if any) followed to arrive at that state.
-//
-// A DFAState may have multiple references to a particular state, but with
-// different ATN contexts (with same or different alts) meaning that state was
-// reached via a different set of rule invocations.
-type DFAState struct {
- stateNumber int
- configs ATNConfigSet
-
- // edges elements point to the target of the symbol. Shift up by 1 so (-1)
- // Token.EOF maps to the first element.
- edges []*DFAState
-
- isAcceptState bool
-
- // prediction is the ttype we match or alt we predict if the state is accept.
- // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
- // requiresFullContext.
- prediction int
-
- lexerActionExecutor *LexerActionExecutor
-
- // requiresFullContext indicates it was created during an SLL prediction that
- // discovered a conflict between the configurations in the state. Future
- // ParserATNSimulator.execATN invocations immediately jump doing
- // full context prediction if true.
- requiresFullContext bool
-
- // predicates is the predicates associated with the ATN configurations of the
- // DFA state during SLL parsing. When we have predicates, requiresFullContext
- // is false, since full context prediction evaluates predicates on-the-fly. If
- // d is
- // not nil, then prediction is ATN.INVALID_ALT_NUMBER.
- //
- // We only use these for non-requiresFullContext but conflicting states. That
- // means we know from the context (it's $ or we don't dip into outer context)
- // that it's an ambiguity not a conflict.
- //
- // This list is computed by
- // ParserATNSimulator.predicateDFAState.
- predicates []*PredPrediction
-}
-
-func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
- if configs == nil {
- configs = NewBaseATNConfigSet(false)
- }
-
- return &DFAState{configs: configs, stateNumber: stateNumber}
-}
-
-// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
-func (d *DFAState) GetAltSet() Set {
- alts := newArray2DHashSet(nil, nil)
-
- if d.configs != nil {
- for _, c := range d.configs.GetItems() {
- alts.Add(c.GetAlt())
- }
- }
-
- if alts.Len() == 0 {
- return nil
- }
-
- return alts
-}
-
-func (d *DFAState) getEdges() []*DFAState {
- return d.edges
-}
-
-func (d *DFAState) numEdges() int {
- return len(d.edges)
-}
-
-func (d *DFAState) getIthEdge(i int) *DFAState {
- return d.edges[i]
-}
-
-func (d *DFAState) setEdges(newEdges []*DFAState) {
- d.edges = newEdges
-}
-
-func (d *DFAState) setIthEdge(i int, edge *DFAState) {
- d.edges[i] = edge
-}
-
-func (d *DFAState) setPrediction(v int) {
- d.prediction = v
-}
-
-// equals returns whether d equals other. Two DFAStates are equal if their ATN
-// configuration sets are the same. This method is used to see if a state
-// already exists.
-//
-// Because the number of alternatives and number of ATN configurations are
-// finite, there is a finite number of DFA states that can be processed. This is
-// necessary to show that the algorithm terminates.
-//
-// Cannot test the DFA state numbers here because in
-// ParserATNSimulator.addDFAState we need to know if any other state exists that
-// has d exact set of ATN configurations. The stateNumber is irrelevant.
-func (d *DFAState) equals(other interface{}) bool {
- if d == other {
- return true
- } else if _, ok := other.(*DFAState); !ok {
- return false
- }
-
- return d.configs.Equals(other.(*DFAState).configs)
-}
-
-func (d *DFAState) String() string {
- var s string
- if d.isAcceptState {
- if d.predicates != nil {
- s = "=>" + fmt.Sprint(d.predicates)
- } else {
- s = "=>" + fmt.Sprint(d.prediction)
- }
- }
-
- return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
-}
-
-func (d *DFAState) hash() int {
- h := murmurInit(7)
- h = murmurUpdate(h, d.configs.hash())
- return murmurFinish(h, 1)
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
deleted file mode 100644
index 1fec43d9dc..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "strconv"
-)
-
-//
-// This implementation of {@link ANTLRErrorListener} can be used to identify
-// certain potential correctness and performance problems in grammars. "reports"
-// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
-// message.
-//
-//
-// Ambiguities : These are cases where more than one path through the
-// grammar can Match the input.
-// Weak context sensitivity : These are cases where full-context
-// prediction resolved an SLL conflict to a unique alternative which equaled the
-// minimum alternative of the SLL conflict.
-// Strong (forced) context sensitivity : These are cases where the
-// full-context prediction resolved an SLL conflict to a unique alternative,
-// and the minimum alternative of the SLL conflict was found to not be
-// a truly viable alternative. Two-stage parsing cannot be used for inputs where
-// d situation occurs.
-//
-
-type DiagnosticErrorListener struct {
- *DefaultErrorListener
-
- exactOnly bool
-}
-
-func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
-
- n := new(DiagnosticErrorListener)
-
- // whether all ambiguities or only exact ambiguities are Reported.
- n.exactOnly = exactOnly
- return n
-}
-
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- if d.exactOnly && !exact {
- return
- }
- msg := "reportAmbiguity d=" +
- d.getDecisionDescription(recognizer, dfa) +
- ": ambigAlts=" +
- d.getConflictingAlts(ambigAlts, configs).String() +
- ", input='" +
- recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
- recognizer.NotifyErrorListeners(msg, nil, nil)
-}
-
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
-
- msg := "reportAttemptingFullContext d=" +
- d.getDecisionDescription(recognizer, dfa) +
- ", input='" +
- recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
- recognizer.NotifyErrorListeners(msg, nil, nil)
-}
-
-func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
- msg := "reportContextSensitivity d=" +
- d.getDecisionDescription(recognizer, dfa) +
- ", input='" +
- recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
- recognizer.NotifyErrorListeners(msg, nil, nil)
-}
-
-func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
- decision := dfa.decision
- ruleIndex := dfa.atnStartState.GetRuleIndex()
-
- ruleNames := recognizer.GetRuleNames()
- if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
- return strconv.Itoa(decision)
- }
- ruleName := ruleNames[ruleIndex]
- if ruleName == "" {
- return strconv.Itoa(decision)
- }
- return strconv.Itoa(decision) + " (" + ruleName + ")"
-}
-
-//
-// Computes the set of conflicting or ambiguous alternatives from a
-// configuration set, if that information was not already provided by the
-// parser.
-//
-// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
-// Reported by the parser.
-// @param configs The conflicting or ambiguous configuration set.
-// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
-// returns the set of alternatives represented in {@code configs}.
-//
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
- if ReportedAlts != nil {
- return ReportedAlts
- }
- result := NewBitSet()
- for _, c := range set.GetItems() {
- result.add(c.GetAlt())
- }
-
- return result
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
deleted file mode 100644
index 028e1a9d7f..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "os"
- "strconv"
-)
-
-// Provides an empty default implementation of {@link ANTLRErrorListener}. The
-// default implementation of each method does nothing, but can be overridden as
-// necessary.
-
-type ErrorListener interface {
- SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
- ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
- ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
- ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
-}
-
-type DefaultErrorListener struct {
-}
-
-func NewDefaultErrorListener() *DefaultErrorListener {
- return new(DefaultErrorListener)
-}
-
-func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
-}
-
-func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
-}
-
-func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
-}
-
-func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
-}
-
-type ConsoleErrorListener struct {
- *DefaultErrorListener
-}
-
-func NewConsoleErrorListener() *ConsoleErrorListener {
- return new(ConsoleErrorListener)
-}
-
-//
-// Provides a default instance of {@link ConsoleErrorListener}.
-//
-var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-
-//
-// {@inheritDoc}
-//
-//
-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format.
-//
-//
-// line line :charPositionInLine msg
-//
-//
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
- fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
-}
-
-type ProxyErrorListener struct {
- *DefaultErrorListener
- delegates []ErrorListener
-}
-
-func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
- if delegates == nil {
- panic("delegates is not provided")
- }
- l := new(ProxyErrorListener)
- l.delegates = delegates
- return l
-}
-
-func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
- for _, d := range p.delegates {
- d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
- }
-}
-
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
- }
-}
-
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
- }
-}
-
-func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
deleted file mode 100644
index c4080dbfd1..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
+++ /dev/null
@@ -1,762 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-type ErrorStrategy interface {
- reset(Parser)
- RecoverInline(Parser) Token
- Recover(Parser, RecognitionException)
- Sync(Parser)
- InErrorRecoveryMode(Parser) bool
- ReportError(Parser, RecognitionException)
- ReportMatch(Parser)
-}
-
-// This is the default implementation of {@link ANTLRErrorStrategy} used for
-// error Reporting and recovery in ANTLR parsers.
-//
-type DefaultErrorStrategy struct {
- errorRecoveryMode bool
- lastErrorIndex int
- lastErrorStates *IntervalSet
-}
-
-var _ ErrorStrategy = &DefaultErrorStrategy{}
-
-func NewDefaultErrorStrategy() *DefaultErrorStrategy {
-
- d := new(DefaultErrorStrategy)
-
- // Indicates whether the error strategy is currently "recovering from an
- // error". This is used to suppress Reporting multiple error messages while
- // attempting to recover from a detected syntax error.
- //
- // @see //InErrorRecoveryMode
- //
- d.errorRecoveryMode = false
-
- // The index into the input stream where the last error occurred.
- // This is used to prevent infinite loops where an error is found
- // but no token is consumed during recovery...another error is found,
- // ad nauseum. This is a failsafe mechanism to guarantee that at least
- // one token/tree node is consumed for two errors.
- //
- d.lastErrorIndex = -1
- d.lastErrorStates = nil
- return d
-}
-
-// The default implementation simply calls {@link //endErrorCondition} to
-// ensure that the handler is not in error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) {
- d.endErrorCondition(recognizer)
-}
-
-//
-// This method is called to enter error recovery mode when a recognition
-// exception is Reported.
-//
-// @param recognizer the parser instance
-//
-func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
- d.errorRecoveryMode = true
-}
-
-func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
- return d.errorRecoveryMode
-}
-
-//
-// This method is called to leave error recovery mode after recovering from
-// a recognition exception.
-//
-// @param recognizer
-//
-func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
- d.errorRecoveryMode = false
- d.lastErrorStates = nil
- d.lastErrorIndex = -1
-}
-
-//
-// {@inheritDoc}
-//
-// The default implementation simply calls {@link //endErrorCondition}.
-//
-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
- d.endErrorCondition(recognizer)
-}
-
-//
-// {@inheritDoc}
-//
-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.
-//
-//
-// {@link NoViableAltException}: Dispatches the call to
-// {@link //ReportNoViableAlternative}
-// {@link InputMisMatchException}: Dispatches the call to
-// {@link //ReportInputMisMatch}
-// {@link FailedPredicateException}: Dispatches the call to
-// {@link //ReportFailedPredicate}
-// All other types: calls {@link Parser//NotifyErrorListeners} to Report
-// the exception
-//
-//
-func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
- // if we've already Reported an error and have not Matched a token
- // yet successfully, don't Report any errors.
- if d.InErrorRecoveryMode(recognizer) {
- return // don't Report spurious errors
- }
- d.beginErrorCondition(recognizer)
-
- switch t := e.(type) {
- default:
- fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
- // fmt.Println(e.stack)
- recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
- case *NoViableAltException:
- d.ReportNoViableAlternative(recognizer, t)
- case *InputMisMatchException:
- d.ReportInputMisMatch(recognizer, t)
- case *FailedPredicateException:
- d.ReportFailedPredicate(recognizer, t)
- }
-}
-
-// {@inheritDoc}
-//
-// The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.
-//
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
-
- if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
- d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
- // uh oh, another error at same token index and previously-Visited
- // state in ATN must be a case where LT(1) is in the recovery
- // token set so nothing got consumed. Consume a single token
- // at least to prevent an infinite loop d is a failsafe.
- recognizer.Consume()
- }
- d.lastErrorIndex = recognizer.GetInputStream().Index()
- if d.lastErrorStates == nil {
- d.lastErrorStates = NewIntervalSet()
- }
- d.lastErrorStates.addOne(recognizer.GetState())
- followSet := d.getErrorRecoverySet(recognizer)
- d.consumeUntil(recognizer, followSet)
-}
-
-// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
-// that the current lookahead symbol is consistent with what were expecting
-// at d point in the ATN. You can call d anytime but ANTLR only
-// generates code to check before subrules/loops and each iteration.
-//
-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,
-//
-//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-//
-//
-// At the start of a sub rule upon error, {@link //Sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// reSynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
-//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
-//
-// ORIGINS
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
-//
-//
-// classfunc : 'class' ID '{' member* '}'
-//
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.
-//
-func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
- // If already recovering, don't try to Sync
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
-
- s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
- la := recognizer.GetTokenStream().LA(1)
-
- // try cheaper subset first might get lucky. seems to shave a wee bit off
- nextTokens := recognizer.GetATN().NextTokens(s, nil)
- if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
- return
- }
-
- switch s.GetStateType() {
- case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
- // Report error and recover if possible
- if d.SingleTokenDeletion(recognizer) != nil {
- return
- }
- panic(NewInputMisMatchException(recognizer))
- case ATNStatePlusLoopBack, ATNStateStarLoopBack:
- d.ReportUnwantedToken(recognizer)
- expecting := NewIntervalSet()
- expecting.addSet(recognizer.GetExpectedTokens())
- whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
- d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
- default:
- // do nothing if we can't identify the exact kind of ATN state
- }
-}
-
-// This is called by {@link //ReportError} when the exception is a
-// {@link NoViableAltException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-//
-func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
- tokens := recognizer.GetTokenStream()
- var input string
- if tokens != nil {
- if e.startToken.GetTokenType() == TokenEOF {
- input = ""
- } else {
- input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
- }
- } else {
- input = ""
- }
- msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-//
-// This is called by {@link //ReportError} when the exception is an
-// {@link InputMisMatchException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-//
-func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
- msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
- " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-//
-// This is called by {@link //ReportError} when the exception is a
-// {@link FailedPredicateException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-//
-func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
- ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
- msg := "rule " + ruleName + " " + e.message
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// This method is called to Report a syntax error which requires the removal
-// of a token from the input stream. At the time d method is called, the
-// erroneous symbol is current {@code LT(1)} symbol and has not yet been
-// removed from the input stream. When d method returns,
-// {@code recognizer} is in error recovery mode.
-//
-// This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-//
-// @param recognizer the parser instance
-//
-func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
- d.beginErrorCondition(recognizer)
- t := recognizer.GetCurrentToken()
- tokenName := d.GetTokenErrorDisplay(t)
- expecting := d.GetExpectedTokens(recognizer)
- msg := "extraneous input " + tokenName + " expecting " +
- expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
- recognizer.NotifyErrorListeners(msg, t, nil)
-}
-
-// This method is called to Report a syntax error which requires the
-// insertion of a missing token into the input stream. At the time d
-// method is called, the missing token has not yet been inserted. When d
-// method returns, {@code recognizer} is in error recovery mode.
-//
-// This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-//
-// @param recognizer the parser instance
-//
-func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
- d.beginErrorCondition(recognizer)
- t := recognizer.GetCurrentToken()
- expecting := d.GetExpectedTokens(recognizer)
- msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
- " at " + d.GetTokenErrorDisplay(t)
- recognizer.NotifyErrorListeners(msg, t, nil)
-}
-
-// The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, d method panics an
-// {@link InputMisMatchException}.
-//
-// EXTRA TOKEN (single token deletion)
-//
-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.
-//
-// This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.
-//
-// MISSING TOKEN (single token insertion)
-//
-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.
-//
-// This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.
-//
-// EXAMPLE
-//
-// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:
-//
-//
-// stat &rarr expr &rarr atom
-//
-//
-// and it will be trying to Match the {@code ')'} at d point in the
-// derivation:
-//
-//
-// => ID '=' '(' INT ')' ('+' atom)* ''
-// ^
-//
-//
-// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
-// is in the set of tokens that can follow the {@code ')'} token reference
-// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
-//
-func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
- // SINGLE TOKEN DELETION
- MatchedSymbol := d.SingleTokenDeletion(recognizer)
- if MatchedSymbol != nil {
- // we have deleted the extra token.
- // now, move past ttype token as if all were ok
- recognizer.Consume()
- return MatchedSymbol
- }
- // SINGLE TOKEN INSERTION
- if d.SingleTokenInsertion(recognizer) {
- return d.GetMissingSymbol(recognizer)
- }
- // even that didn't work must panic the exception
- panic(NewInputMisMatchException(recognizer))
-}
-
-//
-// This method implements the single-token insertion inline error recovery
-// strategy. It is called by {@link //recoverInline} if the single-token
-// deletion strategy fails to recover from the mismatched input. If this
-// method returns {@code true}, {@code recognizer} will be in error recovery
-// mode.
-//
-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.
-//
-// @param recognizer the parser instance
-// @return {@code true} if single-token insertion is a viable recovery
-// strategy for the current mismatched input, otherwise {@code false}
-//
-func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
- currentSymbolType := recognizer.GetTokenStream().LA(1)
- // if current token is consistent with what could come after current
- // ATN state, then we know we're missing a token error recovery
- // is free to conjure up and insert the missing token
- atn := recognizer.GetInterpreter().atn
- currentState := atn.states[recognizer.GetState()]
- next := currentState.GetTransitions()[0].getTarget()
- expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
- if expectingAtLL2.contains(currentSymbolType) {
- d.ReportMissingToken(recognizer)
- return true
- }
-
- return false
-}
-
-// This method implements the single-token deletion inline error recovery
-// strategy. It is called by {@link //recoverInline} to attempt to recover
-// from mismatched input. If this method returns nil, the parser and error
-// handler state will not have changed. If this method returns non-nil,
-// {@code recognizer} will not be in error recovery mode since the
-// returned token was a successful Match.
-//
-// If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.
-//
-// @param recognizer the parser instance
-// @return the successfully Matched {@link Token} instance if single-token
-// deletion successfully recovers from the mismatched input, otherwise
-// {@code nil}
-//
-func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
- NextTokenType := recognizer.GetTokenStream().LA(2)
- expecting := d.GetExpectedTokens(recognizer)
- if expecting.contains(NextTokenType) {
- d.ReportUnwantedToken(recognizer)
- // print("recoverFromMisMatchedToken deleting " \
- // + str(recognizer.GetTokenStream().LT(1)) \
- // + " since " + str(recognizer.GetTokenStream().LT(2)) \
- // + " is what we want", file=sys.stderr)
- recognizer.Consume() // simply delete extra token
- // we want to return the token we're actually Matching
- MatchedSymbol := recognizer.GetCurrentToken()
- d.ReportMatch(recognizer) // we know current token is correct
- return MatchedSymbol
- }
-
- return nil
-}
-
-// Conjure up a missing token during error recovery.
-//
-// The recognizer attempts to recover from single missing
-// symbols. But, actions might refer to that missing symbol.
-// For example, x=ID {f($x)}. The action clearly assumes
-// that there has been an identifier Matched previously and that
-// $x points at that token. If that token is missing, but
-// the next token in the stream is what we want we assume that
-// d token is missing and we keep going. Because we
-// have to return some token to replace the missing token,
-// we have to conjure one up. This method gives the user control
-// over the tokens returned for missing tokens. Mostly,
-// you will want to create something special for identifier
-// tokens. For literals such as '{' and ',', the default
-// action in the parser or tree parser works. It simply creates
-// a CommonToken of the appropriate type. The text will be the token.
-// If you change what tokens must be created by the lexer,
-// override d method to create the appropriate tokens.
-//
-func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
- currentSymbol := recognizer.GetCurrentToken()
- expecting := d.GetExpectedTokens(recognizer)
- expectedTokenType := expecting.first()
- var tokenText string
-
- if expectedTokenType == TokenEOF {
- tokenText = ""
- } else {
- ln := recognizer.GetLiteralNames()
- if expectedTokenType > 0 && expectedTokenType < len(ln) {
- tokenText = ""
- } else {
- tokenText = "" // TODO matches the JS impl
- }
- }
- current := currentSymbol
- lookback := recognizer.GetTokenStream().LT(-1)
- if current.GetTokenType() == TokenEOF && lookback != nil {
- current = lookback
- }
-
- tf := recognizer.GetTokenFactory()
-
- return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
-}
-
-func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
- return recognizer.GetExpectedTokens()
-}
-
-// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
-//
-func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
- if t == nil {
- return ""
- }
- s := t.GetText()
- if s == "" {
- if t.GetTokenType() == TokenEOF {
- s = ""
- } else {
- s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
- }
- }
- return d.escapeWSAndQuote(s)
-}
-
-func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
- return "'" + s + "'"
-}
-
-// Compute the error recovery set for the current rule. During
-// rule invocation, the parser pushes the set of tokens that can
-// follow that rule reference on the stack d amounts to
-// computing FIRST of what follows the rule reference in the
-// enclosing rule. See LinearApproximator.FIRST().
-// This local follow set only includes tokens
-// from within the rule i.e., the FIRST computation done by
-// ANTLR stops at the end of a rule.
-//
-// EXAMPLE
-//
-// When you find a "no viable alt exception", the input is not
-// consistent with any of the alternatives for rule r. The best
-// thing to do is to consume tokens until you see something that
-// can legally follow a call to r//or* any rule that called r.
-// You don't want the exact set of viable next tokens because the
-// input might just be missing a token--you might consume the
-// rest of the input looking for one of the missing tokens.
-//
-// Consider grammar:
-//
-// a : '[' b ']'
-// | '(' b ')'
-//
-// b : c '^' INT
-// c : ID
-// | INT
-//
-//
-// At each rule invocation, the set of tokens that could follow
-// that rule is pushed on a stack. Here are the various
-// context-sensitive follow sets:
-//
-// FOLLOW(b1_in_a) = FIRST(']') = ']'
-// FOLLOW(b2_in_a) = FIRST(')') = ')'
-// FOLLOW(c_in_b) = FIRST('^') = '^'
-//
-// Upon erroneous input "[]", the call chain is
-//
-// a -> b -> c
-//
-// and, hence, the follow context stack is:
-//
-// depth follow set start of rule execution
-// 0 a (from main())
-// 1 ']' b
-// 2 '^' c
-//
-// Notice that ')' is not included, because b would have to have
-// been called from a different context in rule a for ')' to be
-// included.
-//
-// For error recovery, we cannot consider FOLLOW(c)
-// (context-sensitive or otherwise). We need the combined set of
-// all context-sensitive FOLLOW sets--the set of all tokens that
-// could follow any reference in the call chain. We need to
-// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
-// we reSync'd to that token, we'd consume until EOF. We need to
-// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-// In this case, for input "[]", LA(1) is ']' and in the set, so we would
-// not consume anything. After printing an error, rule c would
-// return normally. Rule b would not find the required '^' though.
-// At this point, it gets a mismatched token error and panics an
-// exception (since LA(1) is not in the viable following token
-// set). The rule exception handler tries to recover, but finds
-// the same recovery set and doesn't consume anything. Rule b
-// exits normally returning to rule a. Now it finds the ']' (and
-// with the successful Match exits errorRecovery mode).
-//
-// So, you can see that the parser walks up the call chain looking
-// for the token that was a member of the recovery set.
-//
-// Errors are not generated in errorRecovery mode.
-//
-// ANTLR's error recovery mechanism is based upon original ideas:
-//
-// "Algorithms + Data Structures = Programs" by Niklaus Wirth
-//
-// and
-//
-// "A note on error recovery in recursive descent parsers":
-// http://portal.acm.org/citation.cfm?id=947902.947905
-//
-// Later, Josef Grosch had some good ideas:
-//
-// "Efficient and Comfortable Error Recovery in Recursive Descent
-// Parsers":
-// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-//
-// Like Grosch I implement context-sensitive FOLLOW sets that are combined
-// at run-time upon error to avoid overhead during parsing.
-//
-func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
- atn := recognizer.GetInterpreter().atn
- ctx := recognizer.GetParserRuleContext()
- recoverSet := NewIntervalSet()
- for ctx != nil && ctx.GetInvokingState() >= 0 {
- // compute what follows who invoked us
- invokingState := atn.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
- follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
- recoverSet.addSet(follow)
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- recoverSet.removeOne(TokenEpsilon)
- return recoverSet
-}
-
-// Consume tokens until one Matches the given token set.//
-func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
- ttype := recognizer.GetTokenStream().LA(1)
- for ttype != TokenEOF && !set.contains(ttype) {
- recognizer.Consume()
- ttype = recognizer.GetTokenStream().LA(1)
- }
-}
-
-//
-// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
-// by immediately canceling the parse operation with a
-// {@link ParseCancellationException}. The implementation ensures that the
-// {@link ParserRuleContext//exception} field is set for all parse tree nodes
-// that were not completed prior to encountering the error.
-//
-//
-// This error strategy is useful in the following scenarios.
-//
-//
-// Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of {@link BailErrorStrategy//Sync} improves the performance of
-// the first stage.
-// Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-// when the result will be ignored either way.
-//
-//
-//
-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
-//
-// @see Parser//setErrorHandler(ANTLRErrorStrategy)
-
-type BailErrorStrategy struct {
- *DefaultErrorStrategy
-}
-
-var _ ErrorStrategy = &BailErrorStrategy{}
-
-func NewBailErrorStrategy() *BailErrorStrategy {
-
- b := new(BailErrorStrategy)
-
- b.DefaultErrorStrategy = NewDefaultErrorStrategy()
-
- return b
-}
-
-// Instead of recovering from exception {@code e}, re-panic it wrapped
-// in a {@link ParseCancellationException} so it is not caught by the
-// rule func catches. Use {@link Exception//getCause()} to get the
-// original {@link RecognitionException}.
-//
-func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
- context := recognizer.GetParserRuleContext()
- for context != nil {
- context.SetException(e)
- if parent, ok := context.GetParent().(ParserRuleContext); ok {
- context = parent
- } else {
- context = nil
- }
- }
- panic(NewParseCancellationException()) // TODO we don't emit e properly
-}
-
-// Make sure we don't attempt to recover inline if the parser
-// successfully recovers, it won't panic an exception.
-//
-func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
- b.Recover(recognizer, NewInputMisMatchException(recognizer))
-
- return nil
-}
-
-// Make sure we don't attempt to recover from problems in subrules.//
-func (b *BailErrorStrategy) Sync(recognizer Parser) {
- // pass
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
deleted file mode 100644
index 2ef74926ec..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
-// 3 kinds of errors: prediction errors, failed predicate errors, and
-// mismatched input errors. In each case, the parser knows where it is
-// in the input, where it is in the ATN, the rule invocation stack,
-// and what kind of problem occurred.
-
-type RecognitionException interface {
- GetOffendingToken() Token
- GetMessage() string
- GetInputStream() IntStream
-}
-
-type BaseRecognitionException struct {
- message string
- recognizer Recognizer
- offendingToken Token
- offendingState int
- ctx RuleContext
- input IntStream
-}
-
-func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
-
- // todo
- // Error.call(this)
- //
- // if (!!Error.captureStackTrace) {
- // Error.captureStackTrace(this, RecognitionException)
- // } else {
- // stack := NewError().stack
- // }
- // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
-
- t := new(BaseRecognitionException)
-
- t.message = message
- t.recognizer = recognizer
- t.input = input
- t.ctx = ctx
- // The current {@link Token} when an error occurred. Since not all streams
- // support accessing symbols by index, we have to track the {@link Token}
- // instance itself.
- t.offendingToken = nil
- // Get the ATN state number the parser was in at the time the error
- // occurred. For {@link NoViableAltException} and
- // {@link LexerNoViableAltException} exceptions, this is the
- // {@link DecisionState} number. For others, it is the state whose outgoing
- // edge we couldn't Match.
- t.offendingState = -1
- if t.recognizer != nil {
- t.offendingState = t.recognizer.GetState()
- }
-
- return t
-}
-
-func (b *BaseRecognitionException) GetMessage() string {
- return b.message
-}
-
-func (b *BaseRecognitionException) GetOffendingToken() Token {
- return b.offendingToken
-}
-
-func (b *BaseRecognitionException) GetInputStream() IntStream {
- return b.input
-}
-
-// If the state number is not known, b method returns -1.
-
-//
-// Gets the set of input symbols which could potentially follow the
-// previously Matched symbol at the time b exception was panicn.
-//
-// If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.
-//
-// @return The set of token types that could potentially follow the current
-// state in the ATN, or {@code nil} if the information is not available.
-// /
-func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
- if b.recognizer != nil {
- return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
- }
-
- return nil
-}
-
-func (b *BaseRecognitionException) String() string {
- return b.message
-}
-
-type LexerNoViableAltException struct {
- *BaseRecognitionException
-
- startIndex int
- deadEndConfigs ATNConfigSet
-}
-
-func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
-
- l := new(LexerNoViableAltException)
-
- l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
-
- l.startIndex = startIndex
- l.deadEndConfigs = deadEndConfigs
-
- return l
-}
-
-func (l *LexerNoViableAltException) String() string {
- symbol := ""
- if l.startIndex >= 0 && l.startIndex < l.input.Size() {
- symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
- }
- return "LexerNoViableAltException" + symbol
-}
-
-type NoViableAltException struct {
- *BaseRecognitionException
-
- startToken Token
- offendingToken Token
- ctx ParserRuleContext
- deadEndConfigs ATNConfigSet
-}
-
-// Indicates that the parser could not decide which of two or more paths
-// to take based upon the remaining input. It tracks the starting token
-// of the offending input and also knows where the parser was
-// in the various paths when the error. Reported by ReportNoViableAlternative()
-//
-func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
-
- if ctx == nil {
- ctx = recognizer.GetParserRuleContext()
- }
-
- if offendingToken == nil {
- offendingToken = recognizer.GetCurrentToken()
- }
-
- if startToken == nil {
- startToken = recognizer.GetCurrentToken()
- }
-
- if input == nil {
- input = recognizer.GetInputStream().(TokenStream)
- }
-
- n := new(NoViableAltException)
- n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
-
- // Which configurations did we try at input.Index() that couldn't Match
- // input.LT(1)?//
- n.deadEndConfigs = deadEndConfigs
- // The token object at the start index the input stream might
- // not be buffering tokens so get a reference to it. (At the
- // time the error occurred, of course the stream needs to keep a
- // buffer all of the tokens but later we might not have access to those.)
- n.startToken = startToken
- n.offendingToken = offendingToken
-
- return n
-}
-
-type InputMisMatchException struct {
- *BaseRecognitionException
-}
-
-// This signifies any kind of mismatched input exceptions such as
-// when the current input does not Match the expected token.
-//
-func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
-
- i := new(InputMisMatchException)
- i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
- i.offendingToken = recognizer.GetCurrentToken()
-
- return i
-
-}
-
-// A semantic predicate failed during validation. Validation of predicates
-// occurs when normally parsing the alternative just like Matching a token.
-// Disambiguating predicate evaluation occurs when we test a predicate during
-// prediction.
-
-type FailedPredicateException struct {
- *BaseRecognitionException
-
- ruleIndex int
- predicateIndex int
- predicate string
-}
-
-func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
-
- f := new(FailedPredicateException)
-
- f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
- s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
- trans := s.GetTransitions()[0]
- if trans2, ok := trans.(*PredicateTransition); ok {
- f.ruleIndex = trans2.ruleIndex
- f.predicateIndex = trans2.predIndex
- } else {
- f.ruleIndex = 0
- f.predicateIndex = 0
- }
- f.predicate = predicate
- f.offendingToken = recognizer.GetCurrentToken()
-
- return f
-}
-
-func (f *FailedPredicateException) formatMessage(predicate, message string) string {
- if message != "" {
- return message
- }
-
- return "failed predicate: {" + predicate + "}?"
-}
-
-type ParseCancellationException struct {
-}
-
-func NewParseCancellationException() *ParseCancellationException {
- // Error.call(this)
- // Error.captureStackTrace(this, ParseCancellationException)
- return new(ParseCancellationException)
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
deleted file mode 100644
index 842170c086..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "io"
- "os"
-)
-
-// This is an InputStream that is loaded from a file all at once
-// when you construct the object.
-
-type FileStream struct {
- *InputStream
-
- filename string
-}
-
-func NewFileStream(fileName string) (*FileStream, error) {
-
- buf := bytes.NewBuffer(nil)
-
- f, err := os.Open(fileName)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- _, err = io.Copy(buf, f)
- if err != nil {
- return nil, err
- }
-
- fs := new(FileStream)
-
- fs.filename = fileName
- s := string(buf.Bytes())
-
- fs.InputStream = NewInputStream(s)
-
- return fs, nil
-
-}
-
-func (f *FileStream) GetSourceName() string {
- return f.filename
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
deleted file mode 100644
index 5ff270f536..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type InputStream struct {
- name string
- index int
- data []rune
- size int
-}
-
-func NewInputStream(data string) *InputStream {
-
- is := new(InputStream)
-
- is.name = ""
- is.index = 0
- is.data = []rune(data)
- is.size = len(is.data) // number of runes
-
- return is
-}
-
-func (is *InputStream) reset() {
- is.index = 0
-}
-
-func (is *InputStream) Consume() {
- if is.index >= is.size {
- // assert is.LA(1) == TokenEOF
- panic("cannot consume EOF")
- }
- is.index++
-}
-
-func (is *InputStream) LA(offset int) int {
-
- if offset == 0 {
- return 0 // nil
- }
- if offset < 0 {
- offset++ // e.g., translate LA(-1) to use offset=0
- }
- pos := is.index + offset - 1
-
- if pos < 0 || pos >= is.size { // invalid
- return TokenEOF
- }
-
- return int(is.data[pos])
-}
-
-func (is *InputStream) LT(offset int) int {
- return is.LA(offset)
-}
-
-func (is *InputStream) Index() int {
- return is.index
-}
-
-func (is *InputStream) Size() int {
- return is.size
-}
-
-// mark/release do nothing we have entire buffer
-func (is *InputStream) Mark() int {
- return -1
-}
-
-func (is *InputStream) Release(marker int) {
-}
-
-func (is *InputStream) Seek(index int) {
- if index <= is.index {
- is.index = index // just jump don't update stream state (line,...)
- return
- }
- // seek forward
- is.index = intMin(index, is.size)
-}
-
-func (is *InputStream) GetText(start int, stop int) string {
- if stop >= is.size {
- stop = is.size - 1
- }
- if start >= is.size {
- return ""
- }
-
- return string(is.data[start : stop+1])
-}
-
-func (is *InputStream) GetTextFromTokens(start, stop Token) string {
- if start != nil && stop != nil {
- return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
- }
-
- return ""
-}
-
-func (is *InputStream) GetTextFromInterval(i *Interval) string {
- return is.GetText(i.Start, i.Stop)
-}
-
-func (*InputStream) GetSourceName() string {
- return "Obtained from string"
-}
-
-func (is *InputStream) String() string {
- return string(is.data)
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
deleted file mode 100644
index 438e0ea6e7..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type IntStream interface {
- Consume()
- LA(int) int
- Mark() int
- Release(marker int)
- Index() int
- Seek(index int)
- Size() int
- GetSourceName() string
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
deleted file mode 100644
index 1e9393adb6..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "strconv"
- "strings"
-)
-
-type Interval struct {
- Start int
- Stop int
-}
-
-/* stop is not included! */
-func NewInterval(start, stop int) *Interval {
- i := new(Interval)
-
- i.Start = start
- i.Stop = stop
- return i
-}
-
-func (i *Interval) Contains(item int) bool {
- return item >= i.Start && item < i.Stop
-}
-
-func (i *Interval) String() string {
- if i.Start == i.Stop-1 {
- return strconv.Itoa(i.Start)
- }
-
- return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
-}
-
-func (i *Interval) length() int {
- return i.Stop - i.Start
-}
-
-type IntervalSet struct {
- intervals []*Interval
- readOnly bool
-}
-
-func NewIntervalSet() *IntervalSet {
-
- i := new(IntervalSet)
-
- i.intervals = nil
- i.readOnly = false
-
- return i
-}
-
-func (i *IntervalSet) first() int {
- if len(i.intervals) == 0 {
- return TokenInvalidType
- }
-
- return i.intervals[0].Start
-}
-
-func (i *IntervalSet) addOne(v int) {
- i.addInterval(NewInterval(v, v+1))
-}
-
-func (i *IntervalSet) addRange(l, h int) {
- i.addInterval(NewInterval(l, h+1))
-}
-
-func (i *IntervalSet) addInterval(v *Interval) {
- if i.intervals == nil {
- i.intervals = make([]*Interval, 0)
- i.intervals = append(i.intervals, v)
- } else {
- // find insert pos
- for k, interval := range i.intervals {
- // distinct range -> insert
- if v.Stop < interval.Start {
- i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
- return
- } else if v.Stop == interval.Start {
- i.intervals[k].Start = v.Start
- return
- } else if v.Start <= interval.Stop {
- i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
-
- // if not applying to end, merge potential overlaps
- if k < len(i.intervals)-1 {
- l := i.intervals[k]
- r := i.intervals[k+1]
- // if r contained in l
- if l.Stop >= r.Stop {
- i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
- } else if l.Stop >= r.Start { // partial overlap
- i.intervals[k] = NewInterval(l.Start, r.Stop)
- i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
- }
- }
- return
- }
- }
- // greater than any exiting
- i.intervals = append(i.intervals, v)
- }
-}
-
-func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
- if other.intervals != nil {
- for k := 0; k < len(other.intervals); k++ {
- i2 := other.intervals[k]
- i.addInterval(NewInterval(i2.Start, i2.Stop))
- }
- }
- return i
-}
-
-func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
- result := NewIntervalSet()
- result.addInterval(NewInterval(start, stop+1))
- for j := 0; j < len(i.intervals); j++ {
- result.removeRange(i.intervals[j])
- }
- return result
-}
-
-func (i *IntervalSet) contains(item int) bool {
- if i.intervals == nil {
- return false
- }
- for k := 0; k < len(i.intervals); k++ {
- if i.intervals[k].Contains(item) {
- return true
- }
- }
- return false
-}
-
-func (i *IntervalSet) length() int {
- len := 0
-
- for _, v := range i.intervals {
- len += v.length()
- }
-
- return len
-}
-
-func (i *IntervalSet) removeRange(v *Interval) {
- if v.Start == v.Stop-1 {
- i.removeOne(v.Start)
- } else if i.intervals != nil {
- k := 0
- for n := 0; n < len(i.intervals); n++ {
- ni := i.intervals[k]
- // intervals are ordered
- if v.Stop <= ni.Start {
- return
- } else if v.Start > ni.Start && v.Stop < ni.Stop {
- i.intervals[k] = NewInterval(ni.Start, v.Start)
- x := NewInterval(v.Stop, ni.Stop)
- // i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
- return
- } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
- // i.intervals.splice(k, 1)
- i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
- k = k - 1 // need another pass
- } else if v.Start < ni.Stop {
- i.intervals[k] = NewInterval(ni.Start, v.Start)
- } else if v.Stop < ni.Stop {
- i.intervals[k] = NewInterval(v.Stop, ni.Stop)
- }
- k++
- }
- }
-}
-
-func (i *IntervalSet) removeOne(v int) {
- if i.intervals != nil {
- for k := 0; k < len(i.intervals); k++ {
- ki := i.intervals[k]
- // intervals i ordered
- if v < ki.Start {
- return
- } else if v == ki.Start && v == ki.Stop-1 {
- // i.intervals.splice(k, 1)
- i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
- return
- } else if v == ki.Start {
- i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
- return
- } else if v == ki.Stop-1 {
- i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
- return
- } else if v < ki.Stop-1 {
- x := NewInterval(ki.Start, v)
- ki.Start = v + 1
- // i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
- return
- }
- }
- }
-}
-
-func (i *IntervalSet) String() string {
- return i.StringVerbose(nil, nil, false)
-}
-
-func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
-
- if i.intervals == nil {
- return "{}"
- } else if literalNames != nil || symbolicNames != nil {
- return i.toTokenString(literalNames, symbolicNames)
- } else if elemsAreChar {
- return i.toCharString()
- }
-
- return i.toIndexString()
-}
-
-func (i *IntervalSet) toCharString() string {
- names := make([]string, len(i.intervals))
-
- var sb strings.Builder
-
- for j := 0; j < len(i.intervals); j++ {
- v := i.intervals[j]
- if v.Stop == v.Start+1 {
- if v.Start == TokenEOF {
- names = append(names, "")
- } else {
- sb.WriteByte('\'')
- sb.WriteRune(rune(v.Start))
- sb.WriteByte('\'')
- names = append(names, sb.String())
- sb.Reset()
- }
- } else {
- sb.WriteByte('\'')
- sb.WriteRune(rune(v.Start))
- sb.WriteString("'..'")
- sb.WriteRune(rune(v.Stop - 1))
- sb.WriteByte('\'')
- names = append(names, sb.String())
- sb.Reset()
- }
- }
- if len(names) > 1 {
- return "{" + strings.Join(names, ", ") + "}"
- }
-
- return names[0]
-}
-
-func (i *IntervalSet) toIndexString() string {
-
- names := make([]string, 0)
- for j := 0; j < len(i.intervals); j++ {
- v := i.intervals[j]
- if v.Stop == v.Start+1 {
- if v.Start == TokenEOF {
- names = append(names, "")
- } else {
- names = append(names, strconv.Itoa(v.Start))
- }
- } else {
- names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
- }
- }
- if len(names) > 1 {
- return "{" + strings.Join(names, ", ") + "}"
- }
-
- return names[0]
-}
-
-func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
- names := make([]string, 0)
- for _, v := range i.intervals {
- for j := v.Start; j < v.Stop; j++ {
- names = append(names, i.elementName(literalNames, symbolicNames, j))
- }
- }
- if len(names) > 1 {
- return "{" + strings.Join(names, ", ") + "}"
- }
-
- return names[0]
-}
-
-func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
- if a == TokenEOF {
- return ""
- } else if a == TokenEpsilon {
- return ""
- } else {
- if a < len(literalNames) && literalNames[a] != "" {
- return literalNames[a]
- }
-
- return symbolicNames[a]
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
deleted file mode 100644
index b04f04572f..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-// A lexer is recognizer that draws input symbols from a character stream.
-// lexer grammars result in a subclass of this object. A Lexer object
-// uses simplified Match() and error recovery mechanisms in the interest
-// of speed.
-///
-
-type Lexer interface {
- TokenSource
- Recognizer
-
- Emit() Token
-
- SetChannel(int)
- PushMode(int)
- PopMode() int
- SetType(int)
- SetMode(int)
-}
-
-type BaseLexer struct {
- *BaseRecognizer
-
- Interpreter ILexerATNSimulator
- TokenStartCharIndex int
- TokenStartLine int
- TokenStartColumn int
- ActionType int
- Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
-
- input CharStream
- factory TokenFactory
- tokenFactorySourcePair *TokenSourceCharStreamPair
- token Token
- hitEOF bool
- channel int
- thetype int
- modeStack IntStack
- mode int
- text string
-}
-
-func NewBaseLexer(input CharStream) *BaseLexer {
-
- lexer := new(BaseLexer)
-
- lexer.BaseRecognizer = NewBaseRecognizer()
-
- lexer.input = input
- lexer.factory = CommonTokenFactoryDEFAULT
- lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
-
- lexer.Virt = lexer
-
- lexer.Interpreter = nil // child classes must populate it
-
- // The goal of all lexer rules/methods is to create a token object.
- // l is an instance variable as multiple rules may collaborate to
- // create a single token. NextToken will return l object after
- // Matching lexer rule(s). If you subclass to allow multiple token
- // emissions, then set l to the last token to be Matched or
- // something nonnil so that the auto token emit mechanism will not
- // emit another token.
- lexer.token = nil
-
- // What character index in the stream did the current token start at?
- // Needed, for example, to get the text for current token. Set at
- // the start of NextToken.
- lexer.TokenStartCharIndex = -1
-
- // The line on which the first character of the token resides///
- lexer.TokenStartLine = -1
-
- // The character position of first character within the line///
- lexer.TokenStartColumn = -1
-
- // Once we see EOF on char stream, next token will be EOF.
- // If you have DONE : EOF then you see DONE EOF.
- lexer.hitEOF = false
-
- // The channel number for the current token///
- lexer.channel = TokenDefaultChannel
-
- // The token type for the current token///
- lexer.thetype = TokenInvalidType
-
- lexer.modeStack = make([]int, 0)
- lexer.mode = LexerDefaultMode
-
- // You can set the text for the current token to override what is in
- // the input char buffer. Use setText() or can set l instance var.
- // /
- lexer.text = ""
-
- return lexer
-}
-
-const (
- LexerDefaultMode = 0
- LexerMore = -2
- LexerSkip = -3
-)
-
-const (
- LexerDefaultTokenChannel = TokenDefaultChannel
- LexerHidden = TokenHiddenChannel
- LexerMinCharValue = 0x0000
- LexerMaxCharValue = 0x10FFFF
-)
-
-func (b *BaseLexer) reset() {
- // wack Lexer state variables
- if b.input != nil {
- b.input.Seek(0) // rewind the input
- }
- b.token = nil
- b.thetype = TokenInvalidType
- b.channel = TokenDefaultChannel
- b.TokenStartCharIndex = -1
- b.TokenStartColumn = -1
- b.TokenStartLine = -1
- b.text = ""
-
- b.hitEOF = false
- b.mode = LexerDefaultMode
- b.modeStack = make([]int, 0)
-
- b.Interpreter.reset()
-}
-
-func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
- return b.Interpreter
-}
-
-func (b *BaseLexer) GetInputStream() CharStream {
- return b.input
-}
-
-func (b *BaseLexer) GetSourceName() string {
- return b.GrammarFileName
-}
-
-func (b *BaseLexer) SetChannel(v int) {
- b.channel = v
-}
-
-func (b *BaseLexer) GetTokenFactory() TokenFactory {
- return b.factory
-}
-
-func (b *BaseLexer) setTokenFactory(f TokenFactory) {
- b.factory = f
-}
-
-func (b *BaseLexer) safeMatch() (ret int) {
- defer func() {
- if e := recover(); e != nil {
- if re, ok := e.(RecognitionException); ok {
- b.notifyListeners(re) // Report error
- b.Recover(re)
- ret = LexerSkip // default
- }
- }
- }()
-
- return b.Interpreter.Match(b.input, b.mode)
-}
-
-// Return a token from l source i.e., Match a token on the char stream.
-func (b *BaseLexer) NextToken() Token {
- if b.input == nil {
- panic("NextToken requires a non-nil input stream.")
- }
-
- tokenStartMarker := b.input.Mark()
-
- // previously in finally block
- defer func() {
- // make sure we release marker after Match or
- // unbuffered char stream will keep buffering
- b.input.Release(tokenStartMarker)
- }()
-
- for {
- if b.hitEOF {
- b.EmitEOF()
- return b.token
- }
- b.token = nil
- b.channel = TokenDefaultChannel
- b.TokenStartCharIndex = b.input.Index()
- b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
- b.TokenStartLine = b.Interpreter.GetLine()
- b.text = ""
- continueOuter := false
- for {
- b.thetype = TokenInvalidType
- ttype := LexerSkip
-
- ttype = b.safeMatch()
-
- if b.input.LA(1) == TokenEOF {
- b.hitEOF = true
- }
- if b.thetype == TokenInvalidType {
- b.thetype = ttype
- }
- if b.thetype == LexerSkip {
- continueOuter = true
- break
- }
- if b.thetype != LexerMore {
- break
- }
- }
-
- if continueOuter {
- continue
- }
- if b.token == nil {
- b.Virt.Emit()
- }
- return b.token
- }
-
- return nil
-}
-
-// Instruct the lexer to Skip creating a token for current lexer rule
-// and look for another token. NextToken() knows to keep looking when
-// a lexer rule finishes with token set to SKIPTOKEN. Recall that
-// if token==nil at end of any token rule, it creates one for you
-// and emits it.
-// /
-func (b *BaseLexer) Skip() {
- b.thetype = LexerSkip
-}
-
-func (b *BaseLexer) More() {
- b.thetype = LexerMore
-}
-
-func (b *BaseLexer) SetMode(m int) {
- b.mode = m
-}
-
-func (b *BaseLexer) PushMode(m int) {
- if LexerATNSimulatorDebug {
- fmt.Println("pushMode " + strconv.Itoa(m))
- }
- b.modeStack.Push(b.mode)
- b.mode = m
-}
-
-func (b *BaseLexer) PopMode() int {
- if len(b.modeStack) == 0 {
- panic("Empty Stack")
- }
- if LexerATNSimulatorDebug {
- fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
- }
- i, _ := b.modeStack.Pop()
- b.mode = i
- return b.mode
-}
-
-func (b *BaseLexer) inputStream() CharStream {
- return b.input
-}
-
-// SetInputStream resets the lexer input stream and associated lexer state.
-func (b *BaseLexer) SetInputStream(input CharStream) {
- b.input = nil
- b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
- b.reset()
- b.input = input
- b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
-}
-
-func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
- return b.tokenFactorySourcePair
-}
-
-// By default does not support multiple emits per NextToken invocation
-// for efficiency reasons. Subclass and override l method, NextToken,
-// and GetToken (to push tokens into a list and pull from that list
-// rather than a single variable as l implementation does).
-// /
-func (b *BaseLexer) EmitToken(token Token) {
- b.token = token
-}
-
-// The standard method called to automatically emit a token at the
-// outermost lexical rule. The token object should point into the
-// char buffer start..stop. If there is a text override in 'text',
-// use that to set the token's text. Override l method to emit
-// custom Token objects or provide a Newfactory.
-// /
-func (b *BaseLexer) Emit() Token {
- t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
- b.EmitToken(t)
- return t
-}
-
-func (b *BaseLexer) EmitEOF() Token {
- cpos := b.GetCharPositionInLine()
- lpos := b.GetLine()
- eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
- b.EmitToken(eof)
- return eof
-}
-
-func (b *BaseLexer) GetCharPositionInLine() int {
- return b.Interpreter.GetCharPositionInLine()
-}
-
-func (b *BaseLexer) GetLine() int {
- return b.Interpreter.GetLine()
-}
-
-func (b *BaseLexer) GetType() int {
- return b.thetype
-}
-
-func (b *BaseLexer) SetType(t int) {
- b.thetype = t
-}
-
-// What is the index of the current character of lookahead?///
-func (b *BaseLexer) GetCharIndex() int {
- return b.input.Index()
-}
-
-// Return the text Matched so far for the current token or any text override.
-//Set the complete text of l token it wipes any previous changes to the text.
-func (b *BaseLexer) GetText() string {
- if b.text != "" {
- return b.text
- }
-
- return b.Interpreter.GetText(b.input)
-}
-
-func (b *BaseLexer) SetText(text string) {
- b.text = text
-}
-
-func (b *BaseLexer) GetATN() *ATN {
- return b.Interpreter.ATN()
-}
-
-// Return a list of all Token objects in input char stream.
-// Forces load of all tokens. Does not include EOF token.
-// /
-func (b *BaseLexer) GetAllTokens() []Token {
- vl := b.Virt
- tokens := make([]Token, 0)
- t := vl.NextToken()
- for t.GetTokenType() != TokenEOF {
- tokens = append(tokens, t)
- t = vl.NextToken()
- }
- return tokens
-}
-
-func (b *BaseLexer) notifyListeners(e RecognitionException) {
- start := b.TokenStartCharIndex
- stop := b.input.Index()
- text := b.input.GetTextFromInterval(NewInterval(start, stop))
- msg := "token recognition error at: '" + text + "'"
- listener := b.GetErrorListenerDispatch()
- listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
-}
-
-func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
- if c == TokenEOF {
- return ""
- } else if c == '\n' {
- return "\\n"
- } else if c == '\t' {
- return "\\t"
- } else if c == '\r' {
- return "\\r"
- } else {
- return string(c)
- }
-}
-
-func (b *BaseLexer) getCharErrorDisplay(c rune) string {
- return "'" + b.getErrorDisplayForChar(c) + "'"
-}
-
-// Lexers can normally Match any char in it's vocabulary after Matching
-// a token, so do the easy thing and just kill a character and hope
-// it all works out. You can instead use the rule invocation stack
-// to do sophisticated error recovery if you are in a fragment rule.
-// /
-func (b *BaseLexer) Recover(re RecognitionException) {
- if b.input.LA(1) != TokenEOF {
- if _, ok := re.(*LexerNoViableAltException); ok {
- // Skip a char and try again
- b.Interpreter.Consume(b.input)
- } else {
- // TODO: Do we lose character or line position information?
- b.input.Consume()
- }
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
deleted file mode 100644
index 5a325be137..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
+++ /dev/null
@@ -1,430 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "strconv"
-
-const (
- LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
- LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
- LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
- LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
- LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
- LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
- LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
- LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
-)
-
-type LexerAction interface {
- getActionType() int
- getIsPositionDependent() bool
- execute(lexer Lexer)
- hash() int
- equals(other LexerAction) bool
-}
-
-type BaseLexerAction struct {
- actionType int
- isPositionDependent bool
-}
-
-func NewBaseLexerAction(action int) *BaseLexerAction {
- la := new(BaseLexerAction)
-
- la.actionType = action
- la.isPositionDependent = false
-
- return la
-}
-
-func (b *BaseLexerAction) execute(lexer Lexer) {
- panic("Not implemented")
-}
-
-func (b *BaseLexerAction) getActionType() int {
- return b.actionType
-}
-
-func (b *BaseLexerAction) getIsPositionDependent() bool {
- return b.isPositionDependent
-}
-
-func (b *BaseLexerAction) hash() int {
- return b.actionType
-}
-
-func (b *BaseLexerAction) equals(other LexerAction) bool {
- return b == other
-}
-
-//
-// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
-//
-// The {@code Skip} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerSkipAction struct {
- *BaseLexerAction
-}
-
-func NewLexerSkipAction() *LexerSkipAction {
- la := new(LexerSkipAction)
- la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
- return la
-}
-
-// Provides a singleton instance of l parameterless lexer action.
-var LexerSkipActionINSTANCE = NewLexerSkipAction()
-
-func (l *LexerSkipAction) execute(lexer Lexer) {
- lexer.Skip()
-}
-
-func (l *LexerSkipAction) String() string {
- return "skip"
-}
-
-// Implements the {@code type} lexer action by calling {@link Lexer//setType}
-// with the assigned type.
-type LexerTypeAction struct {
- *BaseLexerAction
-
- thetype int
-}
-
-func NewLexerTypeAction(thetype int) *LexerTypeAction {
- l := new(LexerTypeAction)
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
- l.thetype = thetype
- return l
-}
-
-func (l *LexerTypeAction) execute(lexer Lexer) {
- lexer.SetType(l.thetype)
-}
-
-func (l *LexerTypeAction) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, l.actionType)
- h = murmurUpdate(h, l.thetype)
- return murmurFinish(h, 2)
-}
-
-func (l *LexerTypeAction) equals(other LexerAction) bool {
- if l == other {
- return true
- } else if _, ok := other.(*LexerTypeAction); !ok {
- return false
- } else {
- return l.thetype == other.(*LexerTypeAction).thetype
- }
-}
-
-func (l *LexerTypeAction) String() string {
- return "actionType(" + strconv.Itoa(l.thetype) + ")"
-}
-
-// Implements the {@code pushMode} lexer action by calling
-// {@link Lexer//pushMode} with the assigned mode.
-type LexerPushModeAction struct {
- *BaseLexerAction
-
- mode int
-}
-
-func NewLexerPushModeAction(mode int) *LexerPushModeAction {
-
- l := new(LexerPushModeAction)
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
-
- l.mode = mode
- return l
-}
-
-// This action is implemented by calling {@link Lexer//pushMode} with the
-// value provided by {@link //getMode}.
-func (l *LexerPushModeAction) execute(lexer Lexer) {
- lexer.PushMode(l.mode)
-}
-
-func (l *LexerPushModeAction) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, l.actionType)
- h = murmurUpdate(h, l.mode)
- return murmurFinish(h, 2)
-}
-
-func (l *LexerPushModeAction) equals(other LexerAction) bool {
- if l == other {
- return true
- } else if _, ok := other.(*LexerPushModeAction); !ok {
- return false
- } else {
- return l.mode == other.(*LexerPushModeAction).mode
- }
-}
-
-func (l *LexerPushModeAction) String() string {
- return "pushMode(" + strconv.Itoa(l.mode) + ")"
-}
-
-// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
-//
-// The {@code popMode} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerPopModeAction struct {
- *BaseLexerAction
-}
-
-func NewLexerPopModeAction() *LexerPopModeAction {
-
- l := new(LexerPopModeAction)
-
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
-
- return l
-}
-
-var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
-
-// This action is implemented by calling {@link Lexer//popMode}.
-func (l *LexerPopModeAction) execute(lexer Lexer) {
- lexer.PopMode()
-}
-
-func (l *LexerPopModeAction) String() string {
- return "popMode"
-}
-
-// Implements the {@code more} lexer action by calling {@link Lexer//more}.
-//
-// The {@code more} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
-
-type LexerMoreAction struct {
- *BaseLexerAction
-}
-
-func NewLexerMoreAction() *LexerMoreAction {
- l := new(LexerMoreAction)
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
-
- return l
-}
-
-var LexerMoreActionINSTANCE = NewLexerMoreAction()
-
-// This action is implemented by calling {@link Lexer//popMode}.
-func (l *LexerMoreAction) execute(lexer Lexer) {
- lexer.More()
-}
-
-func (l *LexerMoreAction) String() string {
- return "more"
-}
-
-// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
-// the assigned mode.
-type LexerModeAction struct {
- *BaseLexerAction
-
- mode int
-}
-
-func NewLexerModeAction(mode int) *LexerModeAction {
- l := new(LexerModeAction)
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
- l.mode = mode
- return l
-}
-
-// This action is implemented by calling {@link Lexer//mode} with the
-// value provided by {@link //getMode}.
-func (l *LexerModeAction) execute(lexer Lexer) {
- lexer.SetMode(l.mode)
-}
-
-func (l *LexerModeAction) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, l.actionType)
- h = murmurUpdate(h, l.mode)
- return murmurFinish(h, 2)
-}
-
-func (l *LexerModeAction) equals(other LexerAction) bool {
- if l == other {
- return true
- } else if _, ok := other.(*LexerModeAction); !ok {
- return false
- } else {
- return l.mode == other.(*LexerModeAction).mode
- }
-}
-
-func (l *LexerModeAction) String() string {
- return "mode(" + strconv.Itoa(l.mode) + ")"
-}
-
-// Executes a custom lexer action by calling {@link Recognizer//action} with the
-// rule and action indexes assigned to the custom action. The implementation of
-// a custom action is added to the generated code for the lexer in an override
-// of {@link Recognizer//action} when the grammar is compiled.
-//
-// This class may represent embedded actions created with the {...}
-// syntax in ANTLR 4, as well as actions created for lexer commands where the
-// command argument could not be evaluated when the grammar was compiled.
-
-// Constructs a custom lexer action with the specified rule and action
-// indexes.
-//
-// @param ruleIndex The rule index to use for calls to
-// {@link Recognizer//action}.
-// @param actionIndex The action index to use for calls to
-// {@link Recognizer//action}.
-
-type LexerCustomAction struct {
- *BaseLexerAction
- ruleIndex, actionIndex int
-}
-
-func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
- l := new(LexerCustomAction)
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
- l.ruleIndex = ruleIndex
- l.actionIndex = actionIndex
- l.isPositionDependent = true
- return l
-}
-
-// Custom actions are implemented by calling {@link Lexer//action} with the
-// appropriate rule and action indexes.
-func (l *LexerCustomAction) execute(lexer Lexer) {
- lexer.Action(nil, l.ruleIndex, l.actionIndex)
-}
-
-func (l *LexerCustomAction) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, l.actionType)
- h = murmurUpdate(h, l.ruleIndex)
- h = murmurUpdate(h, l.actionIndex)
- return murmurFinish(h, 3)
-}
-
-func (l *LexerCustomAction) equals(other LexerAction) bool {
- if l == other {
- return true
- } else if _, ok := other.(*LexerCustomAction); !ok {
- return false
- } else {
- return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
- }
-}
-
-// Implements the {@code channel} lexer action by calling
-// {@link Lexer//setChannel} with the assigned channel.
-// Constructs a New{@code channel} action with the specified channel value.
-// @param channel The channel value to pass to {@link Lexer//setChannel}.
-type LexerChannelAction struct {
- *BaseLexerAction
-
- channel int
-}
-
-func NewLexerChannelAction(channel int) *LexerChannelAction {
- l := new(LexerChannelAction)
- l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
- l.channel = channel
- return l
-}
-
-// This action is implemented by calling {@link Lexer//setChannel} with the
-// value provided by {@link //getChannel}.
-func (l *LexerChannelAction) execute(lexer Lexer) {
- lexer.SetChannel(l.channel)
-}
-
-func (l *LexerChannelAction) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, l.actionType)
- h = murmurUpdate(h, l.channel)
- return murmurFinish(h, 2)
-}
-
-func (l *LexerChannelAction) equals(other LexerAction) bool {
- if l == other {
- return true
- } else if _, ok := other.(*LexerChannelAction); !ok {
- return false
- } else {
- return l.channel == other.(*LexerChannelAction).channel
- }
-}
-
-func (l *LexerChannelAction) String() string {
- return "channel(" + strconv.Itoa(l.channel) + ")"
-}
-
-// This implementation of {@link LexerAction} is used for tracking input offsets
-// for position-dependent actions within a {@link LexerActionExecutor}.
-//
-// This action is not serialized as part of the ATN, and is only required for
-// position-dependent lexer actions which appear at a location other than the
-// end of a rule. For more information about DFA optimizations employed for
-// lexer actions, see {@link LexerActionExecutor//append} and
-// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
-
-// Constructs a Newindexed custom action by associating a character offset
-// with a {@link LexerAction}.
-//
-// Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.
-//
-// @param offset The offset into the input {@link CharStream}, relative to
-// the token start index, at which the specified lexer action should be
-// executed.
-// @param action The lexer action to execute at a particular offset in the
-// input {@link CharStream}.
-type LexerIndexedCustomAction struct {
- *BaseLexerAction
-
- offset int
- lexerAction LexerAction
- isPositionDependent bool
-}
-
-func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
-
- l := new(LexerIndexedCustomAction)
- l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
-
- l.offset = offset
- l.lexerAction = lexerAction
- l.isPositionDependent = true
-
- return l
-}
-
-// This method calls {@link //execute} on the result of {@link //getAction}
-// using the provided {@code lexer}.
-func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
- // assume the input stream position was properly set by the calling code
- l.lexerAction.execute(lexer)
-}
-
-func (l *LexerIndexedCustomAction) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, l.offset)
- h = murmurUpdate(h, l.lexerAction.hash())
- return murmurFinish(h, 2)
-}
-
-func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
- if l == other {
- return true
- } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
- return false
- } else {
- return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
deleted file mode 100644
index 056941dd6e..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// Represents an executor for a sequence of lexer actions which traversed during
-// the Matching operation of a lexer rule (token).
-//
-// The executor tracks position information for position-dependent lexer actions
-// efficiently, ensuring that actions appearing only at the end of the rule do
-// not cause bloating of the {@link DFA} created for the lexer.
-
-type LexerActionExecutor struct {
- lexerActions []LexerAction
- cachedHash int
-}
-
-func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
-
- if lexerActions == nil {
- lexerActions = make([]LexerAction, 0)
- }
-
- l := new(LexerActionExecutor)
-
- l.lexerActions = lexerActions
-
- // Caches the result of {@link //hashCode} since the hash code is an element
- // of the performance-critical {@link LexerATNConfig//hashCode} operation.
- l.cachedHash = murmurInit(57)
- for _, a := range lexerActions {
- l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
- }
-
- return l
-}
-
-// Creates a {@link LexerActionExecutor} which executes the actions for
-// the input {@code lexerActionExecutor} followed by a specified
-// {@code lexerAction}.
-//
-// @param lexerActionExecutor The executor for actions already traversed by
-// the lexer while Matching a token within a particular
-// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
-// though it were an empty executor.
-// @param lexerAction The lexer action to execute after the actions
-// specified in {@code lexerActionExecutor}.
-//
-// @return A {@link LexerActionExecutor} for executing the combine actions
-// of {@code lexerActionExecutor} and {@code lexerAction}.
-func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
- if lexerActionExecutor == nil {
- return NewLexerActionExecutor([]LexerAction{lexerAction})
- }
-
- return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
-}
-
-// Creates a {@link LexerActionExecutor} which encodes the current offset
-// for position-dependent lexer actions.
-//
-// Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
-// position to the end of the current token. This behavior provides
-// for efficient DFA representation of lexer actions which appear at the end
-// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
-//
-// Prior to traversing a Match transition in the ATN, the current offset
-// from the token start index is assigned to all position-dependent lexer
-// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the DFA representation of
-// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.
-//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.
-//
-// @param offset The current offset to assign to all position-dependent
-// lexer actions which do not already have offsets assigned.
-//
-// @return A {@link LexerActionExecutor} which stores input stream offsets
-// for all position-dependent lexer actions.
-// /
-func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
- var updatedLexerActions []LexerAction
- for i := 0; i < len(l.lexerActions); i++ {
- _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
- if l.lexerActions[i].getIsPositionDependent() && !ok {
- if updatedLexerActions == nil {
- updatedLexerActions = make([]LexerAction, 0)
-
- for _, a := range l.lexerActions {
- updatedLexerActions = append(updatedLexerActions, a)
- }
- }
-
- updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
- }
- }
- if updatedLexerActions == nil {
- return l
- }
-
- return NewLexerActionExecutor(updatedLexerActions)
-}
-
-// Execute the actions encapsulated by l executor within the context of a
-// particular {@link Lexer}.
-//
-// This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
-// method returns, the input position will be restored to the same position
-// it was in when the method was invoked.
-//
-// @param lexer The lexer instance.
-// @param input The input stream which is the source for the current token.
-// When l method is called, the current {@link IntStream//index} for
-// {@code input} should be the start of the following token, i.e. 1
-// character past the end of the current token.
-// @param startIndex The token start index. This value may be passed to
-// {@link IntStream//seek} to set the {@code input} position to the beginning
-// of the token.
-// /
-func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
- requiresSeek := false
- stopIndex := input.Index()
-
- defer func() {
- if requiresSeek {
- input.Seek(stopIndex)
- }
- }()
-
- for i := 0; i < len(l.lexerActions); i++ {
- lexerAction := l.lexerActions[i]
- if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
- offset := la.offset
- input.Seek(startIndex + offset)
- lexerAction = la.lexerAction
- requiresSeek = (startIndex + offset) != stopIndex
- } else if lexerAction.getIsPositionDependent() {
- input.Seek(stopIndex)
- requiresSeek = false
- }
- lexerAction.execute(lexer)
- }
-}
-
-func (l *LexerActionExecutor) hash() int {
- if l == nil {
- return 61
- }
- return l.cachedHash
-}
-
-func (l *LexerActionExecutor) equals(other interface{}) bool {
- if l == other {
- return true
- }
- othert, ok := other.(*LexerActionExecutor)
- if !ok {
- return false
- }
- if othert == nil {
- return false
- }
- return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
deleted file mode 100644
index dc05153ea4..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
+++ /dev/null
@@ -1,679 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-var (
- LexerATNSimulatorDebug = false
- LexerATNSimulatorDFADebug = false
-
- LexerATNSimulatorMinDFAEdge = 0
- LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
-
- LexerATNSimulatorMatchCalls = 0
-)
-
-type ILexerATNSimulator interface {
- IATNSimulator
-
- reset()
- Match(input CharStream, mode int) int
- GetCharPositionInLine() int
- GetLine() int
- GetText(input CharStream) string
- Consume(input CharStream)
-}
-
-type LexerATNSimulator struct {
- *BaseATNSimulator
-
- recog Lexer
- predictionMode int
- mergeCache DoubleDict
- startIndex int
- Line int
- CharPositionInLine int
- mode int
- prevAccept *SimState
- MatchCalls int
-}
-
-func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
- l := new(LexerATNSimulator)
-
- l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
-
- l.decisionToDFA = decisionToDFA
- l.recog = recog
- // The current token's starting index into the character stream.
- // Shared across DFA to ATN simulation in case the ATN fails and the
- // DFA did not have a previous accept state. In l case, we use the
- // ATN-generated exception object.
- l.startIndex = -1
- // line number 1..n within the input///
- l.Line = 1
- // The index of the character relative to the beginning of the line
- // 0..n-1///
- l.CharPositionInLine = 0
- l.mode = LexerDefaultMode
- // Used during DFA/ATN exec to record the most recent accept configuration
- // info
- l.prevAccept = NewSimState()
- // done
- return l
-}
-
-func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
- l.CharPositionInLine = simulator.CharPositionInLine
- l.Line = simulator.Line
- l.mode = simulator.mode
- l.startIndex = simulator.startIndex
-}
-
-func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
- l.MatchCalls++
- l.mode = mode
- mark := input.Mark()
-
- defer func() {
- input.Release(mark)
- }()
-
- l.startIndex = input.Index()
- l.prevAccept.reset()
-
- dfa := l.decisionToDFA[mode]
-
- var s0 *DFAState
- l.atn.stateMu.RLock()
- s0 = dfa.getS0()
- l.atn.stateMu.RUnlock()
-
- if s0 == nil {
- return l.MatchATN(input)
- }
-
- return l.execATN(input, s0)
-}
-
-func (l *LexerATNSimulator) reset() {
- l.prevAccept.reset()
- l.startIndex = -1
- l.Line = 1
- l.CharPositionInLine = 0
- l.mode = LexerDefaultMode
-}
-
-func (l *LexerATNSimulator) MatchATN(input CharStream) int {
- startState := l.atn.modeToStartState[l.mode]
-
- if LexerATNSimulatorDebug {
- fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
- }
- oldMode := l.mode
- s0Closure := l.computeStartState(input, startState)
- suppressEdge := s0Closure.hasSemanticContext
- s0Closure.hasSemanticContext = false
-
- next := l.addDFAState(s0Closure, suppressEdge)
-
- predict := l.execATN(input, next)
-
- if LexerATNSimulatorDebug {
- fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
- }
- return predict
-}
-
-func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
-
- if LexerATNSimulatorDebug {
- fmt.Println("start state closure=" + ds0.configs.String())
- }
- if ds0.isAcceptState {
- // allow zero-length tokens
- l.captureSimState(l.prevAccept, input, ds0)
- }
- t := input.LA(1)
- s := ds0 // s is current/from DFA state
-
- for { // while more work
- if LexerATNSimulatorDebug {
- fmt.Println("execATN loop starting closure: " + s.configs.String())
- }
-
- // As we move src->trg, src->trg, we keep track of the previous trg to
- // avoid looking up the DFA state again, which is expensive.
- // If the previous target was already part of the DFA, we might
- // be able to avoid doing a reach operation upon t. If s!=nil,
- // it means that semantic predicates didn't prevent us from
- // creating a DFA state. Once we know s!=nil, we check to see if
- // the DFA state has an edge already for t. If so, we can just reuse
- // it's configuration set there's no point in re-computing it.
- // This is kind of like doing DFA simulation within the ATN
- // simulation because DFA simulation is really just a way to avoid
- // computing reach/closure sets. Technically, once we know that
- // we have a previously added DFA state, we could jump over to
- // the DFA simulator. But, that would mean popping back and forth
- // a lot and making things more complicated algorithmically.
- // This optimization makes a lot of sense for loops within DFA.
- // A character will take us back to an existing DFA state
- // that already has lots of edges out of it. e.g., .* in comments.
- target := l.getExistingTargetState(s, t)
- if target == nil {
- target = l.computeTargetState(input, s, t)
- // print("Computed:" + str(target))
- }
- if target == ATNSimulatorError {
- break
- }
- // If l is a consumable input element, make sure to consume before
- // capturing the accept state so the input index, line, and char
- // position accurately reflect the state of the interpreter at the
- // end of the token.
- if t != TokenEOF {
- l.Consume(input)
- }
- if target.isAcceptState {
- l.captureSimState(l.prevAccept, input, target)
- if t == TokenEOF {
- break
- }
- }
- t = input.LA(1)
- s = target // flip current DFA target becomes Newsrc/from state
- }
-
- return l.failOrAccept(l.prevAccept, input, s.configs, t)
-}
-
-// Get an existing target state for an edge in the DFA. If the target state
-// for the edge has not yet been computed or is otherwise not available,
-// l method returns {@code nil}.
-//
-// @param s The current DFA state
-// @param t The next input symbol
-// @return The existing target DFA state for the given input symbol
-// {@code t}, or {@code nil} if the target state for l edge is not
-// already cached
-func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
- if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
- return nil
- }
-
- l.atn.edgeMu.RLock()
- defer l.atn.edgeMu.RUnlock()
- if s.getEdges() == nil {
- return nil
- }
- target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
- if LexerATNSimulatorDebug && target != nil {
- fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
- }
- return target
-}
-
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
-//
-// @param input The input stream
-// @param s The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, l method
-// returns {@link //ERROR}.
-func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
- reach := NewOrderedATNConfigSet()
-
- // if we don't find an existing DFA state
- // Fill reach starting from closure, following t transitions
- l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
-
- if len(reach.configs) == 0 { // we got nowhere on t from s
- if !reach.hasSemanticContext {
- // we got nowhere on t, don't panic out l knowledge it'd
- // cause a failover from DFA later.
- l.addDFAEdge(s, t, ATNSimulatorError, nil)
- }
- // stop when we can't Match any more char
- return ATNSimulatorError
- }
- // Add an edge from s to target DFA found/created for reach
- return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
-}
-
-func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
- if l.prevAccept.dfaState != nil {
- lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
- l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
- return prevAccept.dfaState.prediction
- }
-
- // if no accept and EOF is first char, return EOF
- if t == TokenEOF && input.Index() == l.startIndex {
- return TokenEOF
- }
-
- panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
-}
-
-// Given a starting configuration set, figure out all ATN configurations
-// we can reach upon input {@code t}. Parameter {@code reach} is a return
-// parameter.
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
- // l is used to Skip processing for configs which have a lower priority
- // than a config that already reached an accept state for the same rule
- SkipAlt := ATNInvalidAltNumber
-
- for _, cfg := range closure.GetItems() {
- currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
- if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
- continue
- }
-
- if LexerATNSimulatorDebug {
-
- fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
- }
-
- for _, trans := range cfg.GetState().GetTransitions() {
- target := l.getReachableTarget(trans, t)
- if target != nil {
- lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
- if lexerActionExecutor != nil {
- lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
- }
- treatEOFAsEpsilon := (t == TokenEOF)
- config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
- if l.closure(input, config, reach,
- currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
- // any remaining configs for l alt have a lower priority
- // than the one that just reached an accept state.
- SkipAlt = cfg.GetAlt()
- }
- }
- }
- }
-}
-
-func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
- if LexerATNSimulatorDebug {
- fmt.Printf("ACTION %v\n", lexerActionExecutor)
- }
- // seek to after last char in token
- input.Seek(index)
- l.Line = line
- l.CharPositionInLine = charPos
- if lexerActionExecutor != nil && l.recog != nil {
- lexerActionExecutor.execute(l.recog, input, startIndex)
- }
-}
-
-func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
- if trans.Matches(t, 0, LexerMaxCharValue) {
- return trans.getTarget()
- }
-
- return nil
-}
-
-func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
- configs := NewOrderedATNConfigSet()
- for i := 0; i < len(p.GetTransitions()); i++ {
- target := p.GetTransitions()[i].getTarget()
- cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
- l.closure(input, cfg, configs, false, false, false)
- }
-
- return configs
-}
-
-// Since the alternatives within any lexer decision are ordered by
-// preference, l method stops pursuing the closure as soon as an accept
-// state is reached. After the first accept state is reached by depth-first
-// search from {@code config}, all other (potentially reachable) states for
-// l rule would have a lower priority.
-//
-// @return {@code true} if an accept state is reached, otherwise
-// {@code false}.
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
- currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
-
- if LexerATNSimulatorDebug {
- fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
- }
-
- _, ok := config.state.(*RuleStopState)
- if ok {
-
- if LexerATNSimulatorDebug {
- if l.recog != nil {
- fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
- } else {
- fmt.Printf("closure at rule stop %s\n", config)
- }
- }
-
- if config.context == nil || config.context.hasEmptyPath() {
- if config.context == nil || config.context.isEmpty() {
- configs.Add(config, nil)
- return true
- }
-
- configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
- currentAltReachedAcceptState = true
- }
- if config.context != nil && !config.context.isEmpty() {
- for i := 0; i < config.context.length(); i++ {
- if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
- newContext := config.context.GetParent(i) // "pop" return state
- returnState := l.atn.states[config.context.getReturnState(i)]
- cfg := NewLexerATNConfig2(config, returnState, newContext)
- currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
- }
- }
- }
- return currentAltReachedAcceptState
- }
- // optimization
- if !config.state.GetEpsilonOnlyTransitions() {
- if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
- configs.Add(config, nil)
- }
- }
- for j := 0; j < len(config.state.GetTransitions()); j++ {
- trans := config.state.GetTransitions()[j]
- cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
- if cfg != nil {
- currentAltReachedAcceptState = l.closure(input, cfg, configs,
- currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
- }
- }
- return currentAltReachedAcceptState
-}
-
-// side-effect: can alter configs.hasSemanticContext
-func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
- configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
-
- var cfg *LexerATNConfig
-
- if trans.getSerializationType() == TransitionRULE {
-
- rt := trans.(*RuleTransition)
- newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
- cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
-
- } else if trans.getSerializationType() == TransitionPRECEDENCE {
- panic("Precedence predicates are not supported in lexers.")
- } else if trans.getSerializationType() == TransitionPREDICATE {
- // Track traversing semantic predicates. If we traverse,
- // we cannot add a DFA state for l "reach" computation
- // because the DFA would not test the predicate again in the
- // future. Rather than creating collections of semantic predicates
- // like v3 and testing them on prediction, v4 will test them on the
- // fly all the time using the ATN not the DFA. This is slower but
- // semantically it's not used that often. One of the key elements to
- // l predicate mechanism is not adding DFA states that see
- // predicates immediately afterwards in the ATN. For example,
-
- // a : ID {p1}? | ID {p2}?
-
- // should create the start state for rule 'a' (to save start state
- // competition), but should not create target of ID state. The
- // collection of ATN states the following ID references includes
- // states reached by traversing predicates. Since l is when we
- // test them, we cannot cash the DFA state target of ID.
-
- pt := trans.(*PredicateTransition)
-
- if LexerATNSimulatorDebug {
- fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
- }
- configs.SetHasSemanticContext(true)
- if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
- cfg = NewLexerATNConfig4(config, trans.getTarget())
- }
- } else if trans.getSerializationType() == TransitionACTION {
- if config.context == nil || config.context.hasEmptyPath() {
- // execute actions anywhere in the start rule for a token.
- //
- // TODO: if the entry rule is invoked recursively, some
- // actions may be executed during the recursive call. The
- // problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In l case, the config needs to be
- // split into two contexts - one with just the empty path
- // and another with everything but the empty path.
- // Unfortunately, the current algorithm does not allow
- // getEpsilonTarget to return two configurations, so
- // additional modifications are needed before we can support
- // the split operation.
- lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
- cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
- } else {
- // ignore actions in referenced rules
- cfg = NewLexerATNConfig4(config, trans.getTarget())
- }
- } else if trans.getSerializationType() == TransitionEPSILON {
- cfg = NewLexerATNConfig4(config, trans.getTarget())
- } else if trans.getSerializationType() == TransitionATOM ||
- trans.getSerializationType() == TransitionRANGE ||
- trans.getSerializationType() == TransitionSET {
- if treatEOFAsEpsilon {
- if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
- cfg = NewLexerATNConfig4(config, trans.getTarget())
- }
- }
- }
- return cfg
-}
-
-// Evaluate a predicate specified in the lexer.
-//
-// If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.
-//
-// @param input The input stream.
-// @param ruleIndex The rule containing the predicate.
-// @param predIndex The index of the predicate within the rule.
-// @param speculative {@code true} if the current index in {@code input} is
-// one character before the predicate's location.
-//
-// @return {@code true} if the specified predicate evaluates to
-// {@code true}.
-// /
-func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
- // assume true if no recognizer was provided
- if l.recog == nil {
- return true
- }
- if !speculative {
- return l.recog.Sempred(nil, ruleIndex, predIndex)
- }
- savedcolumn := l.CharPositionInLine
- savedLine := l.Line
- index := input.Index()
- marker := input.Mark()
-
- defer func() {
- l.CharPositionInLine = savedcolumn
- l.Line = savedLine
- input.Seek(index)
- input.Release(marker)
- }()
-
- l.Consume(input)
- return l.recog.Sempred(nil, ruleIndex, predIndex)
-}
-
-func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
- settings.index = input.Index()
- settings.line = l.Line
- settings.column = l.CharPositionInLine
- settings.dfaState = dfaState
-}
-
-func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
- if to == nil && cfgs != nil {
- // leading to l call, ATNConfigSet.hasSemanticContext is used as a
- // marker indicating dynamic predicate evaluation makes l edge
- // dependent on the specific input sequence, so the static edge in the
- // DFA should be omitted. The target DFAState is still created since
- // execATN has the ability to reSynchronize with the DFA state cache
- // following the predicate evaluation step.
- //
- // TJP notes: next time through the DFA, we see a pred again and eval.
- // If that gets us to a previously created (but dangling) DFA
- // state, we can continue in pure DFA mode from there.
- // /
- suppressEdge := cfgs.HasSemanticContext()
- cfgs.SetHasSemanticContext(false)
-
- to = l.addDFAState(cfgs, true)
-
- if suppressEdge {
- return to
- }
- }
- // add the edge
- if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
- // Only track edges within the DFA bounds
- return to
- }
- if LexerATNSimulatorDebug {
- fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
- }
- l.atn.edgeMu.Lock()
- defer l.atn.edgeMu.Unlock()
- if from.getEdges() == nil {
- // make room for tokens 1..n and -1 masquerading as index 0
- from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
- }
- from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
-
- return to
-}
-
-// Add a NewDFA state if there isn't one with l set of
-// configurations already. This method also detects the first
-// configuration containing an ATN rule stop state. Later, when
-// traversing the DFA, we will know which rule to accept.
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
-
- proposed := NewDFAState(-1, configs)
- var firstConfigWithRuleStopState ATNConfig
-
- for _, cfg := range configs.GetItems() {
-
- _, ok := cfg.GetState().(*RuleStopState)
-
- if ok {
- firstConfigWithRuleStopState = cfg
- break
- }
- }
- if firstConfigWithRuleStopState != nil {
- proposed.isAcceptState = true
- proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
- proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
- }
- hash := proposed.hash()
- dfa := l.decisionToDFA[l.mode]
-
- l.atn.stateMu.Lock()
- defer l.atn.stateMu.Unlock()
- existing, ok := dfa.getState(hash)
- if ok {
- proposed = existing
- } else {
- proposed.stateNumber = dfa.numStates()
- configs.SetReadOnly(true)
- proposed.configs = configs
- dfa.setState(hash, proposed)
- }
- if !suppressEdge {
- dfa.setS0(proposed)
- }
- return proposed
-}
-
-func (l *LexerATNSimulator) getDFA(mode int) *DFA {
- return l.decisionToDFA[mode]
-}
-
-// Get the text Matched so far for the current token.
-func (l *LexerATNSimulator) GetText(input CharStream) string {
- // index is first lookahead char, don't include.
- return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
-}
-
-func (l *LexerATNSimulator) Consume(input CharStream) {
- curChar := input.LA(1)
- if curChar == int('\n') {
- l.Line++
- l.CharPositionInLine = 0
- } else {
- l.CharPositionInLine++
- }
- input.Consume()
-}
-
-func (l *LexerATNSimulator) GetCharPositionInLine() int {
- return l.CharPositionInLine
-}
-
-func (l *LexerATNSimulator) GetLine() int {
- return l.Line
-}
-
-func (l *LexerATNSimulator) GetTokenName(tt int) string {
- if tt == -1 {
- return "EOF"
- }
-
- var sb strings.Builder
- sb.Grow(6)
- sb.WriteByte('\'')
- sb.WriteRune(rune(tt))
- sb.WriteByte('\'')
-
- return sb.String()
-}
-
-func resetSimState(sim *SimState) {
- sim.index = -1
- sim.line = 0
- sim.column = -1
- sim.dfaState = nil
-}
-
-type SimState struct {
- index int
- line int
- column int
- dfaState *DFAState
-}
-
-func NewSimState() *SimState {
- s := new(SimState)
- resetSimState(s)
- return s
-}
-
-func (s *SimState) reset() {
- resetSimState(s)
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
deleted file mode 100644
index 6ffb37de69..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type LL1Analyzer struct {
- atn *ATN
-}
-
-func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
- la := new(LL1Analyzer)
- la.atn = atn
- return la
-}
-
-//* Special value added to the lookahead sets to indicate that we hit
-// a predicate during analysis if {@code seeThruPreds==false}.
-///
-const (
- LL1AnalyzerHitPred = TokenInvalidType
-)
-
-//*
-// Calculates the SLL(1) expected lookahead set for each outgoing transition
-// of an {@link ATNState}. The returned array has one element for each
-// outgoing transition in {@code s}. If the closure from transition
-// i leads to a semantic predicate before Matching a symbol, the
-// element at index i of the result will be {@code nil}.
-//
-// @param s the ATN state
-// @return the expected symbols for each outgoing transition of {@code s}.
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
- if s == nil {
- return nil
- }
- count := len(s.GetTransitions())
- look := make([]*IntervalSet, count)
- for alt := 0; alt < count; alt++ {
- look[alt] = NewIntervalSet()
- lookBusy := newArray2DHashSet(nil, nil)
- seeThruPreds := false // fail to get lookahead upon pred
- la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
- // Wipe out lookahead for la alternative if we found nothing
- // or we had a predicate when we !seeThruPreds
- if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
- look[alt] = nil
- }
- }
- return look
-}
-
-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-//
-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx the complete parser context, or {@code nil} if the context
-// should be ignored
-//
-// @return The set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-///
-func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
- r := NewIntervalSet()
- seeThruPreds := true // ignore preds get all lookahead
- var lookContext PredictionContext
- if ctx != nil {
- lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
- }
- la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
- return r
-}
-
-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-//
-// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state.
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx The outer context, or {@code nil} if the outer context should
-// not be used.
-// @param look The result lookahead set.
-// @param lookBusy A set used for preventing epsilon closures in the ATN
-// from causing a stack overflow. Outside code should pass
-// {@code NewSet} for la argument.
-// @param calledRuleStack A set used for preventing left recursion in the
-// ATN from causing a stack overflow. Outside code should pass
-// {@code NewBitSet()} for la argument.
-// @param seeThruPreds {@code true} to true semantic predicates as
-// implicitly {@code true} and "see through them", otherwise {@code false}
-// to treat semantic predicates as opaque and add {@link //HitPred} to the
-// result if one is encountered.
-// @param addEOF Add {@link Token//EOF} to the result if the end of the
-// outermost context is reached. This parameter has no effect if {@code ctx}
-// is {@code nil}.
-
-func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
-
- returnState := la.atn.states[ctx.getReturnState(i)]
- la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
-
-}
-
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
-
- c := NewBaseATNConfig6(s, 0, ctx)
-
- if lookBusy.Contains(c) {
- return
- }
-
- lookBusy.Add(c)
-
- if s == stopState {
- if ctx == nil {
- look.addOne(TokenEpsilon)
- return
- } else if ctx.isEmpty() && addEOF {
- look.addOne(TokenEOF)
- return
- }
- }
-
- _, ok := s.(*RuleStopState)
-
- if ok {
- if ctx == nil {
- look.addOne(TokenEpsilon)
- return
- } else if ctx.isEmpty() && addEOF {
- look.addOne(TokenEOF)
- return
- }
-
- if ctx != BasePredictionContextEMPTY {
- removed := calledRuleStack.contains(s.GetRuleIndex())
- defer func() {
- if removed {
- calledRuleStack.add(s.GetRuleIndex())
- }
- }()
- calledRuleStack.remove(s.GetRuleIndex())
- // run thru all possible stack tops in ctx
- for i := 0; i < ctx.length(); i++ {
- returnState := la.atn.states[ctx.getReturnState(i)]
- la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
- }
- return
- }
- }
-
- n := len(s.GetTransitions())
-
- for i := 0; i < n; i++ {
- t := s.GetTransitions()[i]
-
- if t1, ok := t.(*RuleTransition); ok {
- if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
- continue
- }
-
- newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
- la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
- } else if t2, ok := t.(AbstractPredicateTransition); ok {
- if seeThruPreds {
- la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
- } else {
- look.addOne(LL1AnalyzerHitPred)
- }
- } else if t.getIsEpsilon() {
- la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
- } else if _, ok := t.(*WildcardTransition); ok {
- look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
- } else {
- set := t.getLabel()
- if set != nil {
- if _, ok := t.(*NotSetTransition); ok {
- set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
- }
- look.addSet(set)
- }
- }
- }
-}
-
-func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
-
- newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
-
- defer func() {
- calledRuleStack.remove(t1.getTarget().GetRuleIndex())
- }()
-
- calledRuleStack.add(t1.getTarget().GetRuleIndex())
- la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
-
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
deleted file mode 100644
index 2ab2f56052..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
+++ /dev/null
@@ -1,718 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-type Parser interface {
- Recognizer
-
- GetInterpreter() *ParserATNSimulator
-
- GetTokenStream() TokenStream
- GetTokenFactory() TokenFactory
- GetParserRuleContext() ParserRuleContext
- SetParserRuleContext(ParserRuleContext)
- Consume() Token
- GetParseListeners() []ParseTreeListener
-
- GetErrorHandler() ErrorStrategy
- SetErrorHandler(ErrorStrategy)
- GetInputStream() IntStream
- GetCurrentToken() Token
- GetExpectedTokens() *IntervalSet
- NotifyErrorListeners(string, Token, RecognitionException)
- IsExpectedToken(int) bool
- GetPrecedence() int
- GetRuleInvocationStack(ParserRuleContext) []string
-}
-
-type BaseParser struct {
- *BaseRecognizer
-
- Interpreter *ParserATNSimulator
- BuildParseTrees bool
-
- input TokenStream
- errHandler ErrorStrategy
- precedenceStack IntStack
- ctx ParserRuleContext
-
- tracer *TraceListener
- parseListeners []ParseTreeListener
- _SyntaxErrors int
-}
-
-// p.is all the parsing support code essentially most of it is error
-// recovery stuff.//
-func NewBaseParser(input TokenStream) *BaseParser {
-
- p := new(BaseParser)
-
- p.BaseRecognizer = NewBaseRecognizer()
-
- // The input stream.
- p.input = nil
- // The error handling strategy for the parser. The default value is a new
- // instance of {@link DefaultErrorStrategy}.
- p.errHandler = NewDefaultErrorStrategy()
- p.precedenceStack = make([]int, 0)
- p.precedenceStack.Push(0)
- // The {@link ParserRuleContext} object for the currently executing rule.
- // p.is always non-nil during the parsing process.
- p.ctx = nil
- // Specifies whether or not the parser should construct a parse tree during
- // the parsing process. The default value is {@code true}.
- p.BuildParseTrees = true
- // When {@link //setTrace}{@code (true)} is called, a reference to the
- // {@link TraceListener} is stored here so it can be easily removed in a
- // later call to {@link //setTrace}{@code (false)}. The listener itself is
- // implemented as a parser listener so p.field is not directly used by
- // other parser methods.
- p.tracer = nil
- // The list of {@link ParseTreeListener} listeners registered to receive
- // events during the parse.
- p.parseListeners = nil
- // The number of syntax errors Reported during parsing. p.value is
- // incremented each time {@link //NotifyErrorListeners} is called.
- p._SyntaxErrors = 0
- p.SetInputStream(input)
-
- return p
-}
-
-// p.field maps from the serialized ATN string to the deserialized {@link
-// ATN} with
-// bypass alternatives.
-//
-// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
-//
-var bypassAltsAtnCache = make(map[string]int)
-
-// reset the parser's state//
-func (p *BaseParser) reset() {
- if p.input != nil {
- p.input.Seek(0)
- }
- p.errHandler.reset(p)
- p.ctx = nil
- p._SyntaxErrors = 0
- p.SetTrace(nil)
- p.precedenceStack = make([]int, 0)
- p.precedenceStack.Push(0)
- if p.Interpreter != nil {
- p.Interpreter.reset()
- }
-}
-
-func (p *BaseParser) GetErrorHandler() ErrorStrategy {
- return p.errHandler
-}
-
-func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
- p.errHandler = e
-}
-
-// Match current input symbol against {@code ttype}. If the symbol type
-// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
-// called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @param ttype the token type to Match
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// {@code ttype} and the error strategy could not recover from the
-// mismatched symbol
-
-func (p *BaseParser) Match(ttype int) Token {
-
- t := p.GetCurrentToken()
-
- if t.GetTokenType() == ttype {
- p.errHandler.ReportMatch(p)
- p.Consume()
- } else {
- t = p.errHandler.RecoverInline(p)
- if p.BuildParseTrees && t.GetTokenIndex() == -1 {
- // we must have conjured up a Newtoken during single token
- // insertion
- // if it's not the current symbol
- p.ctx.AddErrorNode(t)
- }
- }
-
- return t
-}
-
-// Match current input symbol as a wildcard. If the symbol type Matches
-// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
-// and {@link //consume} are called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// a wildcard and the error strategy could not recover from the mismatched
-// symbol
-
-func (p *BaseParser) MatchWildcard() Token {
- t := p.GetCurrentToken()
- if t.GetTokenType() > 0 {
- p.errHandler.ReportMatch(p)
- p.Consume()
- } else {
- t = p.errHandler.RecoverInline(p)
- if p.BuildParseTrees && t.GetTokenIndex() == -1 {
- // we must have conjured up a Newtoken during single token
- // insertion
- // if it's not the current symbol
- p.ctx.AddErrorNode(t)
- }
- }
- return t
-}
-
-func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
- return p.ctx
-}
-
-func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
- p.ctx = v
-}
-
-func (p *BaseParser) GetParseListeners() []ParseTreeListener {
- if p.parseListeners == nil {
- return make([]ParseTreeListener, 0)
- }
- return p.parseListeners
-}
-
-// Registers {@code listener} to receive events during the parsing process.
-//
-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic , i.e. for identical input the calls to listener
-// methods will be the same.
-//
-//
-// Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.
-// Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.
-// Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.
-//
-//
-// @param listener the listener to add
-//
-// @panics nilPointerException if {@code} listener is {@code nil}
-//
-func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
- if listener == nil {
- panic("listener")
- }
- if p.parseListeners == nil {
- p.parseListeners = make([]ParseTreeListener, 0)
- }
- p.parseListeners = append(p.parseListeners, listener)
-}
-
-//
-// Remove {@code listener} from the list of parse listeners.
-//
-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, p.method does nothing.
-// @param listener the listener to remove
-//
-func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
-
- if p.parseListeners != nil {
-
- idx := -1
- for i, v := range p.parseListeners {
- if v == listener {
- idx = i
- break
- }
- }
-
- if idx == -1 {
- return
- }
-
- // remove the listener from the slice
- p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
-
- if len(p.parseListeners) == 0 {
- p.parseListeners = nil
- }
- }
-}
-
-// Remove all parse listeners.
-func (p *BaseParser) removeParseListeners() {
- p.parseListeners = nil
-}
-
-// Notify any parse listeners of an enter rule event.
-func (p *BaseParser) TriggerEnterRuleEvent() {
- if p.parseListeners != nil {
- ctx := p.ctx
- for _, listener := range p.parseListeners {
- listener.EnterEveryRule(ctx)
- ctx.EnterRule(listener)
- }
- }
-}
-
-//
-// Notify any parse listeners of an exit rule event.
-//
-// @see //addParseListener
-//
-func (p *BaseParser) TriggerExitRuleEvent() {
- if p.parseListeners != nil {
- // reverse order walk of listeners
- ctx := p.ctx
- l := len(p.parseListeners) - 1
-
- for i := range p.parseListeners {
- listener := p.parseListeners[l-i]
- ctx.ExitRule(listener)
- listener.ExitEveryRule(ctx)
- }
- }
-}
-
-func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
- return p.Interpreter
-}
-
-func (p *BaseParser) GetATN() *ATN {
- return p.Interpreter.atn
-}
-
-func (p *BaseParser) GetTokenFactory() TokenFactory {
- return p.input.GetTokenSource().GetTokenFactory()
-}
-
-// Tell our token source and error strategy about a Newway to create tokens.//
-func (p *BaseParser) setTokenFactory(factory TokenFactory) {
- p.input.GetTokenSource().setTokenFactory(factory)
-}
-
-// The ATN with bypass alternatives is expensive to create so we create it
-// lazily.
-//
-// @panics UnsupportedOperationException if the current parser does not
-// implement the {@link //getSerializedATN()} method.
-//
-func (p *BaseParser) GetATNWithBypassAlts() {
-
- // TODO
- panic("Not implemented!")
-
- // serializedAtn := p.getSerializedATN()
- // if (serializedAtn == nil) {
- // panic("The current parser does not support an ATN with bypass alternatives.")
- // }
- // result := p.bypassAltsAtnCache[serializedAtn]
- // if (result == nil) {
- // deserializationOptions := NewATNDeserializationOptions(nil)
- // deserializationOptions.generateRuleBypassTransitions = true
- // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
- // p.bypassAltsAtnCache[serializedAtn] = result
- // }
- // return result
-}
-
-// The preferred method of getting a tree pattern. For example, here's a
-// sample use:
-//
-//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-//
-
-func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
-
- panic("NewParseTreePatternMatcher not implemented!")
- //
- // if (lexer == nil) {
- // if (p.GetTokenStream() != nil) {
- // tokenSource := p.GetTokenStream().GetTokenSource()
- // if _, ok := tokenSource.(ILexer); ok {
- // lexer = tokenSource
- // }
- // }
- // }
- // if (lexer == nil) {
- // panic("Parser can't discover a lexer to use")
- // }
-
- // m := NewParseTreePatternMatcher(lexer, p)
- // return m.compile(pattern, patternRuleIndex)
-}
-
-func (p *BaseParser) GetInputStream() IntStream {
- return p.GetTokenStream()
-}
-
-func (p *BaseParser) SetInputStream(input TokenStream) {
- p.SetTokenStream(input)
-}
-
-func (p *BaseParser) GetTokenStream() TokenStream {
- return p.input
-}
-
-// Set the token stream and reset the parser.//
-func (p *BaseParser) SetTokenStream(input TokenStream) {
- p.input = nil
- p.reset()
- p.input = input
-}
-
-// Match needs to return the current input symbol, which gets put
-// into the label for the associated token ref e.g., x=ID.
-//
-func (p *BaseParser) GetCurrentToken() Token {
- return p.input.LT(1)
-}
-
-func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
- if offendingToken == nil {
- offendingToken = p.GetCurrentToken()
- }
- p._SyntaxErrors++
- line := offendingToken.GetLine()
- column := offendingToken.GetColumn()
- listener := p.GetErrorListenerDispatch()
- listener.SyntaxError(p, offendingToken, line, column, msg, err)
-}
-
-func (p *BaseParser) Consume() Token {
- o := p.GetCurrentToken()
- if o.GetTokenType() != TokenEOF {
- p.GetInputStream().Consume()
- }
- hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
- if p.BuildParseTrees || hasListener {
- if p.errHandler.InErrorRecoveryMode(p) {
- node := p.ctx.AddErrorNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitErrorNode(node)
- }
- }
-
- } else {
- node := p.ctx.AddTokenNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitTerminal(node)
- }
- }
- }
- // node.invokingState = p.state
- }
-
- return o
-}
-
-func (p *BaseParser) addContextToParseTree() {
- // add current context to parent if we have a parent
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
- }
-}
-
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
- p.SetState(state)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.BuildParseTrees {
- p.addContextToParseTree()
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent()
- }
-}
-
-func (p *BaseParser) ExitRule() {
- p.ctx.SetStop(p.input.LT(-1))
- // trigger event on ctx, before it reverts to parent
- if p.parseListeners != nil {
- p.TriggerExitRuleEvent()
- }
- p.SetState(p.ctx.GetInvokingState())
- if p.ctx.GetParent() != nil {
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- } else {
- p.ctx = nil
- }
-}
-
-func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
- localctx.SetAltNumber(altNum)
- // if we have Newlocalctx, make sure we replace existing ctx
- // that is previous child of parse tree
- if p.BuildParseTrees && p.ctx != localctx {
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
- p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
- }
- }
- p.ctx = localctx
-}
-
-// Get the precedence level for the top-most precedence rule.
-//
-// @return The precedence level for the top-most precedence rule, or -1 if
-// the parser context is not nested within a precedence rule.
-
-func (p *BaseParser) GetPrecedence() int {
- if len(p.precedenceStack) == 0 {
- return -1
- }
-
- return p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
- p.SetState(state)
- p.precedenceStack.Push(precedence)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-//
-// Like {@link //EnterRule} but for recursive rules.
-
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
- previous := p.ctx
- previous.SetParent(localctx)
- previous.SetInvokingState(state)
- previous.SetStop(p.input.LT(-1))
-
- p.ctx = localctx
- p.ctx.SetStart(previous.GetStart())
- if p.BuildParseTrees {
- p.ctx.AddChild(previous)
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
- p.precedenceStack.Pop()
- p.ctx.SetStop(p.input.LT(-1))
- retCtx := p.ctx // save current ctx (return value)
- // unroll so ctx is as it was before call to recursive method
- if p.parseListeners != nil {
- for p.ctx != parentCtx {
- p.TriggerExitRuleEvent()
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- }
- } else {
- p.ctx = parentCtx
- }
- // hook into tree
- retCtx.SetParent(parentCtx)
- if p.BuildParseTrees && parentCtx != nil {
- // add return ctx into invoking rule's tree
- parentCtx.AddChild(retCtx)
- }
-}
-
-func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
- ctx := p.ctx
- for ctx != nil {
- if ctx.GetRuleIndex() == ruleIndex {
- return ctx
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- return nil
-}
-
-func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
- return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) inContext(context ParserRuleContext) bool {
- // TODO: useful in parser?
- return false
-}
-
-//
-// Checks whether or not {@code symbol} can follow the current state in the
-// ATN. The behavior of p.method is equivalent to the following, but is
-// implemented such that the complete context-sensitive follow set does not
-// need to be explicitly constructed.
-//
-//
-// return getExpectedTokens().contains(symbol)
-//
-//
-// @param symbol the symbol type to check
-// @return {@code true} if {@code symbol} can follow the current state in
-// the ATN, otherwise {@code false}.
-
-func (p *BaseParser) IsExpectedToken(symbol int) bool {
- atn := p.Interpreter.atn
- ctx := p.ctx
- s := atn.states[p.state]
- following := atn.NextTokens(s, nil)
- if following.contains(symbol) {
- return true
- }
- if !following.contains(TokenEpsilon) {
- return false
- }
- for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
- invokingState := atn.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
- following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
- if following.contains(symbol) {
- return true
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- if following.contains(TokenEpsilon) && symbol == TokenEOF {
- return true
- }
-
- return false
-}
-
-// Computes the set of input symbols which could follow the current parser
-// state and context, as given by {@link //GetState} and {@link //GetContext},
-// respectively.
-//
-// @see ATN//getExpectedTokens(int, RuleContext)
-//
-func (p *BaseParser) GetExpectedTokens() *IntervalSet {
- return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
-}
-
-func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
- atn := p.Interpreter.atn
- s := atn.states[p.state]
- return atn.NextTokens(s, nil)
-}
-
-// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
-func (p *BaseParser) GetRuleIndex(ruleName string) int {
- var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
- if ok {
- return ruleIndex
- }
-
- return -1
-}
-
-// Return List<String> of the rule names in your parser instance
-// leading up to a call to the current rule. You could override if
-// you want more details such as the file/line info of where
-// in the ATN a rule is invoked.
-//
-// this very useful for error messages.
-
-func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
- if c == nil {
- c = p.ctx
- }
- stack := make([]string, 0)
- for c != nil {
- // compute what follows who invoked us
- ruleIndex := c.GetRuleIndex()
- if ruleIndex < 0 {
- stack = append(stack, "n/a")
- } else {
- stack = append(stack, p.GetRuleNames()[ruleIndex])
- }
-
- vp := c.GetParent()
-
- if vp == nil {
- break
- }
-
- c = vp.(ParserRuleContext)
- }
- return stack
-}
-
-// For debugging and other purposes.//
-func (p *BaseParser) GetDFAStrings() string {
- return fmt.Sprint(p.Interpreter.decisionToDFA)
-}
-
-// For debugging and other purposes.//
-func (p *BaseParser) DumpDFA() {
- seenOne := false
- for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.numStates() > 0 {
- if seenOne {
- fmt.Println()
- }
- fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
- fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
- seenOne = true
- }
- }
-}
-
-func (p *BaseParser) GetSourceName() string {
- return p.GrammarFileName
-}
-
-// During a parse is sometimes useful to listen in on the rule entry and exit
-// events as well as token Matches. p.is for quick and dirty debugging.
-//
-func (p *BaseParser) SetTrace(trace *TraceListener) {
- if trace == nil {
- p.RemoveParseListener(p.tracer)
- p.tracer = nil
- } else {
- if p.tracer != nil {
- p.RemoveParseListener(p.tracer)
- }
- p.tracer = NewTraceListener(p)
- p.AddParseListener(p.tracer)
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
deleted file mode 100644
index 888d512975..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
+++ /dev/null
@@ -1,1544 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-var (
- ParserATNSimulatorDebug = false
- ParserATNSimulatorListATNDecisions = false
- ParserATNSimulatorDFADebug = false
- ParserATNSimulatorRetryDebug = false
- TurnOffLRLoopEntryBranchOpt = false
-)
-
-type ParserATNSimulator struct {
- *BaseATNSimulator
-
- parser Parser
- predictionMode int
- input TokenStream
- startIndex int
- dfa *DFA
- mergeCache *DoubleDict
- outerContext ParserRuleContext
-}
-
-func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
-
- p := new(ParserATNSimulator)
-
- p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
-
- p.parser = parser
- p.decisionToDFA = decisionToDFA
- // SLL, LL, or LL + exact ambig detection?//
- p.predictionMode = PredictionModeLL
- // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
- p.input = nil
- p.startIndex = 0
- p.outerContext = nil
- p.dfa = nil
- // Each prediction operation uses a cache for merge of prediction contexts.
- // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
- // isn't Synchronized but we're ok since two threads shouldn't reuse same
- // parser/atnsim object because it can only handle one input at a time.
- // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid
- // the merge if we ever see a and b again. Note that (b,a)&rarrc should
- // also be examined during cache lookup.
- //
- p.mergeCache = nil
-
- return p
-}
-
-func (p *ParserATNSimulator) GetPredictionMode() int {
- return p.predictionMode
-}
-
-func (p *ParserATNSimulator) SetPredictionMode(v int) {
- p.predictionMode = v
-}
-
-func (p *ParserATNSimulator) reset() {
-}
-
-func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
- fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
- strconv.Itoa(input.LT(1).GetColumn()))
- }
-
- p.input = input
- p.startIndex = input.Index()
- p.outerContext = outerContext
-
- dfa := p.decisionToDFA[decision]
- p.dfa = dfa
- m := input.Mark()
- index := input.Index()
-
- defer func() {
- p.dfa = nil
- p.mergeCache = nil // wack cache after each prediction
- input.Seek(index)
- input.Release(m)
- }()
-
- // Now we are certain to have a specific decision's DFA
- // But, do we still need an initial state?
- var s0 *DFAState
- p.atn.stateMu.RLock()
- if dfa.getPrecedenceDfa() {
- p.atn.edgeMu.RLock()
- // the start state for a precedence DFA depends on the current
- // parser precedence, and is provided by a DFA method.
- s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
- p.atn.edgeMu.RUnlock()
- } else {
- // the start state for a "regular" DFA is just s0
- s0 = dfa.getS0()
- }
- p.atn.stateMu.RUnlock()
-
- if s0 == nil {
- if outerContext == nil {
- outerContext = RuleContextEmpty
- }
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
- fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
- }
- fullCtx := false
- s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
-
- p.atn.stateMu.Lock()
- if dfa.getPrecedenceDfa() {
- // If p is a precedence DFA, we use applyPrecedenceFilter
- // to convert the computed start state to a precedence start
- // state. We then use DFA.setPrecedenceStartState to set the
- // appropriate start state for the precedence level rather
- // than simply setting DFA.s0.
- //
- dfa.s0.configs = s0Closure
- s0Closure = p.applyPrecedenceFilter(s0Closure)
- s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
- p.atn.edgeMu.Lock()
- dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
- p.atn.edgeMu.Unlock()
- } else {
- s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
- dfa.setS0(s0)
- }
- p.atn.stateMu.Unlock()
- }
-
- alt := p.execATN(dfa, s0, input, index, outerContext)
- if ParserATNSimulatorDebug {
- fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
- }
- return alt
-
-}
-
-// Performs ATN simulation to compute a predicted alternative based
-// upon the remaining input, but also updates the DFA cache to avoid
-// having to traverse the ATN again for the same input sequence.
-
-// There are some key conditions we're looking for after computing a new
-// set of ATN configs (proposed DFA state):
-// if the set is empty, there is no viable alternative for current symbol
-// does the state uniquely predict an alternative?
-// does the state have a conflict that would prevent us from
-// putting it on the work list?
-
-// We also have some key operations to do:
-// add an edge from previous DFA state to potentially NewDFA state, D,
-// upon current symbol but only if adding to work list, which means in all
-// cases except no viable alternative (and possibly non-greedy decisions?)
-// collecting predicates and adding semantic context to DFA accept states
-// adding rule context to context-sensitive DFA accept states
-// consuming an input symbol
-// Reporting a conflict
-// Reporting an ambiguity
-// Reporting a context sensitivity
-// Reporting insufficient predicates
-
-// cover these cases:
-// dead end
-// single alt
-// single alt + preds
-// conflict
-// conflict + preds
-//
-func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
-
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
- fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
- }
-
- previousD := s0
-
- if ParserATNSimulatorDebug {
- fmt.Println("s0 = " + s0.String())
- }
- t := input.LA(1)
- for { // for more work
- D := p.getExistingTargetState(previousD, t)
- if D == nil {
- D = p.computeTargetState(dfa, previousD, t)
- }
- if D == ATNSimulatorError {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for SLL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
- input.Seek(startIndex)
- alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
- if alt != ATNInvalidAltNumber {
- return alt
- }
-
- panic(e)
- }
- if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
- // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- conflictingAlts := D.configs.GetConflictingAlts()
- if D.predicates != nil {
- if ParserATNSimulatorDebug {
- fmt.Println("DFA state has preds in DFA sim LL failover")
- }
- conflictIndex := input.Index()
- if conflictIndex != startIndex {
- input.Seek(startIndex)
- }
- conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
- if conflictingAlts.length() == 1 {
- if ParserATNSimulatorDebug {
- fmt.Println("Full LL avoided")
- }
- return conflictingAlts.minValue()
- }
- if conflictIndex != startIndex {
- // restore the index so Reporting the fallback to full
- // context occurs with the index at the correct spot
- input.Seek(conflictIndex)
- }
- }
- if ParserATNSimulatorDFADebug {
- fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
- }
- fullCtx := true
- s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
- p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
- alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
- return alt
- }
- if D.isAcceptState {
- if D.predicates == nil {
- return D.prediction
- }
- stopIndex := input.Index()
- input.Seek(startIndex)
- alts := p.evalSemanticContext(D.predicates, outerContext, true)
-
- switch alts.length() {
- case 0:
- panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
- case 1:
- return alts.minValue()
- default:
- // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
- p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
- return alts.minValue()
- }
- }
- previousD = D
-
- if t != TokenEOF {
- input.Consume()
- t = input.LA(1)
- }
- }
-
- panic("Should not have reached p state")
-}
-
-// Get an existing target state for an edge in the DFA. If the target state
-// for the edge has not yet been computed or is otherwise not available,
-// p method returns {@code nil}.
-//
-// @param previousD The current DFA state
-// @param t The next input symbol
-// @return The existing target DFA state for the given input symbol
-// {@code t}, or {@code nil} if the target state for p edge is not
-// already cached
-
-func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
- if t+1 < 0 {
- return nil
- }
-
- p.atn.edgeMu.RLock()
- defer p.atn.edgeMu.RUnlock()
- edges := previousD.getEdges()
- if edges == nil || t+1 >= len(edges) {
- return nil
- }
- return previousD.getIthEdge(t + 1)
-}
-
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
-//
-// @param dfa The DFA
-// @param previousD The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, p method
-// returns {@link //ERROR}.
-
-func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
- reach := p.computeReachSet(previousD.configs, t, false)
-
- if reach == nil {
- p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
- return ATNSimulatorError
- }
- // create Newtarget state we'll add to DFA after it's complete
- D := NewDFAState(-1, reach)
-
- predictedAlt := p.getUniqueAlt(reach)
-
- if ParserATNSimulatorDebug {
- altSubSets := PredictionModegetConflictingAltSubsets(reach)
- fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
- ", previous=" + previousD.configs.String() +
- ", configs=" + reach.String() +
- ", predict=" + strconv.Itoa(predictedAlt) +
- ", allSubsetsConflict=" +
- fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
- ", conflictingAlts=" + p.getConflictingAlts(reach).String())
- }
- if predictedAlt != ATNInvalidAltNumber {
- // NO CONFLICT, UNIQUELY PREDICTED ALT
- D.isAcceptState = true
- D.configs.SetUniqueAlt(predictedAlt)
- D.setPrediction(predictedAlt)
- } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
- // MORE THAN ONE VIABLE ALTERNATIVE
- D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
- D.requiresFullContext = true
- // in SLL-only mode, we will stop at p state and return the minimum alt
- D.isAcceptState = true
- D.setPrediction(D.configs.GetConflictingAlts().minValue())
- }
- if D.isAcceptState && D.configs.HasSemanticContext() {
- p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
- if D.predicates != nil {
- D.setPrediction(ATNInvalidAltNumber)
- }
- }
- // all adds to dfa are done after we've created full D state
- D = p.addDFAEdge(dfa, previousD, t, D)
- return D
-}
-
-func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
- // We need to test all predicates, even in DFA states that
- // uniquely predict alternative.
- nalts := len(decisionState.GetTransitions())
- // Update DFA so reach becomes accept state with (predicate,alt)
- // pairs if preds found for conflicting alts
- altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
- altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
- if altToPred != nil {
- dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
- dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
- } else {
- // There are preds in configs but they might go away
- // when OR'd together like {p}? || NONE == NONE. If neither
- // alt has preds, resolve to min alt
- dfaState.setPrediction(altsToCollectPredsFrom.minValue())
- }
-}
-
-// comes back with reach.uniqueAlt set to a valid alt
-func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
-
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
- fmt.Println("execATNWithFullContext " + s0.String())
- }
-
- fullCtx := true
- foundExactAmbig := false
- var reach ATNConfigSet
- previous := s0
- input.Seek(startIndex)
- t := input.LA(1)
- predictedAlt := -1
-
- for { // for more work
- reach = p.computeReachSet(previous, t, fullCtx)
- if reach == nil {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for LL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- e := p.noViableAlt(input, outerContext, previous, startIndex)
- input.Seek(startIndex)
- alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
- if alt != ATNInvalidAltNumber {
- return alt
- }
-
- panic(e)
- }
- altSubSets := PredictionModegetConflictingAltSubsets(reach)
- if ParserATNSimulatorDebug {
- fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
- strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
- fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
- }
- reach.SetUniqueAlt(p.getUniqueAlt(reach))
- // unique prediction?
- if reach.GetUniqueAlt() != ATNInvalidAltNumber {
- predictedAlt = reach.GetUniqueAlt()
- break
- }
- if p.predictionMode != PredictionModeLLExactAmbigDetection {
- predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
- if predictedAlt != ATNInvalidAltNumber {
- break
- }
- } else {
- // In exact ambiguity mode, we never try to terminate early.
- // Just keeps scarfing until we know what the conflict is
- if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
- foundExactAmbig = true
- predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
- break
- }
- // else there are multiple non-conflicting subsets or
- // we're not sure what the ambiguity is yet.
- // So, keep going.
- }
- previous = reach
- if t != TokenEOF {
- input.Consume()
- t = input.LA(1)
- }
- }
- // If the configuration set uniquely predicts an alternative,
- // without conflict, then we know that it's a full LL decision
- // not SLL.
- if reach.GetUniqueAlt() != ATNInvalidAltNumber {
- p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
- return predictedAlt
- }
- // We do not check predicates here because we have checked them
- // on-the-fly when doing full context prediction.
-
- //
- // In non-exact ambiguity detection mode, we might actually be able to
- // detect an exact ambiguity, but I'm not going to spend the cycles
- // needed to check. We only emit ambiguity warnings in exact ambiguity
- // mode.
- //
- // For example, we might know that we have conflicting configurations.
- // But, that does not mean that there is no way forward without a
- // conflict. It's possible to have nonconflicting alt subsets as in:
-
- // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
-
- // from
- //
- // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
- // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
- //
- // In p case, (17,1,[5 $]) indicates there is some next sequence that
- // would resolve p without conflict to alternative 1. Any other viable
- // next sequence, however, is associated with a conflict. We stop
- // looking for input because no amount of further lookahead will alter
- // the fact that we should predict alternative 1. We just can't say for
- // sure that there is an ambiguity without looking further.
-
- p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
-
- return predictedAlt
-}
-
-func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
- if ParserATNSimulatorDebug {
- fmt.Println("in computeReachSet, starting closure: " + closure.String())
- }
- if p.mergeCache == nil {
- p.mergeCache = NewDoubleDict()
- }
- intermediate := NewBaseATNConfigSet(fullCtx)
-
- // Configurations already in a rule stop state indicate reaching the end
- // of the decision rule (local context) or end of the start rule (full
- // context). Once reached, these configurations are never updated by a
- // closure operation, so they are handled separately for the performance
- // advantage of having a smaller intermediate set when calling closure.
- //
- // For full-context reach operations, separate handling is required to
- // ensure that the alternative Matching the longest overall sequence is
- // chosen when multiple such configurations can Match the input.
-
- var skippedStopStates []*BaseATNConfig
-
- // First figure out where we can reach on input t
- for _, c := range closure.GetItems() {
- if ParserATNSimulatorDebug {
- fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
- }
-
- if _, ok := c.GetState().(*RuleStopState); ok {
- if fullCtx || t == TokenEOF {
- skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
- if ParserATNSimulatorDebug {
- fmt.Println("added " + c.String() + " to SkippedStopStates")
- }
- }
- continue
- }
-
- for _, trans := range c.GetState().GetTransitions() {
- target := p.getReachableTarget(trans, t)
- if target != nil {
- cfg := NewBaseATNConfig4(c, target)
- intermediate.Add(cfg, p.mergeCache)
- if ParserATNSimulatorDebug {
- fmt.Println("added " + cfg.String() + " to intermediate")
- }
- }
- }
- }
-
- // Now figure out where the reach operation can take us...
- var reach ATNConfigSet
-
- // This block optimizes the reach operation for intermediate sets which
- // trivially indicate a termination state for the overall
- // AdaptivePredict operation.
- //
- // The conditions assume that intermediate
- // contains all configurations relevant to the reach set, but p
- // condition is not true when one or more configurations have been
- // withheld in SkippedStopStates, or when the current symbol is EOF.
- //
- if skippedStopStates == nil && t != TokenEOF {
- if len(intermediate.configs) == 1 {
- // Don't pursue the closure if there is just one state.
- // It can only have one alternative just add to result
- // Also don't pursue the closure if there is unique alternative
- // among the configurations.
- reach = intermediate
- } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
- // Also don't pursue the closure if there is unique alternative
- // among the configurations.
- reach = intermediate
- }
- }
- // If the reach set could not be trivially determined, perform a closure
- // operation on the intermediate set to compute its initial value.
- //
- if reach == nil {
- reach = NewBaseATNConfigSet(fullCtx)
- closureBusy := newArray2DHashSet(nil, nil)
- treatEOFAsEpsilon := t == TokenEOF
- amount := len(intermediate.configs)
- for k := 0; k < amount; k++ {
- p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
- }
- }
- if t == TokenEOF {
- // After consuming EOF no additional input is possible, so we are
- // only interested in configurations which reached the end of the
- // decision rule (local context) or end of the start rule (full
- // context). Update reach to contain only these configurations. This
- // handles both explicit EOF transitions in the grammar and implicit
- // EOF transitions following the end of the decision or start rule.
- //
- // When reach==intermediate, no closure operation was performed. In
- // p case, removeAllConfigsNotInRuleStopState needs to check for
- // reachable rule stop states as well as configurations already in
- // a rule stop state.
- //
- // This is handled before the configurations in SkippedStopStates,
- // because any configurations potentially added from that list are
- // already guaranteed to meet p condition whether or not it's
- // required.
- //
- reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
- }
- // If SkippedStopStates!=nil, then it contains at least one
- // configuration. For full-context reach operations, these
- // configurations reached the end of the start rule, in which case we
- // only add them back to reach if no configuration during the current
- // closure operation reached such a state. This ensures AdaptivePredict
- // chooses an alternative Matching the longest overall sequence when
- // multiple alternatives are viable.
- //
- if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
- for l := 0; l < len(skippedStopStates); l++ {
- reach.Add(skippedStopStates[l], p.mergeCache)
- }
- }
- if len(reach.GetItems()) == 0 {
- return nil
- }
-
- return reach
-}
-
-//
-// Return a configuration set containing only the configurations from
-// {@code configs} which are in a {@link RuleStopState}. If all
-// configurations in {@code configs} are already in a rule stop state, p
-// method simply returns {@code configs}.
-//
-// When {@code lookToEndOfRule} is true, p method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
-//
-// @param configs the configuration set to update
-// @param lookToEndOfRule when true, p method checks for rule stop states
-// reachable by epsilon-only transitions from each configuration in
-// {@code configs}.
-//
-// @return {@code configs} if all configurations in {@code configs} are in a
-// rule stop state, otherwise return a Newconfiguration set containing only
-// the configurations from {@code configs} which are in a rule stop state
-//
-func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return configs
- }
- result := NewBaseATNConfigSet(configs.FullContext())
- for _, config := range configs.GetItems() {
- if _, ok := config.GetState().(*RuleStopState); ok {
- result.Add(config, p.mergeCache)
- continue
- }
- if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
- NextTokens := p.atn.NextTokens(config.GetState(), nil)
- if NextTokens.contains(TokenEpsilon) {
- endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
- result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
- }
- }
- }
- return result
-}
-
-func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
- // always at least the implicit call to start rule
- initialContext := predictionContextFromRuleContext(p.atn, ctx)
- configs := NewBaseATNConfigSet(fullCtx)
- for i := 0; i < len(a.GetTransitions()); i++ {
- target := a.GetTransitions()[i].getTarget()
- c := NewBaseATNConfig6(target, i+1, initialContext)
- closureBusy := newArray2DHashSet(nil, nil)
- p.closure(c, configs, closureBusy, true, fullCtx, false)
- }
- return configs
-}
-
-//
-// This method transforms the start state computed by
-// {@link //computeStartState} to the special start state used by a
-// precedence DFA for a particular precedence value. The transformation
-// process applies the following changes to the start state's configuration
-// set.
-//
-//
-// Evaluate the precedence predicates for each configuration using
-// {@link SemanticContext//evalPrecedence}.
-// Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context. This transformation is
-// valid for the following reasons:
-//
-// The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-// The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
-//
-//
-//
-//
-// The prediction context must be considered by p filter to address
-// situations like the following.
-//
-//
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-//
-//
-// If the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement}
-// from being eliminated by the filter.
-//
-//
-// @param configs The configuration set computed by
-// {@link //computeStartState} as the start state for the DFA.
-// @return The transformed configuration set representing the start state
-// for a precedence DFA at a particular precedence level (determined by
-// calling {@link Parser//getPrecedence}).
-//
-func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
-
- statesFromAlt1 := make(map[int]PredictionContext)
- configSet := NewBaseATNConfigSet(configs.FullContext())
-
- for _, config := range configs.GetItems() {
- // handle alt 1 first
- if config.GetAlt() != 1 {
- continue
- }
- updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
- if updatedContext == nil {
- // the configuration was eliminated
- continue
- }
- statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
- if updatedContext != config.GetSemanticContext() {
- configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
- } else {
- configSet.Add(config, p.mergeCache)
- }
- }
- for _, config := range configs.GetItems() {
-
- if config.GetAlt() == 1 {
- // already handled
- continue
- }
- // In the future, p elimination step could be updated to also
- // filter the prediction context for alternatives predicting alt>1
- // (basically a graph subtraction algorithm).
- if !config.getPrecedenceFilterSuppressed() {
- context := statesFromAlt1[config.GetState().GetStateNumber()]
- if context != nil && context.equals(config.GetContext()) {
- // eliminated
- continue
- }
- }
- configSet.Add(config, p.mergeCache)
- }
- return configSet
-}
-
-func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
- if trans.Matches(ttype, 0, p.atn.maxTokenType) {
- return trans.getTarget()
- }
-
- return nil
-}
-
-func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
-
- altToPred := make([]SemanticContext, nalts+1)
- for _, c := range configs.GetItems() {
- if ambigAlts.contains(c.GetAlt()) {
- altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
- }
- }
- nPredAlts := 0
- for i := 1; i <= nalts; i++ {
- pred := altToPred[i]
- if pred == nil {
- altToPred[i] = SemanticContextNone
- } else if pred != SemanticContextNone {
- nPredAlts++
- }
- }
- // nonambig alts are nil in altToPred
- if nPredAlts == 0 {
- altToPred = nil
- }
- if ParserATNSimulatorDebug {
- fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
- }
- return altToPred
-}
-
-func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
- pairs := make([]*PredPrediction, 0)
- containsPredicate := false
- for i := 1; i < len(altToPred); i++ {
- pred := altToPred[i]
- // unpredicated is indicated by SemanticContextNONE
- if ambigAlts != nil && ambigAlts.contains(i) {
- pairs = append(pairs, NewPredPrediction(pred, i))
- }
- if pred != SemanticContextNone {
- containsPredicate = true
- }
- }
- if !containsPredicate {
- return nil
- }
- return pairs
-}
-
-//
-// This method is used to improve the localization of error messages by
-// choosing an alternative rather than panicing a
-// {@link NoViableAltException} in particular prediction scenarios where the
-// {@link //ERROR} state was reached during ATN simulation.
-//
-//
-// The default implementation of p method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.
-//
-//
-// If a syntactically valid path or paths reach the end of the decision rule and
-// they are semantically valid if predicated, return the min associated alt.
-// Else, if a semantically invalid but syntactically valid path exist
-// or paths exist, return the minimum associated alt.
-//
-// Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
-//
-//
-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, p could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing p alternative within
-// {@link //AdaptivePredict} instead of panicing a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
-//
-//
-// @param configs The ATN configurations which were valid immediately before
-// the {@link //ERROR} state was reached
-// @param outerContext The is the \gamma_0 initial parser context from the paper
-// or the parser stack at the instant before prediction commences.
-//
-// @return The value to return from {@link //AdaptivePredict}, or
-// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
-// identified and {@link //AdaptivePredict} should Report an error instead.
-//
-func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
- cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
- semValidConfigs := cfgs[0]
- semInvalidConfigs := cfgs[1]
- alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
- if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
- return alt
- }
- // Is there a syntactically valid path with a failed pred?
- if len(semInvalidConfigs.GetItems()) > 0 {
- alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
- if alt != ATNInvalidAltNumber { // syntactically viable path exists
- return alt
- }
- }
- return ATNInvalidAltNumber
-}
-
-func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
- alts := NewIntervalSet()
-
- for _, c := range configs.GetItems() {
- _, ok := c.GetState().(*RuleStopState)
-
- if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
- alts.addOne(c.GetAlt())
- }
- }
- if alts.length() == 0 {
- return ATNInvalidAltNumber
- }
-
- return alts.first()
-}
-
-// Walk the list of configurations and split them according to
-// those that have preds evaluating to true/false. If no pred, assume
-// true pred and include in succeeded set. Returns Pair of sets.
-//
-// Create a NewSet so as not to alter the incoming parameter.
-//
-// Assumption: the input stream has been restored to the starting point
-// prediction, which is where predicates need to evaluate.
-
-type ATNConfigSetPair struct {
- item0, item1 ATNConfigSet
-}
-
-func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
- succeeded := NewBaseATNConfigSet(configs.FullContext())
- failed := NewBaseATNConfigSet(configs.FullContext())
-
- for _, c := range configs.GetItems() {
- if c.GetSemanticContext() != SemanticContextNone {
- predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
- if predicateEvaluationResult {
- succeeded.Add(c, nil)
- } else {
- failed.Add(c, nil)
- }
- } else {
- succeeded.Add(c, nil)
- }
- }
- return []ATNConfigSet{succeeded, failed}
-}
-
-// Look through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A {@code NONE} predicate indicates an alt containing an
-// unpredicated config which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
-//
-func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
- predictions := NewBitSet()
- for i := 0; i < len(predPredictions); i++ {
- pair := predPredictions[i]
- if pair.pred == SemanticContextNone {
- predictions.add(pair.alt)
- if !complete {
- break
- }
- continue
- }
-
- predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
- if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
- fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
- }
- if predicateEvaluationResult {
- if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
- fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
- }
- predictions.add(pair.alt)
- if !complete {
- break
- }
- }
- }
- return predictions
-}
-
-func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
- initialDepth := 0
- p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
- fullCtx, initialDepth, treatEOFAsEpsilon)
-}
-
-func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if ParserATNSimulatorDebug {
- fmt.Println("closure(" + config.String() + ")")
- fmt.Println("configs(" + configs.String() + ")")
- if config.GetReachesIntoOuterContext() > 50 {
- panic("problem")
- }
- }
-
- if _, ok := config.GetState().(*RuleStopState); ok {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if !config.GetContext().isEmpty() {
- for i := 0; i < config.GetContext().length(); i++ {
- if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
- if fullCtx {
- configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
- continue
- } else {
- // we have no context info, just chase follow links (if greedy)
- if ParserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
- }
- p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
- continue
- }
- returnState := p.atn.states[config.GetContext().getReturnState(i)]
- newContext := config.GetContext().GetParent(i) // "pop" return state
-
- c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
- // While we have context to pop back from, we may have
- // gotten that context AFTER having falling off a rule.
- // Make sure we track that we are now out of context.
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
- p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
- }
- return
- } else if fullCtx {
- // reached end of start rule
- configs.Add(config, p.mergeCache)
- return
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- if ParserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
- }
- }
- }
- p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
-}
-
-// Do the actual work of walking epsilon edges//
-func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- state := config.GetState()
- // optimization
- if !state.GetEpsilonOnlyTransitions() {
- configs.Add(config, p.mergeCache)
- // make sure to not return here, because EOF transitions can act as
- // both epsilon transitions and non-epsilon transitions.
- }
- for i := 0; i < len(state.GetTransitions()); i++ {
- if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
- continue
- }
-
- t := state.GetTransitions()[i]
- _, ok := t.(*ActionTransition)
- continueCollecting := collectPredicates && !ok
- c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
- if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
- newDepth := depth
-
- if _, ok := config.GetState().(*RuleStopState); ok {
- // target fell off end of rule mark resulting c as having dipped into outer context
- // We can't get here if incoming config was rule stop and we had context
- // track how far we dip into outer context. Might
- // come in handy and we avoid evaluating context dependent
- // preds if p is > 0.
-
- if p.dfa != nil && p.dfa.getPrecedenceDfa() {
- if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
- c.setPrecedenceFilterSuppressed(true)
- }
- }
-
- c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
-
- if closureBusy.Add(c) != c {
- // avoid infinite recursion for right-recursive rules
- continue
- }
-
- configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method
- newDepth--
- if ParserATNSimulatorDebug {
- fmt.Println("dips into outer ctx: " + c.String())
- }
- } else {
- if !t.getIsEpsilon() && closureBusy.Add(c) != c {
- // avoid infinite recursion for EOF* and EOF+
- continue
- }
- if _, ok := t.(*RuleTransition); ok {
- // latch when newDepth goes negative - once we step out of the entry context we can't return
- if newDepth >= 0 {
- newDepth++
- }
- }
- }
- p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
- }
- }
-}
-
-func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
- if TurnOffLRLoopEntryBranchOpt {
- return false
- }
-
- _p := config.GetState()
-
- // First check to see if we are in StarLoopEntryState generated during
- // left-recursion elimination. For efficiency, also check if
- // the context has an empty stack case. If so, it would mean
- // global FOLLOW so we can't perform optimization
- if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() {
- return false
- }
-
- // Require all return states to return back to the same rule
- // that p is in.
- numCtxs := config.GetContext().length()
- for i := 0; i < numCtxs; i++ {
- returnState := p.atn.states[config.GetContext().getReturnState(i)]
- if returnState.GetRuleIndex() != _p.GetRuleIndex() {
- return false
- }
- }
-
- decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState)
- blockEndStateNum := decisionStartState.getEndState().stateNumber
- blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
-
- // Verify that the top of each stack context leads to loop entry/exit
- // state through epsilon edges and w/o leaving rule.
-
- for i := 0; i < numCtxs; i++ { // for each stack context
- returnStateNumber := config.GetContext().getReturnState(i)
- returnState := p.atn.states[returnStateNumber]
-
- // all states must have single outgoing epsilon edge
- if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
- return false
- }
-
- // Look for prefix op case like 'not expr', (' type ')' expr
- returnStateTarget := returnState.GetTransitions()[0].getTarget()
- if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
- continue
- }
-
- // Look for 'expr op expr' or case where expr's return state is block end
- // of (...)* internal block; the block end points to loop back
- // which points to p but we don't need to check that
- if returnState == blockEndState {
- continue
- }
-
- // Look for ternary expr ? expr : expr. The return state points at block end,
- // which points at loop entry state
- if returnStateTarget == blockEndState {
- continue
- }
-
- // Look for complex prefix 'between expr and expr' case where 2nd expr's
- // return state points at block end state of (...)* internal block
- if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
- len(returnStateTarget.GetTransitions()) == 1 &&
- returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
- returnStateTarget.GetTransitions()[0].getTarget() == _p {
- continue
- }
-
- // anything else ain't conforming
- return false
- }
-
- return true
-}
-
-func (p *ParserATNSimulator) getRuleName(index int) string {
- if p.parser != nil && index >= 0 {
- return p.parser.GetRuleNames()[index]
- }
- var sb strings.Builder
- sb.Grow(32)
-
- sb.WriteString("')
- return sb.String()
-}
-
-func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
-
- switch t.getSerializationType() {
- case TransitionRULE:
- return p.ruleTransition(config, t.(*RuleTransition))
- case TransitionPRECEDENCE:
- return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
- case TransitionPREDICATE:
- return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
- case TransitionACTION:
- return p.actionTransition(config, t.(*ActionTransition))
- case TransitionEPSILON:
- return NewBaseATNConfig4(config, t.getTarget())
- case TransitionATOM, TransitionRANGE, TransitionSET:
- // EOF transitions act like epsilon transitions after the first EOF
- // transition is traversed
- if treatEOFAsEpsilon {
- if t.Matches(TokenEOF, 0, 1) {
- return NewBaseATNConfig4(config, t.getTarget())
- }
- }
- return nil
- default:
- return nil
- }
-}
-
-func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
- if ParserATNSimulatorDebug {
- fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
- }
- return NewBaseATNConfig4(config, t.getTarget())
-}
-
-func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
- pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
-
- if ParserATNSimulatorDebug {
- fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
- strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
- if p.parser != nil {
- fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
- }
- }
- var c *BaseATNConfig
- if collectPredicates && inContext {
- if fullCtx {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- currentPosition := p.input.Index()
- p.input.Seek(p.startIndex)
- predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
- p.input.Seek(currentPosition)
- if predSucceeds {
- c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
- }
- } else {
- newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
- }
- } else {
- c = NewBaseATNConfig4(config, pt.getTarget())
- }
- if ParserATNSimulatorDebug {
- fmt.Println("config from pred transition=" + c.String())
- }
- return c
-}
-
-func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
-
- if ParserATNSimulatorDebug {
- fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
- ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
- if p.parser != nil {
- fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
- }
- }
- var c *BaseATNConfig
- if collectPredicates && (!pt.isCtxDependent || inContext) {
- if fullCtx {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- currentPosition := p.input.Index()
- p.input.Seek(p.startIndex)
- predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
- p.input.Seek(currentPosition)
- if predSucceeds {
- c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
- }
- } else {
- newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
- }
- } else {
- c = NewBaseATNConfig4(config, pt.getTarget())
- }
- if ParserATNSimulatorDebug {
- fmt.Println("config from pred transition=" + c.String())
- }
- return c
-}
-
-func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
- if ParserATNSimulatorDebug {
- fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
- }
- returnState := t.followState
- newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
- return NewBaseATNConfig1(config, t.getTarget(), newContext)
-}
-
-func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModeGetAlts(altsets)
-}
-
-// Sam pointed out a problem with the previous definition, v3, of
-// ambiguous states. If we have another state associated with conflicting
-// alternatives, we should keep going. For example, the following grammar
-//
-// s : (ID | ID ID?) ''
-//
-// When the ATN simulation reaches the state before '', it has a DFA
-// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
-// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node
-// because alternative to has another way to continue, via [6|2|[]].
-// The key is that we have a single state that has config's only associated
-// with a single alternative, 2, and crucially the state transitions
-// among the configurations are all non-epsilon transitions. That means
-// we don't consider any conflicts that include alternative 2. So, we
-// ignore the conflict between alts 1 and 2. We ignore a set of
-// conflicting alts when there is an intersection with an alternative
-// associated with a single alt state in the state&rarrconfig-list map.
-//
-// It's also the case that we might have two conflicting configurations but
-// also a 3rd nonconflicting configuration for a different alternative:
-// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
-//
-// a : A | A | A B
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not
-// stop working on p state. In the previous example, we're concerned
-// with states associated with the conflicting alternatives. Here alt
-// 3 is not associated with the conflicting configs, but since we can continue
-// looking for input reasonably, I don't declare the state done. We
-// ignore a set of conflicting alts when we have an alternative
-// that we still need to pursue.
-//
-
-func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
- var conflictingAlts *BitSet
- if configs.GetUniqueAlt() != ATNInvalidAltNumber {
- conflictingAlts = NewBitSet()
- conflictingAlts.add(configs.GetUniqueAlt())
- } else {
- conflictingAlts = configs.GetConflictingAlts()
- }
- return conflictingAlts
-}
-
-func (p *ParserATNSimulator) GetTokenName(t int) string {
- if t == TokenEOF {
- return "EOF"
- }
-
- if p.parser != nil && p.parser.GetLiteralNames() != nil {
- if t >= len(p.parser.GetLiteralNames()) {
- fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
- // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
- } else {
- return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
- }
-
- return strconv.Itoa(t)
-}
-
-func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
- return p.GetTokenName(input.LA(1))
-}
-
-// Used for debugging in AdaptivePredict around execATN but I cut
-// it out for clarity now that alg. works well. We can leave p
-// "dead" code for a bit.
-//
-func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
-
- panic("Not implemented")
-
- // fmt.Println("dead end configs: ")
- // var decs = nvae.deadEndConfigs
- //
- // for i:=0; i0) {
- // var t = c.state.GetTransitions()[0]
- // if t2, ok := t.(*AtomTransition); ok {
- // trans = "Atom "+ p.GetTokenName(t2.label)
- // } else if t3, ok := t.(SetTransition); ok {
- // _, ok := t.(*NotSetTransition)
- //
- // var s string
- // if (ok){
- // s = "~"
- // }
- //
- // trans = s + "Set " + t3.set
- // }
- // }
- // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
- // }
-}
-
-func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
- return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
-}
-
-func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
- alt := ATNInvalidAltNumber
- for _, c := range configs.GetItems() {
- if alt == ATNInvalidAltNumber {
- alt = c.GetAlt() // found first alt
- } else if c.GetAlt() != alt {
- return ATNInvalidAltNumber
- }
- }
- return alt
-}
-
-//
-// Add an edge to the DFA, if possible. This method calls
-// {@link //addDFAState} to ensure the {@code to} state is present in the
-// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
-// range of edges that can be represented in the DFA tables, p method
-// returns without adding the edge to the DFA.
-//
-// If {@code to} is {@code nil}, p method returns {@code nil}.
-// Otherwise, p method returns the {@link DFAState} returned by calling
-// {@link //addDFAState} for the {@code to} state.
-//
-// @param dfa The DFA
-// @param from The source state for the edge
-// @param t The input symbol
-// @param to The target state for the edge
-//
-// @return If {@code to} is {@code nil}, p method returns {@code nil}
-// otherwise p method returns the result of calling {@link //addDFAState}
-// on {@code to}
-//
-func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
- if ParserATNSimulatorDebug {
- fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
- }
- if to == nil {
- return nil
- }
- p.atn.stateMu.Lock()
- to = p.addDFAState(dfa, to) // used existing if possible not incoming
- p.atn.stateMu.Unlock()
- if from == nil || t < -1 || t > p.atn.maxTokenType {
- return to
- }
- p.atn.edgeMu.Lock()
- if from.getEdges() == nil {
- from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
- }
- from.setIthEdge(t+1, to) // connect
- p.atn.edgeMu.Unlock()
-
- if ParserATNSimulatorDebug {
- var names []string
- if p.parser != nil {
- names = p.parser.GetLiteralNames()
- }
-
- fmt.Println("DFA=\n" + dfa.String(names, nil))
- }
- return to
-}
-
-//
-// Add state {@code D} to the DFA if it is not already present, and return
-// the actual instance stored in the DFA. If a state equivalent to {@code D}
-// is already in the DFA, the existing state is returned. Otherwise p
-// method returns {@code D} after adding it to the DFA.
-//
-// If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
-// does not change the DFA.
-//
-// @param dfa The dfa
-// @param D The DFA state to add
-// @return The state stored in the DFA. This will be either the existing
-// state if {@code D} is already in the DFA, or {@code D} itself if the
-// state was not already present.
-//
-func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
- if d == ATNSimulatorError {
- return d
- }
- hash := d.hash()
- existing, ok := dfa.getState(hash)
- if ok {
- return existing
- }
- d.stateNumber = dfa.numStates()
- if !d.configs.ReadOnly() {
- d.configs.OptimizeConfigs(p.BaseATNSimulator)
- d.configs.SetReadOnly(true)
- }
- dfa.setState(hash, d)
- if ParserATNSimulatorDebug {
- fmt.Println("adding NewDFA state: " + d.String())
- }
- return d
-}
-
-func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
- }
-}
-
-func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
- }
-}
-
-// If context sensitive parsing, we know it's ambiguity not conflict//
-func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
- exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
- }
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
deleted file mode 100644
index 49cd10c5ff..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "reflect"
- "strconv"
-)
-
-type ParserRuleContext interface {
- RuleContext
-
- SetException(RecognitionException)
-
- AddTokenNode(token Token) *TerminalNodeImpl
- AddErrorNode(badToken Token) *ErrorNodeImpl
-
- EnterRule(listener ParseTreeListener)
- ExitRule(listener ParseTreeListener)
-
- SetStart(Token)
- GetStart() Token
-
- SetStop(Token)
- GetStop() Token
-
- AddChild(child RuleContext) RuleContext
- RemoveLastChild()
-}
-
-type BaseParserRuleContext struct {
- *BaseRuleContext
-
- start, stop Token
- exception RecognitionException
- children []Tree
-}
-
-func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
- prc := new(BaseParserRuleContext)
-
- prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
-
- prc.RuleIndex = -1
- // * If we are debugging or building a parse tree for a Visitor,
- // we need to track all of the tokens and rule invocations associated
- // with prc rule's context. This is empty for parsing w/o tree constr.
- // operation because we don't the need to track the details about
- // how we parse prc rule.
- // /
- prc.children = nil
- prc.start = nil
- prc.stop = nil
- // The exception that forced prc rule to return. If the rule successfully
- // completed, prc is {@code nil}.
- prc.exception = nil
-
- return prc
-}
-
-func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
- prc.exception = e
-}
-
-func (prc *BaseParserRuleContext) GetChildren() []Tree {
- return prc.children
-}
-
-func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
- // from RuleContext
- prc.parentCtx = ctx.parentCtx
- prc.invokingState = ctx.invokingState
- prc.children = nil
- prc.start = ctx.start
- prc.stop = ctx.stop
-}
-
-func (prc *BaseParserRuleContext) GetText() string {
- if prc.GetChildCount() == 0 {
- return ""
- }
-
- var s string
- for _, child := range prc.children {
- s += child.(ParseTree).GetText()
- }
-
- return s
-}
-
-// Double dispatch methods for listeners
-func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
-}
-
-func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
-}
-
-// * Does not set parent link other add methods do that///
-func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
- if prc.children == nil {
- prc.children = make([]Tree, 0)
- }
- if child == nil {
- panic("Child may not be null")
- }
- prc.children = append(prc.children, child)
- return child
-}
-
-func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
- if prc.children == nil {
- prc.children = make([]Tree, 0)
- }
- if child == nil {
- panic("Child may not be null")
- }
- prc.children = append(prc.children, child)
- return child
-}
-
-// * Used by EnterOuterAlt to toss out a RuleContext previously added as
-// we entered a rule. If we have // label, we will need to remove
-// generic ruleContext object.
-// /
-func (prc *BaseParserRuleContext) RemoveLastChild() {
- if prc.children != nil && len(prc.children) > 0 {
- prc.children = prc.children[0 : len(prc.children)-1]
- }
-}
-
-func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
-
- node := NewTerminalNodeImpl(token)
- prc.addTerminalNodeChild(node)
- node.parentCtx = prc
- return node
-
-}
-
-func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
- node := NewErrorNodeImpl(badToken)
- prc.addTerminalNodeChild(node)
- node.parentCtx = prc
- return node
-}
-
-func (prc *BaseParserRuleContext) GetChild(i int) Tree {
- if prc.children != nil && len(prc.children) >= i {
- return prc.children[i]
- }
-
- return nil
-}
-
-func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
- if childType == nil {
- return prc.GetChild(i).(RuleContext)
- }
-
- for j := 0; j < len(prc.children); j++ {
- child := prc.children[j]
- if reflect.TypeOf(child) == childType {
- if i == 0 {
- return child.(RuleContext)
- }
-
- i--
- }
- }
-
- return nil
-}
-
-func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
- return TreesStringTree(prc, ruleNames, recog)
-}
-
-func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
- return prc
-}
-
-func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
- return visitor.VisitChildren(prc)
-}
-
-func (prc *BaseParserRuleContext) SetStart(t Token) {
- prc.start = t
-}
-
-func (prc *BaseParserRuleContext) GetStart() Token {
- return prc.start
-}
-
-func (prc *BaseParserRuleContext) SetStop(t Token) {
- prc.stop = t
-}
-
-func (prc *BaseParserRuleContext) GetStop() Token {
- return prc.stop
-}
-
-func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
-
- for j := 0; j < len(prc.children); j++ {
- child := prc.children[j]
- if c2, ok := child.(TerminalNode); ok {
- if c2.GetSymbol().GetTokenType() == ttype {
- if i == 0 {
- return c2
- }
-
- i--
- }
- }
- }
- return nil
-}
-
-func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
- if prc.children == nil {
- return make([]TerminalNode, 0)
- }
-
- tokens := make([]TerminalNode, 0)
-
- for j := 0; j < len(prc.children); j++ {
- child := prc.children[j]
- if tchild, ok := child.(TerminalNode); ok {
- if tchild.GetSymbol().GetTokenType() == ttype {
- tokens = append(tokens, tchild)
- }
- }
- }
-
- return tokens
-}
-
-func (prc *BaseParserRuleContext) GetPayload() interface{} {
- return prc
-}
-
-func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
- if prc.children == nil || i < 0 || i >= len(prc.children) {
- return nil
- }
-
- j := -1 // what element have we found with ctxType?
- for _, o := range prc.children {
-
- childType := reflect.TypeOf(o)
-
- if childType.Implements(ctxType) {
- j++
- if j == i {
- return o.(RuleContext)
- }
- }
- }
- return nil
-}
-
-// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
-// check for convertibility
-
-func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
- return prc.getChild(ctxType, i)
-}
-
-func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
- if prc.children == nil {
- return make([]RuleContext, 0)
- }
-
- contexts := make([]RuleContext, 0)
-
- for _, child := range prc.children {
- childType := reflect.TypeOf(child)
-
- if childType.ConvertibleTo(ctxType) {
- contexts = append(contexts, child.(RuleContext))
- }
- }
- return contexts
-}
-
-func (prc *BaseParserRuleContext) GetChildCount() int {
- if prc.children == nil {
- return 0
- }
-
- return len(prc.children)
-}
-
-func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
- if prc.start == nil || prc.stop == nil {
- return TreeInvalidInterval
- }
-
- return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
-}
-
-//need to manage circular dependencies, so export now
-
-// Print out a whole tree, not just a node, in LISP format
-// (root child1 .. childN). Print just a node if b is a leaf.
-//
-
-func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
-
- var p ParserRuleContext = prc
- s := "["
- for p != nil && p != stop {
- if ruleNames == nil {
- if !p.IsEmpty() {
- s += strconv.Itoa(p.GetInvokingState())
- }
- } else {
- ri := p.GetRuleIndex()
- var ruleName string
- if ri >= 0 && ri < len(ruleNames) {
- ruleName = ruleNames[ri]
- } else {
- ruleName = strconv.Itoa(ri)
- }
- s += ruleName
- }
- if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
- s += " "
- }
- pi := p.GetParent()
- if pi != nil {
- p = pi.(ParserRuleContext)
- } else {
- p = nil
- }
- }
- s += "]"
- return s
-}
-
-var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
-
-type InterpreterRuleContext interface {
- ParserRuleContext
-}
-
-type BaseInterpreterRuleContext struct {
- *BaseParserRuleContext
-}
-
-func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
-
- prc := new(BaseInterpreterRuleContext)
-
- prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
-
- prc.RuleIndex = ruleIndex
-
- return prc
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
deleted file mode 100644
index 9fdfd52b26..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "strconv"
-)
-
-// Represents {@code $} in local context prediction, which means wildcard.
-// {@code//+x =//}.
-// /
-const (
- BasePredictionContextEmptyReturnState = 0x7FFFFFFF
-)
-
-// Represents {@code $} in an array in full context mode, when {@code $}
-// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
-// {@code $} = {@link //EmptyReturnState}.
-// /
-
-var (
- BasePredictionContextglobalNodeCount = 1
- BasePredictionContextid = BasePredictionContextglobalNodeCount
-)
-
-type PredictionContext interface {
- hash() int
- GetParent(int) PredictionContext
- getReturnState(int) int
- equals(PredictionContext) bool
- length() int
- isEmpty() bool
- hasEmptyPath() bool
- String() string
-}
-
-type BasePredictionContext struct {
- cachedHash int
-}
-
-func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
- pc := new(BasePredictionContext)
- pc.cachedHash = cachedHash
-
- return pc
-}
-
-func (b *BasePredictionContext) isEmpty() bool {
- return false
-}
-
-func calculateHash(parent PredictionContext, returnState int) int {
- h := murmurInit(1)
- h = murmurUpdate(h, parent.hash())
- h = murmurUpdate(h, returnState)
- return murmurFinish(h, 2)
-}
-
-var _emptyPredictionContextHash int
-
-func init() {
- _emptyPredictionContextHash = murmurInit(1)
- _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
-}
-
-func calculateEmptyHash() int {
- return _emptyPredictionContextHash
-}
-
-// Used to cache {@link BasePredictionContext} objects. Its used for the shared
-// context cash associated with contexts in DFA states. This cache
-// can be used for both lexers and parsers.
-
-type PredictionContextCache struct {
- cache map[PredictionContext]PredictionContext
-}
-
-func NewPredictionContextCache() *PredictionContextCache {
- t := new(PredictionContextCache)
- t.cache = make(map[PredictionContext]PredictionContext)
- return t
-}
-
-// Add a context to the cache and return it. If the context already exists,
-// return that one instead and do not add a Newcontext to the cache.
-// Protect shared cache from unsafe thread access.
-//
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
- if ctx == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY
- }
- existing := p.cache[ctx]
- if existing != nil {
- return existing
- }
- p.cache[ctx] = ctx
- return ctx
-}
-
-func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
- return p.cache[ctx]
-}
-
-func (p *PredictionContextCache) length() int {
- return len(p.cache)
-}
-
-type SingletonPredictionContext interface {
- PredictionContext
-}
-
-type BaseSingletonPredictionContext struct {
- *BasePredictionContext
-
- parentCtx PredictionContext
- returnState int
-}
-
-func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
- var cachedHash int
- if parent != nil {
- cachedHash = calculateHash(parent, returnState)
- } else {
- cachedHash = calculateEmptyHash()
- }
-
- s := new(BaseSingletonPredictionContext)
- s.BasePredictionContext = NewBasePredictionContext(cachedHash)
-
- s.parentCtx = parent
- s.returnState = returnState
-
- return s
-}
-
-func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
- if returnState == BasePredictionContextEmptyReturnState && parent == nil {
- // someone can pass in the bits of an array ctx that mean $
- return BasePredictionContextEMPTY
- }
-
- return NewBaseSingletonPredictionContext(parent, returnState)
-}
-
-func (b *BaseSingletonPredictionContext) length() int {
- return 1
-}
-
-func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
- return b.parentCtx
-}
-
-func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
- return b.returnState
-}
-
-func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
- return b.returnState == BasePredictionContextEmptyReturnState
-}
-
-func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
- if b == other {
- return true
- } else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
- return false
- } else if b.hash() != other.hash() {
- return false // can't be same if hash is different
- }
-
- otherP := other.(*BaseSingletonPredictionContext)
-
- if b.returnState != other.getReturnState(0) {
- return false
- } else if b.parentCtx == nil {
- return otherP.parentCtx == nil
- }
-
- return b.parentCtx.equals(otherP.parentCtx)
-}
-
-func (b *BaseSingletonPredictionContext) hash() int {
- return b.cachedHash
-}
-
-func (b *BaseSingletonPredictionContext) String() string {
- var up string
-
- if b.parentCtx == nil {
- up = ""
- } else {
- up = b.parentCtx.String()
- }
-
- if len(up) == 0 {
- if b.returnState == BasePredictionContextEmptyReturnState {
- return "$"
- }
-
- return strconv.Itoa(b.returnState)
- }
-
- return strconv.Itoa(b.returnState) + " " + up
-}
-
-var BasePredictionContextEMPTY = NewEmptyPredictionContext()
-
-type EmptyPredictionContext struct {
- *BaseSingletonPredictionContext
-}
-
-func NewEmptyPredictionContext() *EmptyPredictionContext {
-
- p := new(EmptyPredictionContext)
-
- p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
-
- return p
-}
-
-func (e *EmptyPredictionContext) isEmpty() bool {
- return true
-}
-
-func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
- return nil
-}
-
-func (e *EmptyPredictionContext) getReturnState(index int) int {
- return e.returnState
-}
-
-func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
- return e == other
-}
-
-func (e *EmptyPredictionContext) String() string {
- return "$"
-}
-
-type ArrayPredictionContext struct {
- *BasePredictionContext
-
- parents []PredictionContext
- returnStates []int
-}
-
-func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
- // Parent can be nil only if full ctx mode and we make an array
- // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
- // nil parent and
- // returnState == {@link //EmptyReturnState}.
- hash := murmurInit(1)
-
- for _, parent := range parents {
- hash = murmurUpdate(hash, parent.hash())
- }
-
- for _, returnState := range returnStates {
- hash = murmurUpdate(hash, returnState)
- }
-
- hash = murmurFinish(hash, len(parents)<<1)
-
- c := new(ArrayPredictionContext)
- c.BasePredictionContext = NewBasePredictionContext(hash)
-
- c.parents = parents
- c.returnStates = returnStates
-
- return c
-}
-
-func (a *ArrayPredictionContext) GetReturnStates() []int {
- return a.returnStates
-}
-
-func (a *ArrayPredictionContext) hasEmptyPath() bool {
- return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
-}
-
-func (a *ArrayPredictionContext) isEmpty() bool {
- // since EmptyReturnState can only appear in the last position, we
- // don't need to verify that size==1
- return a.returnStates[0] == BasePredictionContextEmptyReturnState
-}
-
-func (a *ArrayPredictionContext) length() int {
- return len(a.returnStates)
-}
-
-func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
- return a.parents[index]
-}
-
-func (a *ArrayPredictionContext) getReturnState(index int) int {
- return a.returnStates[index]
-}
-
-func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
- if _, ok := other.(*ArrayPredictionContext); !ok {
- return false
- } else if a.cachedHash != other.hash() {
- return false // can't be same if hash is different
- } else {
- otherP := other.(*ArrayPredictionContext)
- return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
- }
-}
-
-func (a *ArrayPredictionContext) hash() int {
- return a.BasePredictionContext.cachedHash
-}
-
-func (a *ArrayPredictionContext) String() string {
- if a.isEmpty() {
- return "[]"
- }
-
- s := "["
- for i := 0; i < len(a.returnStates); i++ {
- if i > 0 {
- s = s + ", "
- }
- if a.returnStates[i] == BasePredictionContextEmptyReturnState {
- s = s + "$"
- continue
- }
- s = s + strconv.Itoa(a.returnStates[i])
- if a.parents[i] != nil {
- s = s + " " + a.parents[i].String()
- } else {
- s = s + "nil"
- }
- }
-
- return s + "]"
-}
-
-// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
-// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
-// /
-func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
- if outerContext == nil {
- outerContext = RuleContextEmpty
- }
- // if we are in RuleContext of start rule, s, then BasePredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
- return BasePredictionContextEMPTY
- }
- // If we have a parent, convert it to a BasePredictionContext graph
- parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
- state := a.states[outerContext.GetInvokingState()]
- transition := state.GetTransitions()[0]
-
- return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
-}
-
-func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- // share same graph if both same
- if a == b {
- return a
- }
-
- ac, ok1 := a.(*BaseSingletonPredictionContext)
- bc, ok2 := b.(*BaseSingletonPredictionContext)
-
- if ok1 && ok2 {
- return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
- }
- // At least one of a or b is array
- // If one is $ and rootIsWildcard, return $ as// wildcard
- if rootIsWildcard {
- if _, ok := a.(*EmptyPredictionContext); ok {
- return a
- }
- if _, ok := b.(*EmptyPredictionContext); ok {
- return b
- }
- }
- // convert singleton so both are arrays to normalize
- if _, ok := a.(*BaseSingletonPredictionContext); ok {
- a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
- }
- if _, ok := b.(*BaseSingletonPredictionContext); ok {
- b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
- }
- return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
-}
-
-//
-// Merge two {@link SingletonBasePredictionContext} instances.
-//
-// Stack tops equal, parents merge is same return left graph.
-//
-//
-// Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A Newroot node is created to point to the
-// merged parents.
-//
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.
-//
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// @param mergeCache
-// /
-func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- previous = mergeCache.Get(b.hash(), a.hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- }
-
- rootMerge := mergeRoot(a, b, rootIsWildcard)
- if rootMerge != nil {
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), rootMerge)
- }
- return rootMerge
- }
- if a.returnState == b.returnState {
- parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
- // if parent is same as existing a or b parent or reduced to a parent,
- // return it
- if parent == a.parentCtx {
- return a // ax + bx = ax, if a=b
- }
- if parent == b.parentCtx {
- return b // ax + bx = bx, if a=b
- }
- // else: ax + ay = a'[x,y]
- // merge parents x and y, giving array node with x,y then remainders
- // of those graphs. dup a, a' points at merged array
- // Newjoined parent so create Newsingleton pointing to it, a'
- spc := SingletonBasePredictionContextCreate(parent, a.returnState)
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), spc)
- }
- return spc
- }
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- var singleParent PredictionContext
- if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
- // bx =
- // [a,b]x
- singleParent = a.parentCtx
- }
- if singleParent != nil { // parents are same
- // sort payloads and use same parent
- payloads := []int{a.returnState, b.returnState}
- if a.returnState > b.returnState {
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- }
- parents := []PredictionContext{singleParent, singleParent}
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
- }
- return apc
- }
- // parents differ and can't merge them. Just pack together
- // into array can't merge.
- // ax + by = [ax,by]
- payloads := []int{a.returnState, b.returnState}
- parents := []PredictionContext{a.parentCtx, b.parentCtx}
- if a.returnState > b.returnState { // sort by payload
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- parents = []PredictionContext{b.parentCtx, a.parentCtx}
- }
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
- }
- return apc
-}
-
-//
-// Handle case where at least one of {@code a} or {@code b} is
-// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
-// to represent {@link //EMPTY}.
-//
-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
-//
-// Special case of last merge if local context.
-//
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-//
-//
-// Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
-//
-//
-//
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// /
-func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
- if rootIsWildcard {
- if a == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY // // + b =//
- }
- if b == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY // a +// =//
- }
- } else {
- if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY // $ + $ = $
- } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
- payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []PredictionContext{b.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
- payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []PredictionContext{a.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- }
- }
- return nil
-}
-
-//
-// Merge two {@link ArrayBasePredictionContext} instances.
-//
-// Different tops, different parents.
-//
-//
-// Shared top, same parents.
-//
-//
-// Shared top, different parents.
-//
-//
-// Shared top, all shared parents.
-//
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
-// /
-func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- previous = mergeCache.Get(b.hash(), a.hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- }
- // merge sorted payloads a + b => M
- i := 0 // walks a
- j := 0 // walks b
- k := 0 // walks target M array
-
- mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
- mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
- // walk and merge to yield mergedParents, mergedReturnStates
- for i < len(a.returnStates) && j < len(b.returnStates) {
- aParent := a.parents[i]
- bParent := b.parents[j]
- if a.returnStates[i] == b.returnStates[j] {
- // same payload (stack tops are equal), must yield merged singleton
- payload := a.returnStates[i]
- // $+$ = $
- bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
- axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
- // ->
- // ax
- if bothDollars || axAX {
- mergedParents[k] = aParent // choose left
- mergedReturnStates[k] = payload
- } else { // ax+ay -> a'[x,y]
- mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
- mergedParents[k] = mergedParent
- mergedReturnStates[k] = payload
- }
- i++ // hop over left one as usual
- j++ // but also Skip one in right side since we merge
- } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
- mergedParents[k] = aParent
- mergedReturnStates[k] = a.returnStates[i]
- i++
- } else { // b > a, copy b[j] to M
- mergedParents[k] = bParent
- mergedReturnStates[k] = b.returnStates[j]
- j++
- }
- k++
- }
- // copy over any payloads remaining in either array
- if i < len(a.returnStates) {
- for p := i; p < len(a.returnStates); p++ {
- mergedParents[k] = a.parents[p]
- mergedReturnStates[k] = a.returnStates[p]
- k++
- }
- } else {
- for p := j; p < len(b.returnStates); p++ {
- mergedParents[k] = b.parents[p]
- mergedReturnStates[k] = b.returnStates[p]
- k++
- }
- }
- // trim merged if we combined a few that had same stack tops
- if k < len(mergedParents) { // write index < last position trim
- if k == 1 { // for just one merged element, return singleton top
- pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), pc)
- }
- return pc
- }
- mergedParents = mergedParents[0:k]
- mergedReturnStates = mergedReturnStates[0:k]
- }
-
- M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
-
- // if we created same array as a or b, return that instead
- // TODO: track whether this is possible above during merge sort for speed
- if M == a {
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), a)
- }
- return a
- }
- if M == b {
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), b)
- }
- return b
- }
- combineCommonParents(mergedParents)
-
- if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), M)
- }
- return M
-}
-
-//
-// Make pass over all M {@code parents} merge any {@code equals()}
-// ones.
-// /
-func combineCommonParents(parents []PredictionContext) {
- uniqueParents := make(map[PredictionContext]PredictionContext)
-
- for p := 0; p < len(parents); p++ {
- parent := parents[p]
- if uniqueParents[parent] == nil {
- uniqueParents[parent] = parent
- }
- }
- for q := 0; q < len(parents); q++ {
- parents[q] = uniqueParents[parents[q]]
- }
-}
-
-func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
-
- if context.isEmpty() {
- return context
- }
- existing := visited[context]
- if existing != nil {
- return existing
- }
- existing = contextCache.Get(context)
- if existing != nil {
- visited[context] = existing
- return existing
- }
- changed := false
- parents := make([]PredictionContext, context.length())
- for i := 0; i < len(parents); i++ {
- parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
- if changed || parent != context.GetParent(i) {
- if !changed {
- parents = make([]PredictionContext, context.length())
- for j := 0; j < context.length(); j++ {
- parents[j] = context.GetParent(j)
- }
- changed = true
- }
- parents[i] = parent
- }
- }
- if !changed {
- contextCache.add(context)
- visited[context] = context
- return context
- }
- var updated PredictionContext
- if len(parents) == 0 {
- updated = BasePredictionContextEMPTY
- } else if len(parents) == 1 {
- updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
- } else {
- updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
- }
- contextCache.add(updated)
- visited[updated] = updated
- visited[context] = updated
-
- return updated
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
deleted file mode 100644
index 15718f912b..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
+++ /dev/null
@@ -1,553 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// This enumeration defines the prediction modes available in ANTLR 4 along with
-// utility methods for analyzing configuration sets for conflicts and/or
-// ambiguities.
-
-const (
- //
- // The SLL(*) prediction mode. This prediction mode ignores the current
- // parser context when making predictions. This is the fastest prediction
- // mode, and provides correct results for many grammars. This prediction
- // mode is more powerful than the prediction mode provided by ANTLR 3, but
- // may result in syntax errors for grammar and input combinations which are
- // not SLL.
- //
- //
- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeSLL = 0
- //
- // The LL(*) prediction mode. This prediction mode allows the current parser
- // context to be used for resolving SLL conflicts that occur during
- // prediction. This is the fastest prediction mode that guarantees correct
- // parse results for all combinations of grammars with syntactically correct
- // inputs.
- //
- //
- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLL = 1
- //
- // The LL(*) prediction mode with exact ambiguity detection. In addition to
- // the correctness guarantees provided by the {@link //LL} prediction mode,
- // this prediction mode instructs the prediction algorithm to determine the
- // complete and exact set of ambiguous alternatives for every ambiguous
- // decision encountered while parsing.
- //
- //
- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLLExactAmbigDetection = 2
-)
-
-//
-// Computes the SLL prediction termination condition.
-//
-//
-// This method computes the SLL prediction termination condition for both of
-// the following cases.
-//
-//
-// The usual SLL+LL fallback upon SLL conflict
-// Pure SLL without LL fallback
-//
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }
-//
-// When the ATN simulation reaches the state before {@code ''}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative to has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also let's us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts so we might see two configurations like the
-// following.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
-//
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
- // Configs in rule stop states indicate reaching the end of the decision
- // rule (local context) or end of start rule (full context). If all
- // configs meet this condition, then none of the configurations is able
- // to Match additional input so we terminate prediction.
- //
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return true
- }
- // pure SLL mode parsing
- if mode == PredictionModeSLL {
- // Don't bother with combining configs from different semantic
- // contexts if we can fail over to full LL costs more time
- // since we'll often fail over anyway.
- if configs.HasSemanticContext() {
- // dup configs, tossing out semantic predicates
- dup := NewBaseATNConfigSet(false)
- for _, c := range configs.GetItems() {
-
- // NewBaseATNConfig({semanticContext:}, c)
- c = NewBaseATNConfig2(c, SemanticContextNone)
- dup.Add(c, nil)
- }
- configs = dup
- }
- // now we have combined contexts for configs with dissimilar preds
- }
- // pure SLL or combined SLL+LL mode parsing
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
-}
-
-// Checks if any configuration in {@code configs} is in a
-// {@link RuleStopState}. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// @param configs the configuration set to test
-// @return {@code true} if any configuration in {@code configs} is in a
-// {@link RuleStopState}, otherwise {@code false}
-func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
- for _, c := range configs.GetItems() {
- if _, ok := c.GetState().(*RuleStopState); ok {
- return true
- }
- }
- return false
-}
-
-// Checks if all configurations in {@code configs} are in a
-// {@link RuleStopState}. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// @param configs the configuration set to test
-// @return {@code true} if all configurations in {@code configs} are in a
-// {@link RuleStopState}, otherwise {@code false}
-func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
-
- for _, c := range configs.GetItems() {
- if _, ok := c.GetState().(*RuleStopState); !ok {
- return false
- }
- }
- return true
-}
-
-//
-// Full LL prediction termination.
-//
-// Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C}, into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing a equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us to strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-//
-//
-// no conflicts and more than 1 alternative in set => continue
-//
-// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
-// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
-// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1,3}} => continue
-//
-//
-// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
-// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1}} => stop and predict 1
-//
-// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {1}} = {@code {1}} => stop and predict 1, can announce
-// ambiguity {@code {1,2}}
-//
-// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
-// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {2}} = {@code {1,2}} => continue
-//
-// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
-// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {3}} = {@code {1,3}} => continue
-//
-//
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set.
-//
-// |A_i |>1 and
-// A_i = A_j for all i , j .
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-//
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
- return PredictionModegetSingleViableAlt(altsets)
-}
-
-//
-// Determines if every alternative subset in {@code altsets} contains more
-// than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every {@link BitSet} in {@code altsets} has
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
-func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
- return !PredictionModehasNonConflictingAltSet(altsets)
-}
-
-//
-// Determines if any single alternative subset in {@code altsets} contains
-// exactly one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
-//
-func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() == 1 {
- return true
- }
- }
- return false
-}
-
-//
-// Determines if any single alternative subset in {@code altsets} contains
-// more than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
-func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() > 1 {
- return true
- }
- }
- return false
-}
-
-//
-// Determines if every alternative subset in {@code altsets} is equivalent.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every member of {@code altsets} is equal to the
-// others, otherwise {@code false}
-//
-func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
- var first *BitSet
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if first == nil {
- first = alts
- } else if alts != first {
- return false
- }
- }
-
- return true
-}
-
-//
-// Returns the unique alternative predicted by all alternative subsets in
-// {@code altsets}. If no such alternative exists, this method returns
-// {@link ATN//INVALID_ALT_NUMBER}.
-//
-// @param altsets a collection of alternative subsets
-//
-func PredictionModegetUniqueAlt(altsets []*BitSet) int {
- all := PredictionModeGetAlts(altsets)
- if all.length() == 1 {
- return all.minValue()
- }
-
- return ATNInvalidAltNumber
-}
-
-// Gets the complete set of represented alternatives for a collection of
-// alternative subsets. This method returns the union of each {@link BitSet}
-// in {@code altsets}.
-//
-// @param altsets a collection of alternative subsets
-// @return the set of represented alternatives in {@code altsets}
-//
-func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
- all := NewBitSet()
- for _, alts := range altsets {
- all.or(alts)
- }
- return all
-}
-
-//
-// This func gets the conflicting alt subsets from a configuration set.
-// For each configuration {@code c} in {@code configs}:
-//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-//
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
- configToAlts := make(map[int]*BitSet)
-
- for _, c := range configs.GetItems() {
- key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
-
- alts, ok := configToAlts[key]
- if !ok {
- alts = NewBitSet()
- configToAlts[key] = alts
- }
- alts.add(c.GetAlt())
- }
-
- values := make([]*BitSet, 0, 10)
- for _, v := range configToAlts {
- values = append(values, v)
- }
- return values
-}
-
-//
-// Get a map from state to alt subset from a configuration set. For each
-// configuration {@code c} in {@code configs}:
-//
-//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-//
-//
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
- m := NewAltDict()
-
- for _, c := range configs.GetItems() {
- alts := m.Get(c.GetState().String())
- if alts == nil {
- alts = NewBitSet()
- m.put(c.GetState().String(), alts)
- }
- alts.(*BitSet).add(c.GetAlt())
- }
- return m
-}
-
-func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
- values := PredictionModeGetStateToAltMap(configs).values()
- for i := 0; i < len(values); i++ {
- if values[i].(*BitSet).length() == 1 {
- return true
- }
- }
- return false
-}
-
-func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
- result := ATNInvalidAltNumber
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- minAlt := alts.minValue()
- if result == ATNInvalidAltNumber {
- result = minAlt
- } else if result != minAlt { // more than 1 viable alt
- return ATNInvalidAltNumber
- }
- }
- return result
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
deleted file mode 100644
index 93efcf355d..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strings"
-
- "strconv"
-)
-
-type Recognizer interface {
- GetLiteralNames() []string
- GetSymbolicNames() []string
- GetRuleNames() []string
-
- Sempred(RuleContext, int, int) bool
- Precpred(RuleContext, int) bool
-
- GetState() int
- SetState(int)
- Action(RuleContext, int, int)
- AddErrorListener(ErrorListener)
- RemoveErrorListeners()
- GetATN() *ATN
- GetErrorListenerDispatch() ErrorListener
-}
-
-type BaseRecognizer struct {
- listeners []ErrorListener
- state int
-
- RuleNames []string
- LiteralNames []string
- SymbolicNames []string
- GrammarFileName string
-}
-
-func NewBaseRecognizer() *BaseRecognizer {
- rec := new(BaseRecognizer)
- rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
- rec.state = -1
- return rec
-}
-
-var tokenTypeMapCache = make(map[string]int)
-var ruleIndexMapCache = make(map[string]int)
-
-func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.10.1"
- if runtimeVersion != toolVersion {
- fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
- }
-}
-
-func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
- panic("action not implemented on Recognizer!")
-}
-
-func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
- b.listeners = append(b.listeners, listener)
-}
-
-func (b *BaseRecognizer) RemoveErrorListeners() {
- b.listeners = make([]ErrorListener, 0)
-}
-
-func (b *BaseRecognizer) GetRuleNames() []string {
- return b.RuleNames
-}
-
-func (b *BaseRecognizer) GetTokenNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetSymbolicNames() []string {
- return b.SymbolicNames
-}
-
-func (b *BaseRecognizer) GetLiteralNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetState() int {
- return b.state
-}
-
-func (b *BaseRecognizer) SetState(v int) {
- b.state = v
-}
-
-//func (b *Recognizer) GetTokenTypeMap() {
-// var tokenNames = b.GetTokenNames()
-// if (tokenNames==nil) {
-// panic("The current recognizer does not provide a list of token names.")
-// }
-// var result = tokenTypeMapCache[tokenNames]
-// if(result==nil) {
-// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
-// result.EOF = TokenEOF
-// tokenTypeMapCache[tokenNames] = result
-// }
-// return result
-//}
-
-// Get a map from rule names to rule indexes.
-//
-// Used for XPath and tree pattern compilation.
-//
-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
-
- panic("Method not defined!")
- // var ruleNames = b.GetRuleNames()
- // if (ruleNames==nil) {
- // panic("The current recognizer does not provide a list of rule names.")
- // }
- //
- // var result = ruleIndexMapCache[ruleNames]
- // if(result==nil) {
- // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
- // ruleIndexMapCache[ruleNames] = result
- // }
- // return result
-}
-
-func (b *BaseRecognizer) GetTokenType(tokenName string) int {
- panic("Method not defined!")
- // var ttype = b.GetTokenTypeMap()[tokenName]
- // if (ttype !=nil) {
- // return ttype
- // } else {
- // return TokenInvalidType
- // }
-}
-
-//func (b *Recognizer) GetTokenTypeMap() map[string]int {
-// Vocabulary vocabulary = getVocabulary()
-//
-// Synchronized (tokenTypeMapCache) {
-// Map result = tokenTypeMapCache.Get(vocabulary)
-// if (result == null) {
-// result = new HashMap()
-// for (int i = 0; i < GetATN().maxTokenType; i++) {
-// String literalName = vocabulary.getLiteralName(i)
-// if (literalName != null) {
-// result.put(literalName, i)
-// }
-//
-// String symbolicName = vocabulary.GetSymbolicName(i)
-// if (symbolicName != null) {
-// result.put(symbolicName, i)
-// }
-// }
-//
-// result.put("EOF", Token.EOF)
-// result = Collections.unmodifiableMap(result)
-// tokenTypeMapCache.put(vocabulary, result)
-// }
-//
-// return result
-// }
-//}
-
-// What is the error header, normally line/character position information?//
-func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
- line := e.GetOffendingToken().GetLine()
- column := e.GetOffendingToken().GetColumn()
- return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
-}
-
-// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
-//
-// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
-// implementations of {@link ANTLRErrorStrategy} may provide a similar
-// feature when necessary. For example, see
-// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
-//
-func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
- if t == nil {
- return ""
- }
- s := t.GetText()
- if s == "" {
- if t.GetTokenType() == TokenEOF {
- s = ""
- } else {
- s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
- }
- }
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
-
- return "'" + s + "'"
-}
-
-func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
- return NewProxyErrorListener(b.listeners)
-}
-
-// subclass needs to override these if there are sempreds or actions
-// that the ATN interp needs to execute
-func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
- return true
-}
-
-func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
- return true
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
deleted file mode 100644
index 600cf8c062..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// A rule context is a record of a single rule invocation. It knows
-// which context invoked it, if any. If there is no parent context, then
-// naturally the invoking state is not valid. The parent link
-// provides a chain upwards from the current rule invocation to the root
-// of the invocation tree, forming a stack. We actually carry no
-// information about the rule associated with b context (except
-// when parsing). We keep only the state number of the invoking state from
-// the ATN submachine that invoked b. Contrast b with the s
-// pointer inside ParserRuleContext that tracks the current state
-// being "executed" for the current rule.
-//
-// The parent contexts are useful for computing lookahead sets and
-// getting error information.
-//
-// These objects are used during parsing and prediction.
-// For the special case of parsers, we use the subclass
-// ParserRuleContext.
-//
-// @see ParserRuleContext
-//
-
-type RuleContext interface {
- RuleNode
-
- GetInvokingState() int
- SetInvokingState(int)
-
- GetRuleIndex() int
- IsEmpty() bool
-
- GetAltNumber() int
- SetAltNumber(altNumber int)
-
- String([]string, RuleContext) string
-}
-
-type BaseRuleContext struct {
- parentCtx RuleContext
- invokingState int
- RuleIndex int
-}
-
-func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
-
- rn := new(BaseRuleContext)
-
- // What context invoked b rule?
- rn.parentCtx = parent
-
- // What state invoked the rule associated with b context?
- // The "return address" is the followState of invokingState
- // If parent is nil, b should be -1.
- if parent == nil {
- rn.invokingState = -1
- } else {
- rn.invokingState = invokingState
- }
-
- return rn
-}
-
-func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
- return b
-}
-
-func (b *BaseRuleContext) SetParent(v Tree) {
- if v == nil {
- b.parentCtx = nil
- } else {
- b.parentCtx = v.(RuleContext)
- }
-}
-
-func (b *BaseRuleContext) GetInvokingState() int {
- return b.invokingState
-}
-
-func (b *BaseRuleContext) SetInvokingState(t int) {
- b.invokingState = t
-}
-
-func (b *BaseRuleContext) GetRuleIndex() int {
- return b.RuleIndex
-}
-
-func (b *BaseRuleContext) GetAltNumber() int {
- return ATNInvalidAltNumber
-}
-
-func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
-
-// A context is empty if there is no invoking state meaning nobody call
-// current context.
-func (b *BaseRuleContext) IsEmpty() bool {
- return b.invokingState == -1
-}
-
-// Return the combined text of all child nodes. This method only considers
-// tokens which have been added to the parse tree.
-//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not
-// added to the parse trees, they will not appear in the output of b
-// method.
-//
-
-func (b *BaseRuleContext) GetParent() Tree {
- return b.parentCtx
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
deleted file mode 100644
index 9ada430779..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-// A tree structure used to record the semantic context in which
-// an ATN configuration is valid. It's either a single predicate,
-// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
-//
-//
I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
-//
-
-type SemanticContext interface {
- comparable
-
- evaluate(parser Recognizer, outerContext RuleContext) bool
- evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
-
- hash() int
- String() string
-}
-
-func SemanticContextandContext(a, b SemanticContext) SemanticContext {
- if a == nil || a == SemanticContextNone {
- return b
- }
- if b == nil || b == SemanticContextNone {
- return a
- }
- result := NewAND(a, b)
- if len(result.opnds) == 1 {
- return result.opnds[0]
- }
-
- return result
-}
-
-func SemanticContextorContext(a, b SemanticContext) SemanticContext {
- if a == nil {
- return b
- }
- if b == nil {
- return a
- }
- if a == SemanticContextNone || b == SemanticContextNone {
- return SemanticContextNone
- }
- result := NewOR(a, b)
- if len(result.opnds) == 1 {
- return result.opnds[0]
- }
-
- return result
-}
-
-type Predicate struct {
- ruleIndex int
- predIndex int
- isCtxDependent bool
-}
-
-func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
- p := new(Predicate)
-
- p.ruleIndex = ruleIndex
- p.predIndex = predIndex
- p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- return p
-}
-
-//The default {@link SemanticContext}, which is semantically equivalent to
-//a predicate of the form {@code {true}?}.
-
-var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
-
-func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
- return p
-}
-
-func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
-
- var localctx RuleContext
-
- if p.isCtxDependent {
- localctx = outerContext
- }
-
- return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
-}
-
-func (p *Predicate) equals(other interface{}) bool {
- if p == other {
- return true
- } else if _, ok := other.(*Predicate); !ok {
- return false
- } else {
- return p.ruleIndex == other.(*Predicate).ruleIndex &&
- p.predIndex == other.(*Predicate).predIndex &&
- p.isCtxDependent == other.(*Predicate).isCtxDependent
- }
-}
-
-func (p *Predicate) hash() int {
- h := murmurInit(0)
- h = murmurUpdate(h, p.ruleIndex)
- h = murmurUpdate(h, p.predIndex)
- if p.isCtxDependent {
- h = murmurUpdate(h, 1)
- } else {
- h = murmurUpdate(h, 0)
- }
- return murmurFinish(h, 3)
-}
-
-func (p *Predicate) String() string {
- return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
-}
-
-type PrecedencePredicate struct {
- precedence int
-}
-
-func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
-
- p := new(PrecedencePredicate)
- p.precedence = precedence
-
- return p
-}
-
-func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
- return parser.Precpred(outerContext, p.precedence)
-}
-
-func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
- if parser.Precpred(outerContext, p.precedence) {
- return SemanticContextNone
- }
-
- return nil
-}
-
-func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
- return p.precedence - other.precedence
-}
-
-func (p *PrecedencePredicate) equals(other interface{}) bool {
- if p == other {
- return true
- } else if _, ok := other.(*PrecedencePredicate); !ok {
- return false
- } else {
- return p.precedence == other.(*PrecedencePredicate).precedence
- }
-}
-
-func (p *PrecedencePredicate) hash() int {
- h := uint32(1)
- h = 31*h + uint32(p.precedence)
- return int(h)
-}
-
-func (p *PrecedencePredicate) String() string {
- return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
-}
-
-func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
- result := make([]*PrecedencePredicate, 0)
-
- set.Each(func(v interface{}) bool {
- if c2, ok := v.(*PrecedencePredicate); ok {
- result = append(result, c2)
- }
- return true
- })
-
- return result
-}
-
-// A semantic context which is true whenever none of the contained contexts
-// is false.`
-
-type AND struct {
- opnds []SemanticContext
-}
-
-func NewAND(a, b SemanticContext) *AND {
-
- operands := newArray2DHashSet(nil, nil)
- if aa, ok := a.(*AND); ok {
- for _, o := range aa.opnds {
- operands.Add(o)
- }
- } else {
- operands.Add(a)
- }
-
- if ba, ok := b.(*AND); ok {
- for _, o := range ba.opnds {
- operands.Add(o)
- }
- } else {
- operands.Add(b)
- }
- precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
- if len(precedencePredicates) > 0 {
- // interested in the transition with the lowest precedence
- var reduced *PrecedencePredicate
-
- for _, p := range precedencePredicates {
- if reduced == nil || p.precedence < reduced.precedence {
- reduced = p
- }
- }
-
- operands.Add(reduced)
- }
-
- vs := operands.Values()
- opnds := make([]SemanticContext, len(vs))
- for i, v := range vs {
- opnds[i] = v.(SemanticContext)
- }
-
- and := new(AND)
- and.opnds = opnds
-
- return and
-}
-
-func (a *AND) equals(other interface{}) bool {
- if a == other {
- return true
- } else if _, ok := other.(*AND); !ok {
- return false
- } else {
- for i, v := range other.(*AND).opnds {
- if !a.opnds[i].equals(v) {
- return false
- }
- }
- return true
- }
-}
-
-//
-// {@inheritDoc}
-//
-//
-// The evaluation of predicates by a context is short-circuiting, but
-// unordered.
-//
-func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
- for i := 0; i < len(a.opnds); i++ {
- if !a.opnds[i].evaluate(parser, outerContext) {
- return false
- }
- }
- return true
-}
-
-func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
- differs := false
- operands := make([]SemanticContext, 0)
-
- for i := 0; i < len(a.opnds); i++ {
- context := a.opnds[i]
- evaluated := context.evalPrecedence(parser, outerContext)
- differs = differs || (evaluated != context)
- if evaluated == nil {
- // The AND context is false if any element is false
- return nil
- } else if evaluated != SemanticContextNone {
- // Reduce the result by Skipping true elements
- operands = append(operands, evaluated)
- }
- }
- if !differs {
- return a
- }
-
- if len(operands) == 0 {
- // all elements were true, so the AND context is true
- return SemanticContextNone
- }
-
- var result SemanticContext
-
- for _, o := range operands {
- if result == nil {
- result = o
- } else {
- result = SemanticContextandContext(result, o)
- }
- }
-
- return result
-}
-
-func (a *AND) hash() int {
- h := murmurInit(37) // Init with a value different from OR
- for _, op := range a.opnds {
- h = murmurUpdate(h, op.hash())
- }
- return murmurFinish(h, len(a.opnds))
-}
-
-func (a *OR) hash() int {
- h := murmurInit(41) // Init with a value different from AND
- for _, op := range a.opnds {
- h = murmurUpdate(h, op.hash())
- }
- return murmurFinish(h, len(a.opnds))
-}
-
-func (a *AND) String() string {
- s := ""
-
- for _, o := range a.opnds {
- s += "&& " + fmt.Sprint(o)
- }
-
- if len(s) > 3 {
- return s[0:3]
- }
-
- return s
-}
-
-//
-// A semantic context which is true whenever at least one of the contained
-// contexts is true.
-//
-
-type OR struct {
- opnds []SemanticContext
-}
-
-func NewOR(a, b SemanticContext) *OR {
-
- operands := newArray2DHashSet(nil, nil)
- if aa, ok := a.(*OR); ok {
- for _, o := range aa.opnds {
- operands.Add(o)
- }
- } else {
- operands.Add(a)
- }
-
- if ba, ok := b.(*OR); ok {
- for _, o := range ba.opnds {
- operands.Add(o)
- }
- } else {
- operands.Add(b)
- }
- precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
- if len(precedencePredicates) > 0 {
- // interested in the transition with the lowest precedence
- var reduced *PrecedencePredicate
-
- for _, p := range precedencePredicates {
- if reduced == nil || p.precedence > reduced.precedence {
- reduced = p
- }
- }
-
- operands.Add(reduced)
- }
-
- vs := operands.Values()
-
- opnds := make([]SemanticContext, len(vs))
- for i, v := range vs {
- opnds[i] = v.(SemanticContext)
- }
-
- o := new(OR)
- o.opnds = opnds
-
- return o
-}
-
-func (o *OR) equals(other interface{}) bool {
- if o == other {
- return true
- } else if _, ok := other.(*OR); !ok {
- return false
- } else {
- for i, v := range other.(*OR).opnds {
- if !o.opnds[i].equals(v) {
- return false
- }
- }
- return true
- }
-}
-
-//
-// The evaluation of predicates by o context is short-circuiting, but
-// unordered.
-//
-func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
- for i := 0; i < len(o.opnds); i++ {
- if o.opnds[i].evaluate(parser, outerContext) {
- return true
- }
- }
- return false
-}
-
-func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
- differs := false
- operands := make([]SemanticContext, 0)
- for i := 0; i < len(o.opnds); i++ {
- context := o.opnds[i]
- evaluated := context.evalPrecedence(parser, outerContext)
- differs = differs || (evaluated != context)
- if evaluated == SemanticContextNone {
- // The OR context is true if any element is true
- return SemanticContextNone
- } else if evaluated != nil {
- // Reduce the result by Skipping false elements
- operands = append(operands, evaluated)
- }
- }
- if !differs {
- return o
- }
- if len(operands) == 0 {
- // all elements were false, so the OR context is false
- return nil
- }
- var result SemanticContext
-
- for _, o := range operands {
- if result == nil {
- result = o
- } else {
- result = SemanticContextorContext(result, o)
- }
- }
-
- return result
-}
-
-func (o *OR) String() string {
- s := ""
-
- for _, o := range o.opnds {
- s += "|| " + fmt.Sprint(o)
- }
-
- if len(s) > 3 {
- return s[0:3]
- }
-
- return s
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
deleted file mode 100644
index 2d8e99095d..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "strconv"
- "strings"
-)
-
-type TokenSourceCharStreamPair struct {
- tokenSource TokenSource
- charStream CharStream
-}
-
-// A token has properties: text, type, line, character position in the line
-// (so we can ignore tabs), token channel, index, and source from which
-// we obtained this token.
-
-type Token interface {
- GetSource() *TokenSourceCharStreamPair
- GetTokenType() int
- GetChannel() int
- GetStart() int
- GetStop() int
- GetLine() int
- GetColumn() int
-
- GetText() string
- SetText(s string)
-
- GetTokenIndex() int
- SetTokenIndex(v int)
-
- GetTokenSource() TokenSource
- GetInputStream() CharStream
-}
-
-type BaseToken struct {
- source *TokenSourceCharStreamPair
- tokenType int // token type of the token
- channel int // The parser ignores everything not on DEFAULT_CHANNEL
- start int // optional return -1 if not implemented.
- stop int // optional return -1 if not implemented.
- tokenIndex int // from 0..n-1 of the token object in the input stream
- line int // line=1..n of the 1st character
- column int // beginning of the line at which it occurs, 0..n-1
- text string // text of the token.
- readOnly bool
-}
-
-const (
- TokenInvalidType = 0
-
- // During lookahead operations, this "token" signifies we hit rule end ATN state
- // and did not follow it despite needing to.
- TokenEpsilon = -2
-
- TokenMinUserTokenType = 1
-
- TokenEOF = -1
-
- // All tokens go to the parser (unless Skip() is called in that rule)
- // on a particular "channel". The parser tunes to a particular channel
- // so that whitespace etc... can go to the parser on a "hidden" channel.
-
- TokenDefaultChannel = 0
-
- // Anything on different channel than DEFAULT_CHANNEL is not parsed
- // by parser.
-
- TokenHiddenChannel = 1
-)
-
-func (b *BaseToken) GetChannel() int {
- return b.channel
-}
-
-func (b *BaseToken) GetStart() int {
- return b.start
-}
-
-func (b *BaseToken) GetStop() int {
- return b.stop
-}
-
-func (b *BaseToken) GetLine() int {
- return b.line
-}
-
-func (b *BaseToken) GetColumn() int {
- return b.column
-}
-
-func (b *BaseToken) GetTokenType() int {
- return b.tokenType
-}
-
-func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
- return b.source
-}
-
-func (b *BaseToken) GetTokenIndex() int {
- return b.tokenIndex
-}
-
-func (b *BaseToken) SetTokenIndex(v int) {
- b.tokenIndex = v
-}
-
-func (b *BaseToken) GetTokenSource() TokenSource {
- return b.source.tokenSource
-}
-
-func (b *BaseToken) GetInputStream() CharStream {
- return b.source.charStream
-}
-
-type CommonToken struct {
- *BaseToken
-}
-
-func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
-
- t := new(CommonToken)
-
- t.BaseToken = new(BaseToken)
-
- t.source = source
- t.tokenType = tokenType
- t.channel = channel
- t.start = start
- t.stop = stop
- t.tokenIndex = -1
- if t.source.tokenSource != nil {
- t.line = source.tokenSource.GetLine()
- t.column = source.tokenSource.GetCharPositionInLine()
- } else {
- t.column = -1
- }
- return t
-}
-
-// An empty {@link Pair} which is used as the default value of
-// {@link //source} for tokens that do not have a source.
-
-//CommonToken.EMPTY_SOURCE = [ nil, nil ]
-
-// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
-//
-//
-// If {@code oldToken} is also a {@link CommonToken} instance, the newly
-// constructed token will share a reference to the {@link //text} field and
-// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
-// be assigned the result of calling {@link //GetText}, and {@link //source}
-// will be constructed from the result of {@link Token//GetTokenSource} and
-// {@link Token//GetInputStream}.
-//
-// @param oldToken The token to copy.
-//
-func (c *CommonToken) clone() *CommonToken {
- t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
- t.tokenIndex = c.GetTokenIndex()
- t.line = c.GetLine()
- t.column = c.GetColumn()
- t.text = c.GetText()
- return t
-}
-
-func (c *CommonToken) GetText() string {
- if c.text != "" {
- return c.text
- }
- input := c.GetInputStream()
- if input == nil {
- return ""
- }
- n := input.Size()
- if c.start < n && c.stop < n {
- return input.GetTextFromInterval(NewInterval(c.start, c.stop))
- }
- return ""
-}
-
-func (c *CommonToken) SetText(text string) {
- c.text = text
-}
-
-func (c *CommonToken) String() string {
- txt := c.GetText()
- if txt != "" {
- txt = strings.Replace(txt, "\n", "\\n", -1)
- txt = strings.Replace(txt, "\r", "\\r", -1)
- txt = strings.Replace(txt, "\t", "\\t", -1)
- } else {
- txt = ""
- }
-
- var ch string
- if c.channel > 0 {
- ch = ",channel=" + strconv.Itoa(c.channel)
- } else {
- ch = ""
- }
-
- return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
- txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
- ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
deleted file mode 100644
index e023978fef..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type TokenSource interface {
- NextToken() Token
- Skip()
- More()
- GetLine() int
- GetCharPositionInLine() int
- GetInputStream() CharStream
- GetSourceName() string
- setTokenFactory(factory TokenFactory)
- GetTokenFactory() TokenFactory
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
deleted file mode 100644
index df92c81478..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type TokenStream interface {
- IntStream
-
- LT(k int) Token
-
- Get(index int) Token
- GetTokenSource() TokenSource
- SetTokenSource(TokenSource)
-
- GetAllText() string
- GetTextFromInterval(*Interval) string
- GetTextFromRuleContext(RuleContext) string
- GetTextFromTokens(Token, Token) string
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
deleted file mode 100644
index 96a03f02aa..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
+++ /dev/null
@@ -1,649 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-package antlr
-
-import (
-"bytes"
-"fmt"
-)
-
-
-//
-// Useful for rewriting out a buffered input token stream after doing some
-// augmentation or other manipulations on it.
-
-//
-// You can insert stuff, replace, and delete chunks. Note that the operations
-// are done lazily--only if you convert the buffer to a {@link String} with
-// {@link TokenStream#getText()}. This is very efficient because you are not
-// moving data around all the time. As the buffer of tokens is converted to
-// strings, the {@link #getText()} method(s) scan the input token stream and
-// check to see if there is an operation at the current index. If so, the
-// operation is done and then normal {@link String} rendering continues on the
-// buffer. This is like having multiple Turing machine instruction streams
-// (programs) operating on a single input tape. :)
-//
-
-// This rewriter makes no modifications to the token stream. It does not ask the
-// stream to fill itself up nor does it advance the input cursor. The token
-// stream {@link TokenStream#index()} will return the same value before and
-// after any {@link #getText()} call.
-
-//
-// The rewriter only works on tokens that you have in the buffer and ignores the
-// current input cursor. If you are buffering tokens on-demand, calling
-// {@link #getText()} halfway through the input will only do rewrites for those
-// tokens in the first half of the file.
-
-//
-// Since the operations are done lazily at {@link #getText}-time, operations do
-// not screw up the token index values. That is, an insert operation at token
-// index {@code i} does not change the index values for tokens
-// {@code i}+1..n-1.
-
-//
-// Because operations never actually alter the buffer, you may always get the
-// original token stream back without undoing anything. Since the instructions
-// are queued up, you can easily simulate transactions and roll back any changes
-// if there is an error just by removing instructions. For example,
-
-//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-//
-
-//
-// Then in the rules, you can execute (assuming rewriter is visible):
-
-//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-//
-
-//
-// You can also have multiple "instruction streams" and get multiple rewrites
-// from a single pass over the input. Just name the instruction streams and use
-// that name again when printing the buffer. This could be useful for generating
-// a C file and also its header file--all from the same buffer:
-
-//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-//
-
-//
-// If you don't use named rewrite streams, a "default" stream is used as the
-// first example shows.
-
-
-
-const(
- Default_Program_Name = "default"
- Program_Init_Size = 100
- Min_Token_Index = 0
-)
-
-// Define the rewrite operation hierarchy
-
-type RewriteOperation interface {
- // Execute the rewrite operation by possibly adding to the buffer.
- // Return the index of the next token to operate on.
- Execute(buffer *bytes.Buffer) int
- String() string
- GetInstructionIndex() int
- GetIndex() int
- GetText() string
- GetOpName() string
- GetTokens() TokenStream
- SetInstructionIndex(val int)
- SetIndex(int)
- SetText(string)
- SetOpName(string)
- SetTokens(TokenStream)
-}
-
-type BaseRewriteOperation struct {
- //Current index of rewrites list
- instruction_index int
- //Token buffer index
- index int
- //Substitution text
- text string
- //Actual operation name
- op_name string
- //Pointer to token steam
- tokens TokenStream
-}
-
-func (op *BaseRewriteOperation)GetInstructionIndex() int{
- return op.instruction_index
-}
-
-func (op *BaseRewriteOperation)GetIndex() int{
- return op.index
-}
-
-func (op *BaseRewriteOperation)GetText() string{
- return op.text
-}
-
-func (op *BaseRewriteOperation)GetOpName() string{
- return op.op_name
-}
-
-func (op *BaseRewriteOperation)GetTokens() TokenStream{
- return op.tokens
-}
-
-func (op *BaseRewriteOperation)SetInstructionIndex(val int){
- op.instruction_index = val
-}
-
-func (op *BaseRewriteOperation)SetIndex(val int) {
- op.index = val
-}
-
-func (op *BaseRewriteOperation)SetText(val string){
- op.text = val
-}
-
-func (op *BaseRewriteOperation)SetOpName(val string){
- op.op_name = val
-}
-
-func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
- op.tokens = val
-}
-
-
-func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
- return op.index
-}
-
-func (op *BaseRewriteOperation) String() string {
- return fmt.Sprintf("<%s@%d:\"%s\">",
- op.op_name,
- op.tokens.Get(op.GetIndex()),
- op.text,
- )
-
-}
-
-
-type InsertBeforeOp struct {
- BaseRewriteOperation
-}
-
-func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
- return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
- index:index,
- text:text,
- op_name:"InsertBeforeOp",
- tokens:stream,
- }}
-}
-
-func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
- buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
- buffer.WriteString(op.tokens.Get(op.index).GetText())
- }
- return op.index+1
-}
-
-func (op *InsertBeforeOp) String() string {
- return op.BaseRewriteOperation.String()
-}
-
-// Distinguish between insert after/before to do the "insert afters"
-// first and then the "insert befores" at same index. Implementation
-// of "insert after" is "insert before index+1".
-
-type InsertAfterOp struct {
- BaseRewriteOperation
-}
-
-func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
- return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
- index:index+1,
- text:text,
- tokens:stream,
- }}
-}
-
-func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
- buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
- buffer.WriteString(op.tokens.Get(op.index).GetText())
- }
- return op.index+1
-}
-
-func (op *InsertAfterOp) String() string {
- return op.BaseRewriteOperation.String()
-}
-
-// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
-// instructions.
-type ReplaceOp struct{
- BaseRewriteOperation
- LastIndex int
-}
-
-func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
- return &ReplaceOp{
- BaseRewriteOperation:BaseRewriteOperation{
- index:from,
- text:text,
- op_name:"ReplaceOp",
- tokens:stream,
- },
- LastIndex:to,
- }
-}
-
-func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
- if op.text != ""{
- buffer.WriteString(op.text)
- }
- return op.LastIndex +1
-}
-
-func (op *ReplaceOp) String() string {
- if op.text == "" {
- return fmt.Sprintf("",
- op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
- }
- return fmt.Sprintf("",
- op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
-}
-
-
-type TokenStreamRewriter struct {
- //Our source stream
- tokens TokenStream
- // You may have multiple, named streams of rewrite operations.
- // I'm calling these things "programs."
- // Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- last_rewrite_token_indexes map[string]int
-}
-
-func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
- return &TokenStreamRewriter{
- tokens: tokens,
- programs: map[string][]RewriteOperation{
- Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
- },
- last_rewrite_token_indexes: map[string]int{},
- }
-}
-
-func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
- return tsr.tokens
-}
-
-// Rollback the instruction stream for a program so that
-// the indicated instruction (via instructionIndex) is no
-// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
- is, ok := tsr.programs[program_name]
- if ok{
- tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
- }
-}
-
-func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
- tsr.Rollback(Default_Program_Name, instruction_index)
-}
-//Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
- tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
-}
-
-func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
- tsr.DeleteProgram(Default_Program_Name)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
- // to insert after, just insert before next index (even if past end)
- var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
- tsr.InsertAfter(Default_Program_Name, index, text)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
- tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
-}
-
-func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
- var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
- tsr.InsertBefore(Default_Program_Name, index, text)
-}
-
-func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
- tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
- if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
- panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
- from, to, tsr.tokens.Size()))
- }
- var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
- tsr.Replace(Default_Program_Name, from, to, text)
-}
-
-func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
- tsr.ReplaceDefault(index, index, text)
-}
-
-func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
- tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
- tsr.ReplaceToken(Default_Program_Name, from, to, text)
-}
-
-func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
- tsr.ReplaceTokenDefault(index, index, text)
-}
-
-func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
- tsr.Replace(program_name, from, to, "" )
-}
-
-func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
- tsr.Delete(Default_Program_Name, from, to)
-}
-
-func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
- tsr.DeleteDefault(index,index)
-}
-
-func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
- tsr.ReplaceToken(program_name, from, to, "")
-}
-
-func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
- tsr.DeleteToken(Default_Program_Name, from, to)
-}
-
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
- i, ok := tsr.last_rewrite_token_indexes[program_name]
- if !ok{
- return -1
- }
- return i
-}
-
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
- return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
-}
-
-func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
- tsr.last_rewrite_token_indexes[program_name] = i
-}
-
-func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
- is := make([]RewriteOperation, 0, Program_Init_Size)
- tsr.programs[name] = is
- return is
-}
-
-func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
- is := tsr.GetProgram(name)
- is = append(is, op)
- tsr.programs[name] = is
-}
-
-func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
- is, ok := tsr.programs[name]
- if !ok{
- is = tsr.InitializeProgram(name)
- }
- return is
-}
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetTextDefault() string{
- return tsr.GetText(
- Default_Program_Name,
- NewInterval(0, tsr.tokens.Size()-1))
-}
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
- rewrites := tsr.programs[program_name]
- start := interval.Start
- stop := interval.Stop
- // ensure start/end are in range
- stop = min(stop, tsr.tokens.Size()-1)
- start = max(start,0)
- if rewrites == nil || len(rewrites) == 0{
- return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
- }
- buf := bytes.Buffer{}
- // First, optimize instruction stream
- indexToOp := reduceToSingleOperationPerIndex(rewrites)
- // Walk buffer, executing instructions and emitting tokens
- for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
- }
- }
- return buf.String()
-}
-
-// We need to combine operations and report invalid operations (like
-// overlapping replaces that are not completed nested). Inserts to
-// same index need to be combined etc... Here are the cases:
-//
-// I.i.u I.j.v leave alone, nonoverlapping
-// I.i.u I.i.v combine: Iivu
-//
-// R.i-j.u R.x-y.v | i-j in x-y delete first R
-// R.i-j.u R.i-j.v delete first R
-// R.i-j.u R.x-y.v | x-y in i-j ERROR
-// R.i-j.u R.x-y.v | boundaries overlap ERROR
-//
-// Delete special case of replace (text==null):
-// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
-//
-// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
-// R.x-y.v I.i.u | i in x-y ERROR
-// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
-//
-// I.i.u = insert u before op @ index i
-// R.x-y.u = replace x-y indexed tokens with u
-//
-// First we need to examine replaces. For any replace op:
-//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
-//
-// Then we can deal with inserts:
-//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this replace.
-// 3. throw exception if index in same range as previous replace
-//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
-//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the replace range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
-//
-// Return a map from token index to operation.
-//
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
- // WALK REPLACES
- for i:=0; i < len(rewrites); i++{
- op := rewrites[i]
- if op == nil{continue}
- rop, ok := op.(*ReplaceOp)
- if !ok{continue}
- // Wipe prior inserts within range
- for j:=0; j rop.index && iop.index <=rop.LastIndex{
- // delete insert as it's a no-op.
- rewrites[iop.instruction_index] = nil
- }
- }
- }
- // Drop any prior replaces contained within
- for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{
- // delete replace as it's a no-op.
- rewrites[prevop.instruction_index] = nil
- continue
- }
- // throw exception unless disjoint or identical
- disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
- // Delete special case of replace (text==null):
- // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- if prevop.text == "" && rop.text == "" && !disjoint{
- rewrites[prevop.instruction_index] = nil
- rop.index = min(prevop.index, rop.index)
- rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
- println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
- }else if !disjoint{
- panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
- }
- }
- }
- }
- // WALK INSERTS
- for i:=0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil{continue}
- //hack to replicate inheritance in composition
- _, iok := rewrites[i].(*InsertBeforeOp)
- _, aok := rewrites[i].(*InsertAfterOp)
- if !iok && !aok{continue}
- iop := rewrites[i]
- // combine current insert with prior if any at same index
- // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
- for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{
- panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
- }
- }
- }
- }
- m := map[int]RewriteOperation{}
- for i:=0; i < len(rewrites); i++{
- op := rewrites[i]
- if op == nil {continue}
- if _, ok := m[op.GetIndex()]; ok{
- panic("should only be one op per index")
- }
- m[op.GetIndex()] = op
- }
- return m
-}
-
-
-/*
- Quick fixing Go lack of overloads
- */
-
-func max(a,b int)int{
- if a>b{
- return a
- }else {
- return b
- }
-}
-func min(a,b int)int{
- if aThis is a one way link. It emanates from a state (usually via a list of
-// transitions) and has a target state.
-//
-// Since we never have to change the ATN transitions once we construct it,
-// the states. We'll use the term Edge for the DFA to distinguish them from
-// ATN transitions.
-
-type Transition interface {
- getTarget() ATNState
- setTarget(ATNState)
- getIsEpsilon() bool
- getLabel() *IntervalSet
- getSerializationType() int
- Matches(int, int, int) bool
-}
-
-type BaseTransition struct {
- target ATNState
- isEpsilon bool
- label int
- intervalSet *IntervalSet
- serializationType int
-}
-
-func NewBaseTransition(target ATNState) *BaseTransition {
-
- if target == nil {
- panic("target cannot be nil.")
- }
-
- t := new(BaseTransition)
-
- t.target = target
- // Are we epsilon, action, sempred?
- t.isEpsilon = false
- t.intervalSet = nil
-
- return t
-}
-
-func (t *BaseTransition) getTarget() ATNState {
- return t.target
-}
-
-func (t *BaseTransition) setTarget(s ATNState) {
- t.target = s
-}
-
-func (t *BaseTransition) getIsEpsilon() bool {
- return t.isEpsilon
-}
-
-func (t *BaseTransition) getLabel() *IntervalSet {
- return t.intervalSet
-}
-
-func (t *BaseTransition) getSerializationType() int {
- return t.serializationType
-}
-
-func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- panic("Not implemented")
-}
-
-const (
- TransitionEPSILON = 1
- TransitionRANGE = 2
- TransitionRULE = 3
- TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
- TransitionATOM = 5
- TransitionACTION = 6
- TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
- TransitionNOTSET = 8
- TransitionWILDCARD = 9
- TransitionPRECEDENCE = 10
-)
-
-var TransitionserializationNames = []string{
- "INVALID",
- "EPSILON",
- "RANGE",
- "RULE",
- "PREDICATE",
- "ATOM",
- "ACTION",
- "SET",
- "NOT_SET",
- "WILDCARD",
- "PRECEDENCE",
-}
-
-//var TransitionserializationTypes struct {
-// EpsilonTransition int
-// RangeTransition int
-// RuleTransition int
-// PredicateTransition int
-// AtomTransition int
-// ActionTransition int
-// SetTransition int
-// NotSetTransition int
-// WildcardTransition int
-// PrecedencePredicateTransition int
-//}{
-// TransitionEPSILON,
-// TransitionRANGE,
-// TransitionRULE,
-// TransitionPREDICATE,
-// TransitionATOM,
-// TransitionACTION,
-// TransitionSET,
-// TransitionNOTSET,
-// TransitionWILDCARD,
-// TransitionPRECEDENCE
-//}
-
-// TODO: make all transitions sets? no, should remove set edges
-type AtomTransition struct {
- *BaseTransition
-}
-
-func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
-
- t := new(AtomTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.label = intervalSet // The token type or character value or, signifies special intervalSet.
- t.intervalSet = t.makeLabel()
- t.serializationType = TransitionATOM
-
- return t
-}
-
-func (t *AtomTransition) makeLabel() *IntervalSet {
- s := NewIntervalSet()
- s.addOne(t.label)
- return s
-}
-
-func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return t.label == symbol
-}
-
-func (t *AtomTransition) String() string {
- return strconv.Itoa(t.label)
-}
-
-type RuleTransition struct {
- *BaseTransition
-
- followState ATNState
- ruleIndex, precedence int
-}
-
-func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
-
- t := new(RuleTransition)
- t.BaseTransition = NewBaseTransition(ruleStart)
-
- t.ruleIndex = ruleIndex
- t.precedence = precedence
- t.followState = followState
- t.serializationType = TransitionRULE
- t.isEpsilon = true
-
- return t
-}
-
-func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-type EpsilonTransition struct {
- *BaseTransition
-
- outermostPrecedenceReturn int
-}
-
-func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
-
- t := new(EpsilonTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionEPSILON
- t.isEpsilon = true
- t.outermostPrecedenceReturn = outermostPrecedenceReturn
- return t
-}
-
-func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *EpsilonTransition) String() string {
- return "epsilon"
-}
-
-type RangeTransition struct {
- *BaseTransition
-
- start, stop int
-}
-
-func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
-
- t := new(RangeTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionRANGE
- t.start = start
- t.stop = stop
- t.intervalSet = t.makeLabel()
- return t
-}
-
-func (t *RangeTransition) makeLabel() *IntervalSet {
- s := NewIntervalSet()
- s.addRange(t.start, t.stop)
- return s
-}
-
-func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= t.start && symbol <= t.stop
-}
-
-func (t *RangeTransition) String() string {
- var sb strings.Builder
- sb.WriteByte('\'')
- sb.WriteRune(rune(t.start))
- sb.WriteString("'..'")
- sb.WriteRune(rune(t.stop))
- sb.WriteByte('\'')
- return sb.String()
-}
-
-type AbstractPredicateTransition interface {
- Transition
- IAbstractPredicateTransitionFoo()
-}
-
-type BaseAbstractPredicateTransition struct {
- *BaseTransition
-}
-
-func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
-
- t := new(BaseAbstractPredicateTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- return t
-}
-
-func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
-
-type PredicateTransition struct {
- *BaseAbstractPredicateTransition
-
- isCtxDependent bool
- ruleIndex, predIndex int
-}
-
-func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
-
- t := new(PredicateTransition)
- t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
- t.serializationType = TransitionPREDICATE
- t.ruleIndex = ruleIndex
- t.predIndex = predIndex
- t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- t.isEpsilon = true
- return t
-}
-
-func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *PredicateTransition) getPredicate() *Predicate {
- return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
-}
-
-func (t *PredicateTransition) String() string {
- return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
-}
-
-type ActionTransition struct {
- *BaseTransition
-
- isCtxDependent bool
- ruleIndex, actionIndex, predIndex int
-}
-
-func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
-
- t := new(ActionTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionACTION
- t.ruleIndex = ruleIndex
- t.actionIndex = actionIndex
- t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- t.isEpsilon = true
- return t
-}
-
-func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *ActionTransition) String() string {
- return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
-}
-
-type SetTransition struct {
- *BaseTransition
-}
-
-func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
-
- t := new(SetTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionSET
- if set != nil {
- t.intervalSet = set
- } else {
- t.intervalSet = NewIntervalSet()
- t.intervalSet.addOne(TokenInvalidType)
- }
-
- return t
-}
-
-func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return t.intervalSet.contains(symbol)
-}
-
-func (t *SetTransition) String() string {
- return t.intervalSet.String()
-}
-
-type NotSetTransition struct {
- *SetTransition
-}
-
-func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
-
- t := new(NotSetTransition)
-
- t.SetTransition = NewSetTransition(target, set)
-
- t.serializationType = TransitionNOTSET
-
- return t
-}
-
-func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
-}
-
-func (t *NotSetTransition) String() string {
- return "~" + t.intervalSet.String()
-}
-
-type WildcardTransition struct {
- *BaseTransition
-}
-
-func NewWildcardTransition(target ATNState) *WildcardTransition {
-
- t := new(WildcardTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionWILDCARD
- return t
-}
-
-func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
-}
-
-func (t *WildcardTransition) String() string {
- return "."
-}
-
-type PrecedencePredicateTransition struct {
- *BaseAbstractPredicateTransition
-
- precedence int
-}
-
-func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
-
- t := new(PrecedencePredicateTransition)
- t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
- t.serializationType = TransitionPRECEDENCE
- t.precedence = precedence
- t.isEpsilon = true
-
- return t
-}
-
-func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
- return NewPrecedencePredicate(t.precedence)
-}
-
-func (t *PrecedencePredicateTransition) String() string {
- return fmt.Sprint(t.precedence) + " >= _p"
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
deleted file mode 100644
index 08ce22bba3..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The basic notion of a tree has a parent, a payload, and a list of children.
-// It is the most abstract interface for all the trees used by ANTLR.
-///
-
-var TreeInvalidInterval = NewInterval(-1, -2)
-
-type Tree interface {
- GetParent() Tree
- SetParent(Tree)
- GetPayload() interface{}
- GetChild(i int) Tree
- GetChildCount() int
- GetChildren() []Tree
-}
-
-type SyntaxTree interface {
- Tree
-
- GetSourceInterval() *Interval
-}
-
-type ParseTree interface {
- SyntaxTree
-
- Accept(Visitor ParseTreeVisitor) interface{}
- GetText() string
-
- ToStringTree([]string, Recognizer) string
-}
-
-type RuleNode interface {
- ParseTree
-
- GetRuleContext() RuleContext
- GetBaseRuleContext() *BaseRuleContext
-}
-
-type TerminalNode interface {
- ParseTree
-
- GetSymbol() Token
-}
-
-type ErrorNode interface {
- TerminalNode
-
- errorNode()
-}
-
-type ParseTreeVisitor interface {
- Visit(tree ParseTree) interface{}
- VisitChildren(node RuleNode) interface{}
- VisitTerminal(node TerminalNode) interface{}
- VisitErrorNode(node ErrorNode) interface{}
-}
-
-type BaseParseTreeVisitor struct{}
-
-var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
-
-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
-func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
-
-// TODO
-//func (this ParseTreeVisitor) Visit(ctx) {
-// if (Utils.isArray(ctx)) {
-// self := this
-// return ctx.map(function(child) { return VisitAtom(self, child)})
-// } else {
-// return VisitAtom(this, ctx)
-// }
-//}
-//
-//func VisitAtom(Visitor, ctx) {
-// if (ctx.parser == nil) { //is terminal
-// return
-// }
-//
-// name := ctx.parser.ruleNames[ctx.ruleIndex]
-// funcName := "Visit" + Utils.titleCase(name)
-//
-// return Visitor[funcName](ctx)
-//}
-
-type ParseTreeListener interface {
- VisitTerminal(node TerminalNode)
- VisitErrorNode(node ErrorNode)
- EnterEveryRule(ctx ParserRuleContext)
- ExitEveryRule(ctx ParserRuleContext)
-}
-
-type BaseParseTreeListener struct{}
-
-var _ ParseTreeListener = &BaseParseTreeListener{}
-
-func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
-func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
-
-type TerminalNodeImpl struct {
- parentCtx RuleContext
-
- symbol Token
-}
-
-var _ TerminalNode = &TerminalNodeImpl{}
-
-func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
- tn := new(TerminalNodeImpl)
-
- tn.parentCtx = nil
- tn.symbol = symbol
-
- return tn
-}
-
-func (t *TerminalNodeImpl) GetChild(i int) Tree {
- return nil
-}
-
-func (t *TerminalNodeImpl) GetChildren() []Tree {
- return nil
-}
-
-func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
- panic("Cannot set children on terminal node")
-}
-
-func (t *TerminalNodeImpl) GetSymbol() Token {
- return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetParent() Tree {
- return t.parentCtx
-}
-
-func (t *TerminalNodeImpl) SetParent(tree Tree) {
- t.parentCtx = tree.(RuleContext)
-}
-
-func (t *TerminalNodeImpl) GetPayload() interface{} {
- return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
- if t.symbol == nil {
- return TreeInvalidInterval
- }
- tokenIndex := t.symbol.GetTokenIndex()
- return NewInterval(tokenIndex, tokenIndex)
-}
-
-func (t *TerminalNodeImpl) GetChildCount() int {
- return 0
-}
-
-func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
- return v.VisitTerminal(t)
-}
-
-func (t *TerminalNodeImpl) GetText() string {
- return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) String() string {
- if t.symbol.GetTokenType() == TokenEOF {
- return ""
- }
-
- return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
- return t.String()
-}
-
-// Represents a token that was consumed during reSynchronization
-// rather than during a valid Match operation. For example,
-// we will create this kind of a node during single token insertion
-// and deletion as well as during "consume until error recovery set"
-// upon no viable alternative exceptions.
-
-type ErrorNodeImpl struct {
- *TerminalNodeImpl
-}
-
-var _ ErrorNode = &ErrorNodeImpl{}
-
-func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
- en := new(ErrorNodeImpl)
- en.TerminalNodeImpl = NewTerminalNodeImpl(token)
- return en
-}
-
-func (e *ErrorNodeImpl) errorNode() {}
-
-func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
- return v.VisitErrorNode(e)
-}
-
-type ParseTreeWalker struct {
-}
-
-func NewParseTreeWalker() *ParseTreeWalker {
- return new(ParseTreeWalker)
-}
-
-// Performs a walk on the given parse tree starting at the root and going down recursively
-// with depth-first search. On each node, EnterRule is called before
-// recursively walking down into child nodes, then
-// ExitRule is called after the recursive call to wind up.
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
- switch tt := t.(type) {
- case ErrorNode:
- listener.VisitErrorNode(tt)
- case TerminalNode:
- listener.VisitTerminal(tt)
- default:
- p.EnterRule(listener, t.(RuleNode))
- for i := 0; i < t.GetChildCount(); i++ {
- child := t.GetChild(i)
- p.Walk(listener, child)
- }
- p.ExitRule(listener, t.(RuleNode))
- }
-}
-
-//
-// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
-// then by triggering the event specific to the given parse tree node
-//
-func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
- ctx := r.GetRuleContext().(ParserRuleContext)
- listener.EnterEveryRule(ctx)
- ctx.EnterRule(listener)
-}
-
-// Exits a grammar rule by first triggering the event specific to the given parse tree node
-// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
-//
-func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
- ctx := r.GetRuleContext().(ParserRuleContext)
- ctx.ExitRule(listener)
- listener.ExitEveryRule(ctx)
-}
-
-var ParseTreeWalkerDefault = NewParseTreeWalker()
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
deleted file mode 100644
index 80144ecade..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "fmt"
-
-/** A set of utility routines useful for all kinds of ANTLR trees. */
-
-// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
-// node payloads to get the text for the nodes. Detect
-// parse trees and extract data appropriately.
-func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
-
- if recog != nil {
- ruleNames = recog.GetRuleNames()
- }
-
- s := TreesGetNodeText(tree, ruleNames, nil)
-
- s = EscapeWhitespace(s, false)
- c := tree.GetChildCount()
- if c == 0 {
- return s
- }
- res := "(" + s + " "
- if c > 0 {
- s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
- res += s
- }
- for i := 1; i < c; i++ {
- s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
- res += (" " + s)
- }
- res += ")"
- return res
-}
-
-func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
- if recog != nil {
- ruleNames = recog.GetRuleNames()
- }
-
- if ruleNames != nil {
- switch t2 := t.(type) {
- case RuleNode:
- t3 := t2.GetRuleContext()
- altNumber := t3.GetAltNumber()
-
- if altNumber != ATNInvalidAltNumber {
- return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
- }
- return ruleNames[t3.GetRuleIndex()]
- case ErrorNode:
- return fmt.Sprint(t2)
- case TerminalNode:
- if t2.GetSymbol() != nil {
- return t2.GetSymbol().GetText()
- }
- }
- }
-
- // no recog for rule names
- payload := t.GetPayload()
- if p2, ok := payload.(Token); ok {
- return p2.GetText()
- }
-
- return fmt.Sprint(t.GetPayload())
-}
-
-// Return ordered list of all children of this node
-func TreesGetChildren(t Tree) []Tree {
- list := make([]Tree, 0)
- for i := 0; i < t.GetChildCount(); i++ {
- list = append(list, t.GetChild(i))
- }
- return list
-}
-
-// Return a list of all ancestors of this node. The first node of
-// list is the root and the last is the parent of this node.
-//
-func TreesgetAncestors(t Tree) []Tree {
- ancestors := make([]Tree, 0)
- t = t.GetParent()
- for t != nil {
- f := []Tree{t}
- ancestors = append(f, ancestors...)
- t = t.GetParent()
- }
- return ancestors
-}
-
-func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
- return TreesfindAllNodes(t, ttype, true)
-}
-
-func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
- return TreesfindAllNodes(t, ruleIndex, false)
-}
-
-func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
- nodes := make([]ParseTree, 0)
- treesFindAllNodes(t, index, findTokens, &nodes)
- return nodes
-}
-
-func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
- // check this node (the root) first
-
- t2, ok := t.(TerminalNode)
- t3, ok2 := t.(ParserRuleContext)
-
- if findTokens && ok {
- if t2.GetSymbol().GetTokenType() == index {
- *nodes = append(*nodes, t2)
- }
- } else if !findTokens && ok2 {
- if t3.GetRuleIndex() == index {
- *nodes = append(*nodes, t3)
- }
- }
- // check children
- for i := 0; i < t.GetChildCount(); i++ {
- treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
- }
-}
-
-func TreesDescendants(t ParseTree) []ParseTree {
- nodes := []ParseTree{t}
- for i := 0; i < t.GetChildCount(); i++ {
- nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
- }
- return nodes
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
deleted file mode 100644
index ec219df983..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/bits"
- "strconv"
- "strings"
-)
-
-func intMin(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func intMax(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-// A simple integer stack
-
-type IntStack []int
-
-var ErrEmptyStack = errors.New("Stack is empty")
-
-func (s *IntStack) Pop() (int, error) {
- l := len(*s) - 1
- if l < 0 {
- return 0, ErrEmptyStack
- }
- v := (*s)[l]
- *s = (*s)[0:l]
- return v, nil
-}
-
-func (s *IntStack) Push(e int) {
- *s = append(*s, e)
-}
-
-func standardEqualsFunction(a interface{}, b interface{}) bool {
-
- ac, oka := a.(comparable)
- bc, okb := b.(comparable)
-
- if !oka || !okb {
- panic("Not Comparable")
- }
-
- return ac.equals(bc)
-}
-
-func standardHashFunction(a interface{}) int {
- if h, ok := a.(hasher); ok {
- return h.hash()
- }
-
- panic("Not Hasher")
-}
-
-type hasher interface {
- hash() int
-}
-
-const bitsPerWord = 64
-
-func indexForBit(bit int) int {
- return bit / bitsPerWord
-}
-
-func wordForBit(data []uint64, bit int) uint64 {
- idx := indexForBit(bit)
- if idx >= len(data) {
- return 0
- }
- return data[idx]
-}
-
-func maskForBit(bit int) uint64 {
- return uint64(1) << (bit % bitsPerWord)
-}
-
-func wordsNeeded(bit int) int {
- return indexForBit(bit) + 1
-}
-
-type BitSet struct {
- data []uint64
-}
-
-func NewBitSet() *BitSet {
- return &BitSet{}
-}
-
-func (b *BitSet) add(value int) {
- idx := indexForBit(value)
- if idx >= len(b.data) {
- size := wordsNeeded(value)
- data := make([]uint64, size)
- copy(data, b.data)
- b.data = data
- }
- b.data[idx] |= maskForBit(value)
-}
-
-func (b *BitSet) clear(index int) {
- idx := indexForBit(index)
- if idx >= len(b.data) {
- return
- }
- b.data[idx] &= ^maskForBit(index)
-}
-
-func (b *BitSet) or(set *BitSet) {
- // Get min size necessary to represent the bits in both sets.
- bLen := b.minLen()
- setLen := set.minLen()
- maxLen := intMax(bLen, setLen)
- if maxLen > len(b.data) {
- // Increase the size of len(b.data) to repesent the bits in both sets.
- data := make([]uint64, maxLen)
- copy(data, b.data)
- b.data = data
- }
- // len(b.data) is at least setLen.
- for i := 0; i < setLen; i++ {
- b.data[i] |= set.data[i]
- }
-}
-
-func (b *BitSet) remove(value int) {
- b.clear(value)
-}
-
-func (b *BitSet) contains(value int) bool {
- idx := indexForBit(value)
- if idx >= len(b.data) {
- return false
- }
- return (b.data[idx] & maskForBit(value)) != 0
-}
-
-func (b *BitSet) minValue() int {
- for i, v := range b.data {
- if v == 0 {
- continue
- }
- return i*bitsPerWord + bits.TrailingZeros64(v)
- }
- return 2147483647
-}
-
-func (b *BitSet) equals(other interface{}) bool {
- otherBitSet, ok := other.(*BitSet)
- if !ok {
- return false
- }
-
- if b == otherBitSet {
- return true
- }
-
- // We only compare set bits, so we cannot rely on the two slices having the same size. Its
- // possible for two BitSets to have different slice lengths but the same set bits. So we only
- // compare the relavent words and ignore the trailing zeros.
- bLen := b.minLen()
- otherLen := otherBitSet.minLen()
-
- if bLen != otherLen {
- return false
- }
-
- for i := 0; i < bLen; i++ {
- if b.data[i] != otherBitSet.data[i] {
- return false
- }
- }
-
- return true
-}
-
-func (b *BitSet) minLen() int {
- for i := len(b.data); i > 0; i-- {
- if b.data[i-1] != 0 {
- return i
- }
- }
- return 0
-}
-
-func (b *BitSet) length() int {
- cnt := 0
- for _, val := range b.data {
- cnt += bits.OnesCount64(val)
- }
- return cnt
-}
-
-func (b *BitSet) String() string {
- vals := make([]string, 0, b.length())
-
- for i, v := range b.data {
- for v != 0 {
- n := bits.TrailingZeros64(v)
- vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
- v &= ^(uint64(1) << n)
- }
- }
-
- return "{" + strings.Join(vals, ", ") + "}"
-}
-
-type AltDict struct {
- data map[string]interface{}
-}
-
-func NewAltDict() *AltDict {
- d := new(AltDict)
- d.data = make(map[string]interface{})
- return d
-}
-
-func (a *AltDict) Get(key string) interface{} {
- key = "k-" + key
- return a.data[key]
-}
-
-func (a *AltDict) put(key string, value interface{}) {
- key = "k-" + key
- a.data[key] = value
-}
-
-func (a *AltDict) values() []interface{} {
- vs := make([]interface{}, len(a.data))
- i := 0
- for _, v := range a.data {
- vs[i] = v
- i++
- }
- return vs
-}
-
-type DoubleDict struct {
- data map[int]map[int]interface{}
-}
-
-func NewDoubleDict() *DoubleDict {
- dd := new(DoubleDict)
- dd.data = make(map[int]map[int]interface{})
- return dd
-}
-
-func (d *DoubleDict) Get(a, b int) interface{} {
- data := d.data[a]
-
- if data == nil {
- return nil
- }
-
- return data[b]
-}
-
-func (d *DoubleDict) set(a, b int, o interface{}) {
- data := d.data[a]
-
- if data == nil {
- data = make(map[int]interface{})
- d.data[a] = data
- }
-
- data[b] = o
-}
-
-func EscapeWhitespace(s string, escapeSpaces bool) string {
-
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
- if escapeSpaces {
- s = strings.Replace(s, " ", "\u00B7", -1)
- }
- return s
-}
-
-func TerminalNodeToStringArray(sa []TerminalNode) []string {
- st := make([]string, len(sa))
-
- for i, s := range sa {
- st[i] = fmt.Sprintf("%v", s)
- }
-
- return st
-}
-
-func PrintArrayJavaStyle(sa []string) string {
- var buffer bytes.Buffer
-
- buffer.WriteString("[")
-
- for i, s := range sa {
- buffer.WriteString(s)
- if i != len(sa)-1 {
- buffer.WriteString(", ")
- }
- }
-
- buffer.WriteString("]")
-
- return buffer.String()
-}
-
-// murmur hash
-func murmurInit(seed int) int {
- return seed
-}
-
-func murmurUpdate(h int, value int) int {
- const c1 uint32 = 0xCC9E2D51
- const c2 uint32 = 0x1B873593
- const r1 uint32 = 15
- const r2 uint32 = 13
- const m uint32 = 5
- const n uint32 = 0xE6546B64
-
- k := uint32(value)
- k *= c1
- k = (k << r1) | (k >> (32 - r1))
- k *= c2
-
- hash := uint32(h) ^ k
- hash = (hash << r2) | (hash >> (32 - r2))
- hash = hash*m + n
- return int(hash)
-}
-
-func murmurFinish(h int, numberOfWords int) int {
- var hash = uint32(h)
- hash ^= uint32(numberOfWords) << 2
- hash ^= hash >> 16
- hash *= 0x85ebca6b
- hash ^= hash >> 13
- hash *= 0xc2b2ae35
- hash ^= hash >> 16
-
- return int(hash)
-}
diff --git a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go b/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
deleted file mode 100644
index 0d4eac698d..0000000000
--- a/etcd/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package antlr
-
-import "math"
-
-const (
- _initalCapacity = 16
- _initalBucketCapacity = 8
- _loadFactor = 0.75
-)
-
-var _ Set = (*array2DHashSet)(nil)
-
-type Set interface {
- Add(value interface{}) (added interface{})
- Len() int
- Get(value interface{}) (found interface{})
- Contains(value interface{}) bool
- Values() []interface{}
- Each(f func(interface{}) bool)
-}
-
-type array2DHashSet struct {
- buckets [][]interface{}
- hashcodeFunction func(interface{}) int
- equalsFunction func(interface{}, interface{}) bool
-
- n int // How many elements in set
- threshold int // when to expand
-
- currentPrime int // jump by 4 primes each expand or whatever
- initialBucketCapacity int
-}
-
-func (as *array2DHashSet) Each(f func(interface{}) bool) {
- if as.Len() < 1 {
- return
- }
-
- for _, bucket := range as.buckets {
- for _, o := range bucket {
- if o == nil {
- break
- }
- if !f(o) {
- return
- }
- }
- }
-}
-
-func (as *array2DHashSet) Values() []interface{} {
- if as.Len() < 1 {
- return nil
- }
-
- values := make([]interface{}, 0, as.Len())
- as.Each(func(i interface{}) bool {
- values = append(values, i)
- return true
- })
- return values
-}
-
-func (as *array2DHashSet) Contains(value interface{}) bool {
- return as.Get(value) != nil
-}
-
-func (as *array2DHashSet) Add(value interface{}) interface{} {
- if as.n > as.threshold {
- as.expand()
- }
- return as.innerAdd(value)
-}
-
-func (as *array2DHashSet) expand() {
- old := as.buckets
-
- as.currentPrime += 4
-
- var (
- newCapacity = len(as.buckets) << 1
- newTable = as.createBuckets(newCapacity)
- newBucketLengths = make([]int, len(newTable))
- )
-
- as.buckets = newTable
- as.threshold = int(float64(newCapacity) * _loadFactor)
-
- for _, bucket := range old {
- if bucket == nil {
- continue
- }
-
- for _, o := range bucket {
- if o == nil {
- break
- }
-
- b := as.getBuckets(o)
- bucketLength := newBucketLengths[b]
- var newBucket []interface{}
- if bucketLength == 0 {
- // new bucket
- newBucket = as.createBucket(as.initialBucketCapacity)
- newTable[b] = newBucket
- } else {
- newBucket = newTable[b]
- if bucketLength == len(newBucket) {
- // expand
- newBucketCopy := make([]interface{}, len(newBucket)<<1)
- copy(newBucketCopy[:bucketLength], newBucket)
- newBucket = newBucketCopy
- newTable[b] = newBucket
- }
- }
-
- newBucket[bucketLength] = o
- newBucketLengths[b]++
- }
- }
-}
-
-func (as *array2DHashSet) Len() int {
- return as.n
-}
-
-func (as *array2DHashSet) Get(o interface{}) interface{} {
- if o == nil {
- return nil
- }
-
- b := as.getBuckets(o)
- bucket := as.buckets[b]
- if bucket == nil { // no bucket
- return nil
- }
-
- for _, e := range bucket {
- if e == nil {
- return nil // empty slot; not there
- }
- if as.equalsFunction(e, o) {
- return e
- }
- }
-
- return nil
-}
-
-func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
- b := as.getBuckets(o)
-
- bucket := as.buckets[b]
-
- // new bucket
- if bucket == nil {
- bucket = as.createBucket(as.initialBucketCapacity)
- bucket[0] = o
-
- as.buckets[b] = bucket
- as.n++
- return o
- }
-
- // look for it in bucket
- for i := 0; i < len(bucket); i++ {
- existing := bucket[i]
- if existing == nil { // empty slot; not there, add.
- bucket[i] = o
- as.n++
- return o
- }
-
- if as.equalsFunction(existing, o) { // found existing, quit
- return existing
- }
- }
-
- // full bucket, expand and add to end
- oldLength := len(bucket)
- bucketCopy := make([]interface{}, oldLength<<1)
- copy(bucketCopy[:oldLength], bucket)
- bucket = bucketCopy
- as.buckets[b] = bucket
- bucket[oldLength] = o
- as.n++
- return o
-}
-
-func (as *array2DHashSet) getBuckets(value interface{}) int {
- hash := as.hashcodeFunction(value)
- return hash & (len(as.buckets) - 1)
-}
-
-func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
- return make([][]interface{}, cap)
-}
-
-func (as *array2DHashSet) createBucket(cap int) []interface{} {
- return make([]interface{}, cap)
-}
-
-func newArray2DHashSetWithCap(
- hashcodeFunction func(interface{}) int,
- equalsFunction func(interface{}, interface{}) bool,
- initCap int,
- initBucketCap int,
-) *array2DHashSet {
- if hashcodeFunction == nil {
- hashcodeFunction = standardHashFunction
- }
-
- if equalsFunction == nil {
- equalsFunction = standardEqualsFunction
- }
-
- ret := &array2DHashSet{
- hashcodeFunction: hashcodeFunction,
- equalsFunction: equalsFunction,
-
- n: 0,
- threshold: int(math.Floor(_initalCapacity * _loadFactor)),
-
- currentPrime: 1,
- initialBucketCapacity: initBucketCap,
- }
-
- ret.buckets = ret.createBuckets(initCap)
- return ret
-}
-
-func newArray2DHashSet(
- hashcodeFunction func(interface{}) int,
- equalsFunction func(interface{}, interface{}) bool,
-) *array2DHashSet {
- return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
-}
diff --git a/etcd/vendor/github.com/coreos/go-oidc/.gitignore b/etcd/vendor/github.com/coreos/go-oidc/.gitignore
deleted file mode 100644
index c96f2f47bc..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/bin
-/gopath
diff --git a/etcd/vendor/github.com/coreos/go-oidc/.travis.yml b/etcd/vendor/github.com/coreos/go-oidc/.travis.yml
deleted file mode 100644
index 6ff9dd9652..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-language: go
-
-go:
- - "1.9"
- - "1.10"
-
-install:
- - go get -v -t github.com/coreos/go-oidc/...
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/golang/lint/golint
-
-script:
- - ./test
-
-notifications:
- email: false
diff --git a/etcd/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md b/etcd/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md
deleted file mode 100644
index 6662073a84..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# How to Contribute
-
-CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
-GitHub pull requests. This document outlines some of the conventions on
-development workflow, commit message formatting, contact points and other
-resources to make it easier to get your contribution accepted.
-
-# Certificate of Origin
-
-By contributing to this project you agree to the Developer Certificate of
-Origin (DCO). This document was created by the Linux Kernel community and is a
-simple statement that you, as a contributor, have the legal right to make the
-contribution. See the [DCO](DCO) file for details.
-
-# Email and Chat
-
-The project currently uses the general CoreOS email list and IRC channel:
-- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
-- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
-
-Please avoid emailing maintainers found in the MAINTAINERS file directly. They
-are very busy and read the mailing lists.
-
-## Getting Started
-
-- Fork the repository on GitHub
-- Read the [README](README.md) for build and test instructions
-- Play with the project, submit bugs, submit patches!
-
-## Contribution Flow
-
-This is a rough outline of what a contributor's workflow looks like:
-
-- Create a topic branch from where you want to base your work (usually master).
-- Make commits of logical units.
-- Make sure your commit messages are in the proper format (see below).
-- Push your changes to a topic branch in your fork of the repository.
-- Make sure the tests pass, and add any new tests as appropriate.
-- Submit a pull request to the original repository.
-
-Thanks for your contributions!
-
-### Format of the Commit Message
-
-We follow a rough convention for commit messages that is designed to answer two
-questions: what changed and why. The subject line should feature the what and
-the body of the commit should describe the why.
-
-```
-scripts: add the test-cluster command
-
-this uses tmux to setup a test cluster that you can easily kill and
-start for debugging.
-
-Fixes #38
-```
-
-The format can be described more formally as follows:
-
-```
-:
-
-
-
-
-```
-
-The first line is the subject and should be no longer than 70 characters, the
-second line is always blank, and other lines should be wrapped at 80 characters.
-This allows the message to be easier to read on GitHub as well as in various
-git tools.
diff --git a/etcd/vendor/github.com/coreos/go-oidc/DCO b/etcd/vendor/github.com/coreos/go-oidc/DCO
deleted file mode 100644
index 716561d5d2..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/DCO
+++ /dev/null
@@ -1,36 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/etcd/vendor/github.com/coreos/go-oidc/LICENSE b/etcd/vendor/github.com/coreos/go-oidc/LICENSE
deleted file mode 100644
index e06d208186..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/etcd/vendor/github.com/coreos/go-oidc/MAINTAINERS b/etcd/vendor/github.com/coreos/go-oidc/MAINTAINERS
deleted file mode 100644
index 99bcaa3f58..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/MAINTAINERS
+++ /dev/null
@@ -1,3 +0,0 @@
-Eric Chiang (@ericchiang)
-Mike Danese (@mikedanese)
-Rithu Leena John (@rithujohn191)
diff --git a/etcd/vendor/github.com/coreos/go-oidc/NOTICE b/etcd/vendor/github.com/coreos/go-oidc/NOTICE
deleted file mode 100644
index b39ddfa5cb..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/etcd/vendor/github.com/coreos/go-oidc/README.md b/etcd/vendor/github.com/coreos/go-oidc/README.md
deleted file mode 100644
index 520d7c87f4..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# go-oidc
-
-[](https://godoc.org/github.com/coreos/go-oidc)
-[](https://travis-ci.org/coreos/go-oidc)
-
-## OpenID Connect support for Go
-
-This package enables OpenID Connect support for the [golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) package.
-
-```go
-provider, err := oidc.NewProvider(ctx, "https://accounts.google.com")
-if err != nil {
- // handle error
-}
-
-// Configure an OpenID Connect aware OAuth2 client.
-oauth2Config := oauth2.Config{
- ClientID: clientID,
- ClientSecret: clientSecret,
- RedirectURL: redirectURL,
-
- // Discovery returns the OAuth2 endpoints.
- Endpoint: provider.Endpoint(),
-
- // "openid" is a required scope for OpenID Connect flows.
- Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
-}
-```
-
-OAuth2 redirects are unchanged.
-
-```go
-func handleRedirect(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, oauth2Config.AuthCodeURL(state), http.StatusFound)
-}
-```
-
-The on responses, the provider can be used to verify ID Tokens.
-
-```go
-var verifier = provider.Verifier(&oidc.Config{ClientID: clientID})
-
-func handleOAuth2Callback(w http.ResponseWriter, r *http.Request) {
- // Verify state and errors.
-
- oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
- if err != nil {
- // handle error
- }
-
- // Extract the ID Token from OAuth2 token.
- rawIDToken, ok := oauth2Token.Extra("id_token").(string)
- if !ok {
- // handle missing token
- }
-
- // Parse and verify ID Token payload.
- idToken, err := verifier.Verify(ctx, rawIDToken)
- if err != nil {
- // handle error
- }
-
- // Extract custom claims
- var claims struct {
- Email string `json:"email"`
- Verified bool `json:"email_verified"`
- }
- if err := idToken.Claims(&claims); err != nil {
- // handle error
- }
-}
-```
diff --git a/etcd/vendor/github.com/coreos/go-oidc/code-of-conduct.md b/etcd/vendor/github.com/coreos/go-oidc/code-of-conduct.md
deleted file mode 100644
index a234f3609d..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/code-of-conduct.md
+++ /dev/null
@@ -1,61 +0,0 @@
-## CoreOS Community Code of Conduct
-
-### Contributor Code of Conduct
-
-As contributors and maintainers of this project, and in the interest of
-fostering an open and welcoming community, we pledge to respect all people who
-contribute through reporting issues, posting feature requests, updating
-documentation, submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project a harassment-free
-experience for everyone, regardless of level of experience, gender, gender
-identity and expression, sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing others' private information, such as physical or electronic addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently applying these
-principles to every aspect of managing this project. Project maintainers who do
-not follow or enforce the Code of Conduct may be permanently removed from the
-project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting a project maintainer, Brandon Philips
-, and/or Rithu John .
-
-This Code of Conduct is adapted from the Contributor Covenant
-(http://contributor-covenant.org), version 1.2.0, available at
-http://contributor-covenant.org/version/1/2/0/
-
-### CoreOS Events Code of Conduct
-
-CoreOS events are working conferences intended for professional networking and
-collaboration in the CoreOS community. Attendees are expected to behave
-according to professional standards and in accordance with their employer’s
-policies on appropriate workplace behavior.
-
-While at CoreOS events or related social networking opportunities, attendees
-should not engage in discriminatory or offensive speech or actions including
-but not limited to gender, sexuality, race, age, disability, or religion.
-Speakers should be especially aware of these concerns.
-
-CoreOS does not condone any statements by speakers contrary to these standards.
-CoreOS reserves the right to deny entrance and/or eject from an event (without
-refund) any individual found to be engaging in discriminatory or offensive
-speech or actions.
-
-Please bring any concerns to the immediate attention of designated on-site
-staff, Brandon Philips , and/or Rithu John .
diff --git a/etcd/vendor/github.com/coreos/go-oidc/jose.go b/etcd/vendor/github.com/coreos/go-oidc/jose.go
deleted file mode 100644
index f2e6bf4322..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/jose.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !golint
-
-// Don't lint this file. We don't want to have to add a comment to each constant.
-
-package oidc
-
-const (
- // JOSE asymmetric signing algorithm values as defined by RFC 7518
- //
- // see: https://tools.ietf.org/html/rfc7518#section-3.1
- RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
- RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
- RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
- ES256 = "ES256" // ECDSA using P-256 and SHA-256
- ES384 = "ES384" // ECDSA using P-384 and SHA-384
- ES512 = "ES512" // ECDSA using P-521 and SHA-512
- PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
- PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
- PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
-)
diff --git a/etcd/vendor/github.com/coreos/go-oidc/jwks.go b/etcd/vendor/github.com/coreos/go-oidc/jwks.go
deleted file mode 100644
index e6a82c8429..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/jwks.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package oidc
-
-import (
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "sync"
- "time"
-
- "github.com/pquerna/cachecontrol"
- jose "gopkg.in/square/go-jose.v2"
-)
-
-// keysExpiryDelta is the allowed clock skew between a client and the OpenID Connect
-// server.
-//
-// When keys expire, they are valid for this amount of time after.
-//
-// If the keys have not expired, and an ID Token claims it was signed by a key not in
-// the cache, if and only if the keys expire in this amount of time, the keys will be
-// updated.
-const keysExpiryDelta = 30 * time.Second
-
-// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
-// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
-// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
-// exposed for providers that don't support discovery or to prevent round trips to the
-// discovery URL.
-//
-// The returned KeySet is a long lived verifier that caches keys based on cache-control
-// headers. Reuse a common remote key set instead of creating new ones as needed.
-//
-// The behavior of the returned KeySet is undefined once the context is canceled.
-func NewRemoteKeySet(ctx context.Context, jwksURL string) KeySet {
- return newRemoteKeySet(ctx, jwksURL, time.Now)
-}
-
-func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *remoteKeySet {
- if now == nil {
- now = time.Now
- }
- return &remoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
-}
-
-type remoteKeySet struct {
- jwksURL string
- ctx context.Context
- now func() time.Time
-
- // guard all other fields
- mu sync.Mutex
-
- // inflight suppresses parallel execution of updateKeys and allows
- // multiple goroutines to wait for its result.
- inflight *inflight
-
- // A set of cached keys and their expiry.
- cachedKeys []jose.JSONWebKey
- expiry time.Time
-}
-
-// inflight is used to wait on some in-flight request from multiple goroutines.
-type inflight struct {
- doneCh chan struct{}
-
- keys []jose.JSONWebKey
- err error
-}
-
-func newInflight() *inflight {
- return &inflight{doneCh: make(chan struct{})}
-}
-
-// wait returns a channel that multiple goroutines can receive on. Once it returns
-// a value, the inflight request is done and result() can be inspected.
-func (i *inflight) wait() <-chan struct{} {
- return i.doneCh
-}
-
-// done can only be called by a single goroutine. It records the result of the
-// inflight request and signals other goroutines that the result is safe to
-// inspect.
-func (i *inflight) done(keys []jose.JSONWebKey, err error) {
- i.keys = keys
- i.err = err
- close(i.doneCh)
-}
-
-// result cannot be called until the wait() channel has returned a value.
-func (i *inflight) result() ([]jose.JSONWebKey, error) {
- return i.keys, i.err
-}
-
-func (r *remoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
- jws, err := jose.ParseSigned(jwt)
- if err != nil {
- return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
- }
- return r.verify(ctx, jws)
-}
-
-func (r *remoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
- // We don't support JWTs signed with multiple signatures.
- keyID := ""
- for _, sig := range jws.Signatures {
- keyID = sig.Header.KeyID
- break
- }
-
- keys, expiry := r.keysFromCache()
-
- // Don't check expiry yet. This optimizes for when the provider is unavailable.
- for _, key := range keys {
- if keyID == "" || key.KeyID == keyID {
- if payload, err := jws.Verify(&key); err == nil {
- return payload, nil
- }
- }
- }
-
- if !r.now().Add(keysExpiryDelta).After(expiry) {
- // Keys haven't expired, don't refresh.
- return nil, errors.New("failed to verify id token signature")
- }
-
- keys, err := r.keysFromRemote(ctx)
- if err != nil {
- return nil, fmt.Errorf("fetching keys %v", err)
- }
-
- for _, key := range keys {
- if keyID == "" || key.KeyID == keyID {
- if payload, err := jws.Verify(&key); err == nil {
- return payload, nil
- }
- }
- }
- return nil, errors.New("failed to verify id token signature")
-}
-
-func (r *remoteKeySet) keysFromCache() (keys []jose.JSONWebKey, expiry time.Time) {
- r.mu.Lock()
- defer r.mu.Unlock()
- return r.cachedKeys, r.expiry
-}
-
-// keysFromRemote syncs the key set from the remote set, records the values in the
-// cache, and returns the key set.
-func (r *remoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
- // Need to lock to inspect the inflight request field.
- r.mu.Lock()
- // If there's not a current inflight request, create one.
- if r.inflight == nil {
- r.inflight = newInflight()
-
- // This goroutine has exclusive ownership over the current inflight
- // request. It releases the resource by nil'ing the inflight field
- // once the goroutine is done.
- go func() {
- // Sync keys and finish inflight when that's done.
- keys, expiry, err := r.updateKeys()
-
- r.inflight.done(keys, err)
-
- // Lock to update the keys and indicate that there is no longer an
- // inflight request.
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if err == nil {
- r.cachedKeys = keys
- r.expiry = expiry
- }
-
- // Free inflight so a different request can run.
- r.inflight = nil
- }()
- }
- inflight := r.inflight
- r.mu.Unlock()
-
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-inflight.wait():
- return inflight.result()
- }
-}
-
-func (r *remoteKeySet) updateKeys() ([]jose.JSONWebKey, time.Time, error) {
- req, err := http.NewRequest("GET", r.jwksURL, nil)
- if err != nil {
- return nil, time.Time{}, fmt.Errorf("oidc: can't create request: %v", err)
- }
-
- resp, err := doRequest(r.ctx, req)
- if err != nil {
- return nil, time.Time{}, fmt.Errorf("oidc: get keys failed %v", err)
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, time.Time{}, fmt.Errorf("unable to read response body: %v", err)
- }
-
- if resp.StatusCode != http.StatusOK {
- return nil, time.Time{}, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
- }
-
- var keySet jose.JSONWebKeySet
- err = unmarshalResp(resp, body, &keySet)
- if err != nil {
- return nil, time.Time{}, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
- }
-
- // If the server doesn't provide cache control headers, assume the
- // keys expire immediately.
- expiry := r.now()
-
- _, e, err := cachecontrol.CachableResponse(req, resp, cachecontrol.Options{})
- if err == nil && e.After(expiry) {
- expiry = e
- }
- return keySet.Keys, expiry, nil
-}
diff --git a/etcd/vendor/github.com/coreos/go-oidc/oidc.go b/etcd/vendor/github.com/coreos/go-oidc/oidc.go
deleted file mode 100644
index 508b39d3c6..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/oidc.go
+++ /dev/null
@@ -1,385 +0,0 @@
-// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
-package oidc
-
-import (
- "context"
- "crypto/sha256"
- "crypto/sha512"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "hash"
- "io/ioutil"
- "mime"
- "net/http"
- "strings"
- "time"
-
- "golang.org/x/oauth2"
- jose "gopkg.in/square/go-jose.v2"
-)
-
-const (
- // ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
- ScopeOpenID = "openid"
-
- // ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
- // OAuth2 refresh tokens.
- //
- // Support for this scope differs between OpenID Connect providers. For instance
- // Google rejects it, favoring appending "access_type=offline" as part of the
- // authorization request instead.
- //
- // See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
- ScopeOfflineAccess = "offline_access"
-)
-
-var (
- errNoAtHash = errors.New("id token did not have an access token hash")
- errInvalidAtHash = errors.New("access token hash does not match value in ID token")
-)
-
-// ClientContext returns a new Context that carries the provided HTTP client.
-//
-// This method sets the same context key used by the golang.org/x/oauth2 package,
-// so the returned context works for that package too.
-//
-// myClient := &http.Client{}
-// ctx := oidc.ClientContext(parentContext, myClient)
-//
-// // This will use the custom client
-// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
-//
-func ClientContext(ctx context.Context, client *http.Client) context.Context {
- return context.WithValue(ctx, oauth2.HTTPClient, client)
-}
-
-func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
- client := http.DefaultClient
- if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
- client = c
- }
- return client.Do(req.WithContext(ctx))
-}
-
-// Provider represents an OpenID Connect server's configuration.
-type Provider struct {
- issuer string
- authURL string
- tokenURL string
- userInfoURL string
-
- // Raw claims returned by the server.
- rawClaims []byte
-
- remoteKeySet KeySet
-}
-
-type cachedKeys struct {
- keys []jose.JSONWebKey
- expiry time.Time
-}
-
-type providerJSON struct {
- Issuer string `json:"issuer"`
- AuthURL string `json:"authorization_endpoint"`
- TokenURL string `json:"token_endpoint"`
- JWKSURL string `json:"jwks_uri"`
- UserInfoURL string `json:"userinfo_endpoint"`
-}
-
-// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
-//
-// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
-// or "https://login.salesforce.com".
-func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
- wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
- req, err := http.NewRequest("GET", wellKnown, nil)
- if err != nil {
- return nil, err
- }
- resp, err := doRequest(ctx, req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("unable to read response body: %v", err)
- }
-
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("%s: %s", resp.Status, body)
- }
-
- var p providerJSON
- err = unmarshalResp(resp, body, &p)
- if err != nil {
- return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
- }
-
- if p.Issuer != issuer {
- return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
- }
- return &Provider{
- issuer: p.Issuer,
- authURL: p.AuthURL,
- tokenURL: p.TokenURL,
- userInfoURL: p.UserInfoURL,
- rawClaims: body,
- remoteKeySet: NewRemoteKeySet(ctx, p.JWKSURL),
- }, nil
-}
-
-// Claims unmarshals raw fields returned by the server during discovery.
-//
-// var claims struct {
-// ScopesSupported []string `json:"scopes_supported"`
-// ClaimsSupported []string `json:"claims_supported"`
-// }
-//
-// if err := provider.Claims(&claims); err != nil {
-// // handle unmarshaling error
-// }
-//
-// For a list of fields defined by the OpenID Connect spec see:
-// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
-func (p *Provider) Claims(v interface{}) error {
- if p.rawClaims == nil {
- return errors.New("oidc: claims not set")
- }
- return json.Unmarshal(p.rawClaims, v)
-}
-
-// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
-func (p *Provider) Endpoint() oauth2.Endpoint {
- return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
-}
-
-// UserInfo represents the OpenID Connect userinfo claims.
-type UserInfo struct {
- Subject string `json:"sub"`
- Profile string `json:"profile"`
- Email string `json:"email"`
- EmailVerified bool `json:"email_verified"`
-
- claims []byte
-}
-
-// Claims unmarshals the raw JSON object claims into the provided object.
-func (u *UserInfo) Claims(v interface{}) error {
- if u.claims == nil {
- return errors.New("oidc: claims not set")
- }
- return json.Unmarshal(u.claims, v)
-}
-
-// UserInfo uses the token source to query the provider's user info endpoint.
-func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
- if p.userInfoURL == "" {
- return nil, errors.New("oidc: user info endpoint is not supported by this provider")
- }
-
- req, err := http.NewRequest("GET", p.userInfoURL, nil)
- if err != nil {
- return nil, fmt.Errorf("oidc: create GET request: %v", err)
- }
-
- token, err := tokenSource.Token()
- if err != nil {
- return nil, fmt.Errorf("oidc: get access token: %v", err)
- }
- token.SetAuthHeader(req)
-
- resp, err := doRequest(ctx, req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("%s: %s", resp.Status, body)
- }
-
- var userInfo UserInfo
- if err := json.Unmarshal(body, &userInfo); err != nil {
- return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
- }
- userInfo.claims = body
- return &userInfo, nil
-}
-
-// IDToken is an OpenID Connect extension that provides a predictable representation
-// of an authorization event.
-//
-// The ID Token only holds fields OpenID Connect requires. To access additional
-// claims returned by the server, use the Claims method.
-type IDToken struct {
- // The URL of the server which issued this token. OpenID Connect
- // requires this value always be identical to the URL used for
- // initial discovery.
- //
- // Note: Because of a known issue with Google Accounts' implementation
- // this value may differ when using Google.
- //
- // See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
- Issuer string
-
- // The client ID, or set of client IDs, that this token is issued for. For
- // common uses, this is the client that initialized the auth flow.
- //
- // This package ensures the audience contains an expected value.
- Audience []string
-
- // A unique string which identifies the end user.
- Subject string
-
- // Expiry of the token. Ths package will not process tokens that have
- // expired unless that validation is explicitly turned off.
- Expiry time.Time
- // When the token was issued by the provider.
- IssuedAt time.Time
-
- // Initial nonce provided during the authentication redirect.
- //
- // This package does NOT provided verification on the value of this field
- // and it's the user's responsibility to ensure it contains a valid value.
- Nonce string
-
- // at_hash claim, if set in the ID token. Callers can verify an access token
- // that corresponds to the ID token using the VerifyAccessToken method.
- AccessTokenHash string
-
- // signature algorithm used for ID token, needed to compute a verification hash of an
- // access token
- sigAlgorithm string
-
- // Raw payload of the id_token.
- claims []byte
-
- // Map of distributed claim names to claim sources
- distributedClaims map[string]claimSource
-}
-
-// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
-//
-// idToken, err := idTokenVerifier.Verify(rawIDToken)
-// if err != nil {
-// // handle error
-// }
-// var claims struct {
-// Email string `json:"email"`
-// EmailVerified bool `json:"email_verified"`
-// }
-// if err := idToken.Claims(&claims); err != nil {
-// // handle error
-// }
-//
-func (i *IDToken) Claims(v interface{}) error {
- if i.claims == nil {
- return errors.New("oidc: claims not set")
- }
- return json.Unmarshal(i.claims, v)
-}
-
-// VerifyAccessToken verifies that the hash of the access token that corresponds to the iD token
-// matches the hash in the id token. It returns an error if the hashes don't match.
-// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
-// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
-func (i *IDToken) VerifyAccessToken(accessToken string) error {
- if i.AccessTokenHash == "" {
- return errNoAtHash
- }
- var h hash.Hash
- switch i.sigAlgorithm {
- case RS256, ES256, PS256:
- h = sha256.New()
- case RS384, ES384, PS384:
- h = sha512.New384()
- case RS512, ES512, PS512:
- h = sha512.New()
- default:
- return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
- }
- h.Write([]byte(accessToken)) // hash documents that Write will never return an error
- sum := h.Sum(nil)[:h.Size()/2]
- actual := base64.RawURLEncoding.EncodeToString(sum)
- if actual != i.AccessTokenHash {
- return errInvalidAtHash
- }
- return nil
-}
-
-type idToken struct {
- Issuer string `json:"iss"`
- Subject string `json:"sub"`
- Audience audience `json:"aud"`
- Expiry jsonTime `json:"exp"`
- IssuedAt jsonTime `json:"iat"`
- NotBefore *jsonTime `json:"nbf"`
- Nonce string `json:"nonce"`
- AtHash string `json:"at_hash"`
- ClaimNames map[string]string `json:"_claim_names"`
- ClaimSources map[string]claimSource `json:"_claim_sources"`
-}
-
-type claimSource struct {
- Endpoint string `json:"endpoint"`
- AccessToken string `json:"access_token"`
-}
-
-type audience []string
-
-func (a *audience) UnmarshalJSON(b []byte) error {
- var s string
- if json.Unmarshal(b, &s) == nil {
- *a = audience{s}
- return nil
- }
- var auds []string
- if err := json.Unmarshal(b, &auds); err != nil {
- return err
- }
- *a = audience(auds)
- return nil
-}
-
-type jsonTime time.Time
-
-func (j *jsonTime) UnmarshalJSON(b []byte) error {
- var n json.Number
- if err := json.Unmarshal(b, &n); err != nil {
- return err
- }
- var unix int64
-
- if t, err := n.Int64(); err == nil {
- unix = t
- } else {
- f, err := n.Float64()
- if err != nil {
- return err
- }
- unix = int64(f)
- }
- *j = jsonTime(time.Unix(unix, 0))
- return nil
-}
-
-func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
- err := json.Unmarshal(body, &v)
- if err == nil {
- return nil
- }
- ct := r.Header.Get("Content-Type")
- mediaType, _, parseErr := mime.ParseMediaType(ct)
- if parseErr == nil && mediaType == "application/json" {
- return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
- }
- return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
-}
diff --git a/etcd/vendor/github.com/coreos/go-oidc/test b/etcd/vendor/github.com/coreos/go-oidc/test
deleted file mode 100644
index b262d0e75a..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/test
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# Filter out any files with a !golint build tag.
-LINTABLE=$( go list -tags=golint -f '
- {{- range $i, $file := .GoFiles -}}
- {{ $file }} {{ end }}
- {{ range $i, $file := .TestGoFiles -}}
- {{ $file }} {{ end }}' github.com/coreos/go-oidc )
-
-go test -v -i -race github.com/coreos/go-oidc/...
-go test -v -race github.com/coreos/go-oidc/...
-golint -set_exit_status $LINTABLE
-go vet github.com/coreos/go-oidc/...
-go build -v ./example/...
diff --git a/etcd/vendor/github.com/coreos/go-oidc/verify.go b/etcd/vendor/github.com/coreos/go-oidc/verify.go
deleted file mode 100644
index ff7555db77..0000000000
--- a/etcd/vendor/github.com/coreos/go-oidc/verify.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package oidc
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "strings"
- "time"
-
- "golang.org/x/oauth2"
- jose "gopkg.in/square/go-jose.v2"
-)
-
-const (
- issuerGoogleAccounts = "https://accounts.google.com"
- issuerGoogleAccountsNoScheme = "accounts.google.com"
-)
-
-// KeySet is a set of publc JSON Web Keys that can be used to validate the signature
-// of JSON web tokens. This is expected to be backed by a remote key set through
-// provider metadata discovery or an in-memory set of keys delivered out-of-band.
-type KeySet interface {
- // VerifySignature parses the JSON web token, verifies the signature, and returns
- // the raw payload. Header and claim fields are validated by other parts of the
- // package. For example, the KeySet does not need to check values such as signature
- // algorithm, issuer, and audience since the IDTokenVerifier validates these values
- // independently.
- //
- // If VerifySignature makes HTTP requests to verify the token, it's expected to
- // use any HTTP client associated with the context through ClientContext.
- VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
-}
-
-// IDTokenVerifier provides verification for ID Tokens.
-type IDTokenVerifier struct {
- keySet KeySet
- config *Config
- issuer string
-}
-
-// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
-//
-// It's easier to use provider discovery to construct an IDTokenVerifier than creating
-// one directly. This method is intended to be used with provider that don't support
-// metadata discovery, or avoiding round trips when the key set URL is already known.
-//
-// This constructor can be used to create a verifier directly using the issuer URL and
-// JSON Web Key Set URL without using discovery:
-//
-// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
-// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
-//
-// Since KeySet is an interface, this constructor can also be used to supply custom
-// public key sources. For example, if a user wanted to supply public keys out-of-band
-// and hold them statically in-memory:
-//
-// // Custom KeySet implementation.
-// keySet := newStatisKeySet(publicKeys...)
-//
-// // Verifier uses the custom KeySet implementation.
-// verifier := oidc.NewVerifier("https://auth.example.com", keySet, config)
-//
-func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
- return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
-}
-
-// Config is the configuration for an IDTokenVerifier.
-type Config struct {
- // Expected audience of the token. For a majority of the cases this is expected to be
- // the ID of the client that initialized the login flow. It may occasionally differ if
- // the provider supports the authorizing party (azp) claim.
- //
- // If not provided, users must explicitly set SkipClientIDCheck.
- ClientID string
- // If specified, only this set of algorithms may be used to sign the JWT.
- //
- // Since many providers only support RS256, SupportedSigningAlgs defaults to this value.
- SupportedSigningAlgs []string
-
- // If true, no ClientID check performed. Must be true if ClientID field is empty.
- SkipClientIDCheck bool
- // If true, token expiry is not checked.
- SkipExpiryCheck bool
-
- // SkipIssuerCheck is intended for specialized cases where the the caller wishes to
- // defer issuer validation. When enabled, callers MUST independently verify the Token's
- // Issuer is a known good value.
- //
- // Mismatched issuers often indicate client mis-configuration. If mismatches are
- // unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
- // this option.
- SkipIssuerCheck bool
-
- // Time function to check Token expiry. Defaults to time.Now
- Now func() time.Time
-}
-
-// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
-//
-// The returned IDTokenVerifier is tied to the Provider's context and its behavior is
-// undefined once the Provider's context is canceled.
-func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
- return NewVerifier(p.issuer, p.remoteKeySet, config)
-}
-
-func parseJWT(p string) ([]byte, error) {
- parts := strings.Split(p, ".")
- if len(parts) < 2 {
- return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
- }
- payload, err := base64.RawURLEncoding.DecodeString(parts[1])
- if err != nil {
- return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
- }
- return payload, nil
-}
-
-func contains(sli []string, ele string) bool {
- for _, s := range sli {
- if s == ele {
- return true
- }
- }
- return false
-}
-
-// Returns the Claims from the distributed JWT token
-func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
- req, err := http.NewRequest("GET", src.Endpoint, nil)
- if err != nil {
- return nil, fmt.Errorf("malformed request: %v", err)
- }
- if src.AccessToken != "" {
- req.Header.Set("Authorization", "Bearer "+src.AccessToken)
- }
-
- resp, err := doRequest(ctx, req)
- if err != nil {
- return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("unable to read response body: %v", err)
- }
-
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
- }
-
- token, err := verifier.Verify(ctx, string(body))
- if err != nil {
- return nil, fmt.Errorf("malformed response body: %v", err)
- }
-
- return token.claims, nil
-}
-
-func parseClaim(raw []byte, name string, v interface{}) error {
- var parsed map[string]json.RawMessage
- if err := json.Unmarshal(raw, &parsed); err != nil {
- return err
- }
-
- val, ok := parsed[name]
- if !ok {
- return fmt.Errorf("claim doesn't exist: %s", name)
- }
-
- return json.Unmarshal([]byte(val), v)
-}
-
-// Verify parses a raw ID Token, verifies it's been signed by the provider, preforms
-// any additional checks depending on the Config, and returns the payload.
-//
-// Verify does NOT do nonce validation, which is the callers responsibility.
-//
-// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
-//
-// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
-// if err != nil {
-// // handle error
-// }
-//
-// // Extract the ID Token from oauth2 token.
-// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
-// if !ok {
-// // handle error
-// }
-//
-// token, err := verifier.Verify(ctx, rawIDToken)
-//
-func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
- jws, err := jose.ParseSigned(rawIDToken)
- if err != nil {
- return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
- }
-
- // Throw out tokens with invalid claims before trying to verify the token. This lets
- // us do cheap checks before possibly re-syncing keys.
- payload, err := parseJWT(rawIDToken)
- if err != nil {
- return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
- }
- var token idToken
- if err := json.Unmarshal(payload, &token); err != nil {
- return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
- }
-
- distributedClaims := make(map[string]claimSource)
-
- //step through the token to map claim names to claim sources"
- for cn, src := range token.ClaimNames {
- if src == "" {
- return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
- }
- s, ok := token.ClaimSources[src]
- if !ok {
- return nil, fmt.Errorf("oidc: source does not exist")
- }
- distributedClaims[cn] = s
- }
-
- t := &IDToken{
- Issuer: token.Issuer,
- Subject: token.Subject,
- Audience: []string(token.Audience),
- Expiry: time.Time(token.Expiry),
- IssuedAt: time.Time(token.IssuedAt),
- Nonce: token.Nonce,
- AccessTokenHash: token.AtHash,
- claims: payload,
- distributedClaims: distributedClaims,
- }
-
- // Check issuer.
- if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
- // Google sometimes returns "accounts.google.com" as the issuer claim instead of
- // the required "https://accounts.google.com". Detect this case and allow it only
- // for Google.
- //
- // We will not add hooks to let other providers go off spec like this.
- if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
- return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
- }
- }
-
- // If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
- //
- // This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
- if !v.config.SkipClientIDCheck {
- if v.config.ClientID != "" {
- if !contains(t.Audience, v.config.ClientID) {
- return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
- }
- } else {
- return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
- }
- }
-
- // If a SkipExpiryCheck is false, make sure token is not expired.
- if !v.config.SkipExpiryCheck {
- now := time.Now
- if v.config.Now != nil {
- now = v.config.Now
- }
- nowTime := now()
-
- if t.Expiry.Before(nowTime) {
- return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry)
- }
-
- // If nbf claim is provided in token, ensure that it is indeed in the past.
- if token.NotBefore != nil {
- nbfTime := time.Time(*token.NotBefore)
- leeway := 1 * time.Minute
-
- if nowTime.Add(leeway).Before(nbfTime) {
- return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
- }
- }
- }
-
- switch len(jws.Signatures) {
- case 0:
- return nil, fmt.Errorf("oidc: id token not signed")
- case 1:
- default:
- return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
- }
-
- sig := jws.Signatures[0]
- supportedSigAlgs := v.config.SupportedSigningAlgs
- if len(supportedSigAlgs) == 0 {
- supportedSigAlgs = []string{RS256}
- }
-
- if !contains(supportedSigAlgs, sig.Header.Algorithm) {
- return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
- }
-
- t.sigAlgorithm = sig.Header.Algorithm
-
- gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
- if err != nil {
- return nil, fmt.Errorf("failed to verify signature: %v", err)
- }
-
- // Ensure that the payload returned by the square actually matches the payload parsed earlier.
- if !bytes.Equal(gotPayload, payload) {
- return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
- }
-
- return t, nil
-}
-
-// Nonce returns an auth code option which requires the ID Token created by the
-// OpenID Connect provider to contain the specified nonce.
-func Nonce(nonce string) oauth2.AuthCodeOption {
- return oauth2.SetAuthURLParam("nonce", nonce)
-}
diff --git a/etcd/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go b/etcd/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go
deleted file mode 100644
index ba4ae31f19..0000000000
--- a/etcd/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2014 Docker, Inc.
-// Copyright 2015-2018 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Package daemon provides a Go implementation of the sd_notify protocol.
-// It can be used to inform systemd of service start-up completion, watchdog
-// events, and other status changes.
-//
-// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description
-package daemon
-
-import (
- "net"
- "os"
-)
-
-const (
- // SdNotifyReady tells the service manager that service startup is finished
- // or the service finished loading its configuration.
- SdNotifyReady = "READY=1"
-
- // SdNotifyStopping tells the service manager that the service is beginning
- // its shutdown.
- SdNotifyStopping = "STOPPING=1"
-
- // SdNotifyReloading tells the service manager that this service is
- // reloading its configuration. Note that you must call SdNotifyReady when
- // it completed reloading.
- SdNotifyReloading = "RELOADING=1"
-
- // SdNotifyWatchdog tells the service manager to update the watchdog
- // timestamp for the service.
- SdNotifyWatchdog = "WATCHDOG=1"
-)
-
-// SdNotify sends a message to the init daemon. It is common to ignore the error.
-// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
-// will be unconditionally unset.
-//
-// It returns one of the following:
-// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
-// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
-// (true, nil) - notification supported, data has been sent
-func SdNotify(unsetEnvironment bool, state string) (bool, error) {
- socketAddr := &net.UnixAddr{
- Name: os.Getenv("NOTIFY_SOCKET"),
- Net: "unixgram",
- }
-
- // NOTIFY_SOCKET not set
- if socketAddr.Name == "" {
- return false, nil
- }
-
- if unsetEnvironment {
- if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil {
- return false, err
- }
- }
-
- conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
- // Error connecting to NOTIFY_SOCKET
- if err != nil {
- return false, err
- }
- defer conn.Close()
-
- if _, err = conn.Write([]byte(state)); err != nil {
- return false, err
- }
- return true, nil
-}
diff --git a/etcd/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go b/etcd/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go
deleted file mode 100644
index 7a0e0d3a51..0000000000
--- a/etcd/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2016 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package daemon
-
-import (
- "fmt"
- "os"
- "strconv"
- "time"
-)
-
-// SdWatchdogEnabled returns watchdog information for a service.
-// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every
-// time / 2.
-// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and
-// `WATCHDOG_PID` will be unconditionally unset.
-//
-// It returns one of the following:
-// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
-// (0, err) - an error happened (e.g. error converting time).
-// (time, nil) - watchdog is enabled and we can send ping.
-// time is delay before inactive service will be killed.
-func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
- wusec := os.Getenv("WATCHDOG_USEC")
- wpid := os.Getenv("WATCHDOG_PID")
- if unsetEnvironment {
- wusecErr := os.Unsetenv("WATCHDOG_USEC")
- wpidErr := os.Unsetenv("WATCHDOG_PID")
- if wusecErr != nil {
- return 0, wusecErr
- }
- if wpidErr != nil {
- return 0, wpidErr
- }
- }
-
- if wusec == "" {
- return 0, nil
- }
- s, err := strconv.Atoi(wusec)
- if err != nil {
- return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
- }
- if s <= 0 {
- return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
- }
- interval := time.Duration(s) * time.Microsecond
-
- if wpid == "" {
- return interval, nil
- }
- p, err := strconv.Atoi(wpid)
- if err != nil {
- return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
- }
- if os.Getpid() != p {
- return 0, nil
- }
-
- return interval, nil
-}
diff --git a/etcd/vendor/github.com/docker/distribution/LICENSE b/etcd/vendor/github.com/docker/distribution/LICENSE
deleted file mode 100644
index e06d208186..0000000000
--- a/etcd/vendor/github.com/docker/distribution/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/etcd/vendor/github.com/docker/distribution/digestset/set.go b/etcd/vendor/github.com/docker/distribution/digestset/set.go
deleted file mode 100644
index 71327dca72..0000000000
--- a/etcd/vendor/github.com/docker/distribution/digestset/set.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package digestset
-
-import (
- "errors"
- "sort"
- "strings"
- "sync"
-
- digest "github.com/opencontainers/go-digest"
-)
-
-var (
- // ErrDigestNotFound is used when a matching digest
- // could not be found in a set.
- ErrDigestNotFound = errors.New("digest not found")
-
- // ErrDigestAmbiguous is used when multiple digests
- // are found in a set. None of the matching digests
- // should be considered valid matches.
- ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by easily referenced by a string
-// representation of the digest as well as short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
- mutex sync.RWMutex
- entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
- return &Set{
- entries: digestEntries{},
- }
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
- if len(hex) == len(shortHex) {
- if hex != shortHex {
- return false
- }
- if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- } else if !strings.HasPrefix(hex, shortHex) {
- return false
- } else if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- return true
-}
-
-// Lookup looks for a digest matching the given string representation.
-// If no digests could be found ErrDigestNotFound will be returned
-// with an empty digest value. If multiple matches are found
-// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (digest.Digest, error) {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- if len(dst.entries) == 0 {
- return "", ErrDigestNotFound
- }
- var (
- searchFunc func(int) bool
- alg digest.Algorithm
- hex string
- )
- dgst, err := digest.Parse(d)
- if err == digest.ErrDigestInvalidFormat {
- hex = d
- searchFunc = func(i int) bool {
- return dst.entries[i].val >= d
- }
- } else {
- hex = dgst.Hex()
- alg = dgst.Algorithm()
- searchFunc = func(i int) bool {
- if dst.entries[i].val == hex {
- return dst.entries[i].alg >= alg
- }
- return dst.entries[i].val >= hex
- }
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
- return "", ErrDigestNotFound
- }
- if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
- return dst.entries[idx].digest, nil
- }
- if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
- return "", ErrDigestAmbiguous
- }
-
- return dst.entries[idx].digest, nil
-}
-
-// Add adds the given digest to the set. An error will be returned
-// if the given digest is invalid. If the digest already exists in the
-// set, this operation will be a no-op.
-func (dst *Set) Add(d digest.Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) {
- dst.entries = append(dst.entries, entry)
- return nil
- } else if dst.entries[idx].digest == d {
- return nil
- }
-
- entries := append(dst.entries, nil)
- copy(entries[idx+1:], entries[idx:len(entries)-1])
- entries[idx] = entry
- dst.entries = entries
- return nil
-}
-
-// Remove removes the given digest from the set. An err will be
-// returned if the given digest is invalid. If the digest does
-// not exist in the set, this operation will be a no-op.
-func (dst *Set) Remove(d digest.Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- // Not found if idx is after or value at idx is not digest
- if idx == len(dst.entries) || dst.entries[idx].digest != d {
- return nil
- }
-
- entries := dst.entries
- copy(entries[idx:], entries[idx+1:])
- entries = entries[:len(entries)-1]
- dst.entries = entries
-
- return nil
-}
-
-// All returns all the digests in the set
-func (dst *Set) All() []digest.Digest {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- retValues := make([]digest.Digest, len(dst.entries))
- for i := range dst.entries {
- retValues[i] = dst.entries[i].digest
- }
-
- return retValues
-}
-
-// ShortCodeTable returns a map of Digest to unique short codes. The
-// length represents the minimum value, the maximum length may be the
-// entire value of digest if uniqueness cannot be achieved without the
-// full value. This function will attempt to make short codes as short
-// as possible to be unique.
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- m := make(map[digest.Digest]string, len(dst.entries))
- l := length
- resetIdx := 0
- for i := 0; i < len(dst.entries); i++ {
- var short string
- extended := true
- for extended {
- extended = false
- if len(dst.entries[i].val) <= l {
- short = dst.entries[i].digest.String()
- } else {
- short = dst.entries[i].val[:l]
- for j := i + 1; j < len(dst.entries); j++ {
- if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
- if j > resetIdx {
- resetIdx = j
- }
- extended = true
- } else {
- break
- }
- }
- if extended {
- l++
- }
- }
- }
- m[dst.entries[i].digest] = short
- if i >= resetIdx {
- l = length
- }
- }
- return m
-}
-
-type digestEntry struct {
- alg digest.Algorithm
- val string
- digest digest.Digest
-}
-
-type digestEntries []*digestEntry
-
-func (d digestEntries) Len() int {
- return len(d)
-}
-
-func (d digestEntries) Less(i, j int) bool {
- if d[i].val != d[j].val {
- return d[i].val < d[j].val
- }
- return d[i].alg < d[j].alg
-}
-
-func (d digestEntries) Swap(i, j int) {
- d[i], d[j] = d[j], d[i]
-}
diff --git a/etcd/vendor/github.com/docker/distribution/reference/helpers.go b/etcd/vendor/github.com/docker/distribution/reference/helpers.go
deleted file mode 100644
index 978df7eabb..0000000000
--- a/etcd/vendor/github.com/docker/distribution/reference/helpers.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package reference
-
-import "path"
-
-// IsNameOnly returns true if reference only contains a repo name.
-func IsNameOnly(ref Named) bool {
- if _, ok := ref.(NamedTagged); ok {
- return false
- }
- if _, ok := ref.(Canonical); ok {
- return false
- }
- return true
-}
-
-// FamiliarName returns the familiar name string
-// for the given named, familiarizing if needed.
-func FamiliarName(ref Named) string {
- if nn, ok := ref.(normalizedNamed); ok {
- return nn.Familiar().Name()
- }
- return ref.Name()
-}
-
-// FamiliarString returns the familiar string representation
-// for the given reference, familiarizing if needed.
-func FamiliarString(ref Reference) string {
- if nn, ok := ref.(normalizedNamed); ok {
- return nn.Familiar().String()
- }
- return ref.String()
-}
-
-// FamiliarMatch reports whether ref matches the specified pattern.
-// See https://godoc.org/path#Match for supported patterns.
-func FamiliarMatch(pattern string, ref Reference) (bool, error) {
- matched, err := path.Match(pattern, FamiliarString(ref))
- if namedRef, isNamed := ref.(Named); isNamed && !matched {
- matched, _ = path.Match(pattern, FamiliarName(namedRef))
- }
- return matched, err
-}
diff --git a/etcd/vendor/github.com/docker/distribution/reference/normalize.go b/etcd/vendor/github.com/docker/distribution/reference/normalize.go
deleted file mode 100644
index b3dfb7a6d7..0000000000
--- a/etcd/vendor/github.com/docker/distribution/reference/normalize.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/docker/distribution/digestset"
- "github.com/opencontainers/go-digest"
-)
-
-var (
- legacyDefaultDomain = "index.docker.io"
- defaultDomain = "docker.io"
- officialRepoName = "library"
- defaultTag = "latest"
-)
-
-// normalizedNamed represents a name which has been
-// normalized and has a familiar form. A familiar name
-// is what is used in Docker UI. An example normalized
-// name is "docker.io/library/ubuntu" and corresponding
-// familiar name of "ubuntu".
-type normalizedNamed interface {
- Named
- Familiar() Named
-}
-
-// ParseNormalizedNamed parses a string into a named reference
-// transforming a familiar name from Docker UI to a fully
-// qualified reference. If the value may be an identifier
-// use ParseAnyReference.
-func ParseNormalizedNamed(s string) (Named, error) {
- if ok := anchoredIdentifierRegexp.MatchString(s); ok {
- return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
- }
- domain, remainder := splitDockerDomain(s)
- var remoteName string
- if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
- remoteName = remainder[:tagSep]
- } else {
- remoteName = remainder
- }
- if strings.ToLower(remoteName) != remoteName {
- return nil, errors.New("invalid reference format: repository name must be lowercase")
- }
-
- ref, err := Parse(domain + "/" + remainder)
- if err != nil {
- return nil, err
- }
- named, isNamed := ref.(Named)
- if !isNamed {
- return nil, fmt.Errorf("reference %s has no name", ref.String())
- }
- return named, nil
-}
-
-// ParseDockerRef normalizes the image reference following the docker convention. This is added
-// mainly for backward compatibility.
-// The reference returned can only be either tagged or digested. For reference contains both tag
-// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
-// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
-// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
-func ParseDockerRef(ref string) (Named, error) {
- named, err := ParseNormalizedNamed(ref)
- if err != nil {
- return nil, err
- }
- if _, ok := named.(NamedTagged); ok {
- if canonical, ok := named.(Canonical); ok {
- // The reference is both tagged and digested, only
- // return digested.
- newNamed, err := WithName(canonical.Name())
- if err != nil {
- return nil, err
- }
- newCanonical, err := WithDigest(newNamed, canonical.Digest())
- if err != nil {
- return nil, err
- }
- return newCanonical, nil
- }
- }
- return TagNameOnly(named), nil
-}
-
-// splitDockerDomain splits a repository name to domain and remotename string.
-// If no valid domain is found, the default domain is used. Repository name
-// needs to be already validated before.
-func splitDockerDomain(name string) (domain, remainder string) {
- i := strings.IndexRune(name, '/')
- if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
- domain, remainder = defaultDomain, name
- } else {
- domain, remainder = name[:i], name[i+1:]
- }
- if domain == legacyDefaultDomain {
- domain = defaultDomain
- }
- if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
- remainder = officialRepoName + "/" + remainder
- }
- return
-}
-
-// familiarizeName returns a shortened version of the name familiar
-// to to the Docker UI. Familiar names have the default domain
-// "docker.io" and "library/" repository prefix removed.
-// For example, "docker.io/library/redis" will have the familiar
-// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
-// Returns a familiarized named only reference.
-func familiarizeName(named namedRepository) repository {
- repo := repository{
- domain: named.Domain(),
- path: named.Path(),
- }
-
- if repo.domain == defaultDomain {
- repo.domain = ""
- // Handle official repositories which have the pattern "library/"
- if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
- repo.path = split[1]
- }
- }
- return repo
-}
-
-func (r reference) Familiar() Named {
- return reference{
- namedRepository: familiarizeName(r.namedRepository),
- tag: r.tag,
- digest: r.digest,
- }
-}
-
-func (r repository) Familiar() Named {
- return familiarizeName(r)
-}
-
-func (t taggedReference) Familiar() Named {
- return taggedReference{
- namedRepository: familiarizeName(t.namedRepository),
- tag: t.tag,
- }
-}
-
-func (c canonicalReference) Familiar() Named {
- return canonicalReference{
- namedRepository: familiarizeName(c.namedRepository),
- digest: c.digest,
- }
-}
-
-// TagNameOnly adds the default tag "latest" to a reference if it only has
-// a repo name.
-func TagNameOnly(ref Named) Named {
- if IsNameOnly(ref) {
- namedTagged, err := WithTag(ref, defaultTag)
- if err != nil {
- // Default tag must be valid, to create a NamedTagged
- // type with non-validated input the WithTag function
- // should be used instead
- panic(err)
- }
- return namedTagged
- }
- return ref
-}
-
-// ParseAnyReference parses a reference string as a possible identifier,
-// full digest, or familiar name.
-func ParseAnyReference(ref string) (Reference, error) {
- if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
- return digestReference("sha256:" + ref), nil
- }
- if dgst, err := digest.Parse(ref); err == nil {
- return digestReference(dgst), nil
- }
-
- return ParseNormalizedNamed(ref)
-}
-
-// ParseAnyReferenceWithSet parses a reference string as a possible short
-// identifier to be matched in a digest set, a full digest, or familiar name.
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
- if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
- dgst, err := ds.Lookup(ref)
- if err == nil {
- return digestReference(dgst), nil
- }
- } else {
- if dgst, err := digest.Parse(ref); err == nil {
- return digestReference(dgst), nil
- }
- }
-
- return ParseNormalizedNamed(ref)
-}
diff --git a/etcd/vendor/github.com/docker/distribution/reference/reference.go b/etcd/vendor/github.com/docker/distribution/reference/reference.go
deleted file mode 100644
index 8c0c23b2fe..0000000000
--- a/etcd/vendor/github.com/docker/distribution/reference/reference.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Package reference provides a general type to represent any way of referencing images within the registry.
-// Its main purpose is to abstract tags and digests (content-addressable hash).
-//
-// Grammar
-//
-// reference := name [ ":" tag ] [ "@" digest ]
-// name := [domain '/'] path-component ['/' path-component]*
-// domain := domain-component ['.' domain-component]* [':' port-number]
-// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
-// port-number := /[0-9]+/
-// path-component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
-// separator := /[_.]|__|[-]*/
-//
-// tag := /[\w][\w.-]{0,127}/
-//
-// digest := digest-algorithm ":" digest-hex
-// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
-// digest-algorithm-separator := /[+.-_]/
-// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
-// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
-//
-// identifier := /[a-f0-9]{64}/
-// short-identifier := /[a-f0-9]{6,64}/
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/opencontainers/go-digest"
-)
-
-const (
- // NameTotalLengthMax is the maximum total number of characters in a repository name.
- NameTotalLengthMax = 255
-)
-
-var (
- // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
- ErrReferenceInvalidFormat = errors.New("invalid reference format")
-
- // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
- ErrTagInvalidFormat = errors.New("invalid tag format")
-
- // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
- ErrDigestInvalidFormat = errors.New("invalid digest format")
-
- // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
- ErrNameContainsUppercase = errors.New("repository name must be lowercase")
-
- // ErrNameEmpty is returned for empty, invalid repository names.
- ErrNameEmpty = errors.New("repository name must have at least one component")
-
- // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
- ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
-
- // ErrNameNotCanonical is returned when a name is not canonical.
- ErrNameNotCanonical = errors.New("repository name must be canonical")
-)
-
-// Reference is an opaque object reference identifier that may include
-// modifiers such as a hostname, name, tag, and digest.
-type Reference interface {
- // String returns the full reference
- String() string
-}
-
-// Field provides a wrapper type for resolving correct reference types when
-// working with encoding.
-type Field struct {
- reference Reference
-}
-
-// AsField wraps a reference in a Field for encoding.
-func AsField(reference Reference) Field {
- return Field{reference}
-}
-
-// Reference unwraps the reference type from the field to
-// return the Reference object. This object should be
-// of the appropriate type to further check for different
-// reference types.
-func (f Field) Reference() Reference {
- return f.reference
-}
-
-// MarshalText serializes the field to byte text which
-// is the string of the reference.
-func (f Field) MarshalText() (p []byte, err error) {
- return []byte(f.reference.String()), nil
-}
-
-// UnmarshalText parses text bytes by invoking the
-// reference parser to ensure the appropriately
-// typed reference object is wrapped by field.
-func (f *Field) UnmarshalText(p []byte) error {
- r, err := Parse(string(p))
- if err != nil {
- return err
- }
-
- f.reference = r
- return nil
-}
-
-// Named is an object with a full name
-type Named interface {
- Reference
- Name() string
-}
-
-// Tagged is an object which has a tag
-type Tagged interface {
- Reference
- Tag() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
- Named
- Tag() string
-}
-
-// Digested is an object which has a digest
-// in which it can be referenced by
-type Digested interface {
- Reference
- Digest() digest.Digest
-}
-
-// Canonical reference is an object with a fully unique
-// name including a name with domain and digest
-type Canonical interface {
- Named
- Digest() digest.Digest
-}
-
-// namedRepository is a reference to a repository with a name.
-// A namedRepository has both domain and path components.
-type namedRepository interface {
- Named
- Domain() string
- Path() string
-}
-
-// Domain returns the domain part of the Named reference
-func Domain(named Named) string {
- if r, ok := named.(namedRepository); ok {
- return r.Domain()
- }
- domain, _ := splitDomain(named.Name())
- return domain
-}
-
-// Path returns the name without the domain part of the Named reference
-func Path(named Named) (name string) {
- if r, ok := named.(namedRepository); ok {
- return r.Path()
- }
- _, path := splitDomain(named.Name())
- return path
-}
-
-func splitDomain(name string) (string, string) {
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if len(match) != 3 {
- return "", name
- }
- return match[1], match[2]
-}
-
-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-// DEPRECATED: Use Domain or Path
-func SplitHostname(named Named) (string, string) {
- if r, ok := named.(namedRepository); ok {
- return r.Domain(), r.Path()
- }
- return splitDomain(named.Name())
-}
-
-// Parse parses s and returns a syntactically valid Reference.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: Parse will not handle short digests.
-func Parse(s string) (Reference, error) {
- matches := ReferenceRegexp.FindStringSubmatch(s)
- if matches == nil {
- if s == "" {
- return nil, ErrNameEmpty
- }
- if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
- return nil, ErrNameContainsUppercase
- }
- return nil, ErrReferenceInvalidFormat
- }
-
- if len(matches[1]) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- var repo repository
-
- nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
- if len(nameMatch) == 3 {
- repo.domain = nameMatch[1]
- repo.path = nameMatch[2]
- } else {
- repo.domain = ""
- repo.path = matches[1]
- }
-
- ref := reference{
- namedRepository: repo,
- tag: matches[2],
- }
- if matches[3] != "" {
- var err error
- ref.digest, err = digest.Parse(matches[3])
- if err != nil {
- return nil, err
- }
- }
-
- r := getBestReferenceType(ref)
- if r == nil {
- return nil, ErrNameEmpty
- }
-
- return r, nil
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name and be in the canonical
-// form, otherwise an error is returned.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: ParseNamed will not handle short digests.
-func ParseNamed(s string) (Named, error) {
- named, err := ParseNormalizedNamed(s)
- if err != nil {
- return nil, err
- }
- if named.String() != s {
- return nil, ErrNameNotCanonical
- }
- return named, nil
-}
-
-// WithName returns a named object representing the given string. If the input
-// is invalid ErrReferenceInvalidFormat will be returned.
-func WithName(name string) (Named, error) {
- if len(name) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if match == nil || len(match) != 3 {
- return nil, ErrReferenceInvalidFormat
- }
- return repository{
- domain: match[1],
- path: match[2],
- }, nil
-}
-
-// WithTag combines the name from "name" and the tag from "tag" to form a
-// reference incorporating both the name and the tag.
-func WithTag(name Named, tag string) (NamedTagged, error) {
- if !anchoredTagRegexp.MatchString(tag) {
- return nil, ErrTagInvalidFormat
- }
- var repo repository
- if r, ok := name.(namedRepository); ok {
- repo.domain = r.Domain()
- repo.path = r.Path()
- } else {
- repo.path = name.Name()
- }
- if canonical, ok := name.(Canonical); ok {
- return reference{
- namedRepository: repo,
- tag: tag,
- digest: canonical.Digest(),
- }, nil
- }
- return taggedReference{
- namedRepository: repo,
- tag: tag,
- }, nil
-}
-
-// WithDigest combines the name from "name" and the digest from "digest" to form
-// a reference incorporating both the name and the digest.
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
- if !anchoredDigestRegexp.MatchString(digest.String()) {
- return nil, ErrDigestInvalidFormat
- }
- var repo repository
- if r, ok := name.(namedRepository); ok {
- repo.domain = r.Domain()
- repo.path = r.Path()
- } else {
- repo.path = name.Name()
- }
- if tagged, ok := name.(Tagged); ok {
- return reference{
- namedRepository: repo,
- tag: tagged.Tag(),
- digest: digest,
- }, nil
- }
- return canonicalReference{
- namedRepository: repo,
- digest: digest,
- }, nil
-}
-
-// TrimNamed removes any tag or digest from the named reference.
-func TrimNamed(ref Named) Named {
- domain, path := SplitHostname(ref)
- return repository{
- domain: domain,
- path: path,
- }
-}
-
-func getBestReferenceType(ref reference) Reference {
- if ref.Name() == "" {
- // Allow digest only references
- if ref.digest != "" {
- return digestReference(ref.digest)
- }
- return nil
- }
- if ref.tag == "" {
- if ref.digest != "" {
- return canonicalReference{
- namedRepository: ref.namedRepository,
- digest: ref.digest,
- }
- }
- return ref.namedRepository
- }
- if ref.digest == "" {
- return taggedReference{
- namedRepository: ref.namedRepository,
- tag: ref.tag,
- }
- }
-
- return ref
-}
-
-type reference struct {
- namedRepository
- tag string
- digest digest.Digest
-}
-
-func (r reference) String() string {
- return r.Name() + ":" + r.tag + "@" + r.digest.String()
-}
-
-func (r reference) Tag() string {
- return r.tag
-}
-
-func (r reference) Digest() digest.Digest {
- return r.digest
-}
-
-type repository struct {
- domain string
- path string
-}
-
-func (r repository) String() string {
- return r.Name()
-}
-
-func (r repository) Name() string {
- if r.domain == "" {
- return r.path
- }
- return r.domain + "/" + r.path
-}
-
-func (r repository) Domain() string {
- return r.domain
-}
-
-func (r repository) Path() string {
- return r.path
-}
-
-type digestReference digest.Digest
-
-func (d digestReference) String() string {
- return digest.Digest(d).String()
-}
-
-func (d digestReference) Digest() digest.Digest {
- return digest.Digest(d)
-}
-
-type taggedReference struct {
- namedRepository
- tag string
-}
-
-func (t taggedReference) String() string {
- return t.Name() + ":" + t.tag
-}
-
-func (t taggedReference) Tag() string {
- return t.tag
-}
-
-type canonicalReference struct {
- namedRepository
- digest digest.Digest
-}
-
-func (c canonicalReference) String() string {
- return c.Name() + "@" + c.digest.String()
-}
-
-func (c canonicalReference) Digest() digest.Digest {
- return c.digest
-}
diff --git a/etcd/vendor/github.com/docker/distribution/reference/regexp.go b/etcd/vendor/github.com/docker/distribution/reference/regexp.go
deleted file mode 100644
index 7860349320..0000000000
--- a/etcd/vendor/github.com/docker/distribution/reference/regexp.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package reference
-
-import "regexp"
-
-var (
- // alphaNumericRegexp defines the alpha numeric atom, typically a
- // component of names. This only allows lower case characters and digits.
- alphaNumericRegexp = match(`[a-z0-9]+`)
-
- // separatorRegexp defines the separators allowed to be embedded in name
- // components. This allow one period, one or two underscore and multiple
- // dashes.
- separatorRegexp = match(`(?:[._]|__|[-]*)`)
-
- // nameComponentRegexp restricts registry path component names to start
- // with at least one letter or number, with following parts able to be
- // separated by one period, one or two underscore and multiple dashes.
- nameComponentRegexp = expression(
- alphaNumericRegexp,
- optional(repeated(separatorRegexp, alphaNumericRegexp)))
-
- // domainComponentRegexp restricts the registry domain component of a
- // repository name to start with a component as defined by DomainRegexp
- // and followed by an optional port.
- domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
-
- // DomainRegexp defines the structure of potential domain components
- // that may be part of image names. This is purposely a subset of what is
- // allowed by DNS to ensure backwards compatibility with Docker image
- // names.
- DomainRegexp = expression(
- domainComponentRegexp,
- optional(repeated(literal(`.`), domainComponentRegexp)),
- optional(literal(`:`), match(`[0-9]+`)))
-
- // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
- TagRegexp = match(`[\w][\w.-]{0,127}`)
-
- // anchoredTagRegexp matches valid tag names, anchored at the start and
- // end of the matched string.
- anchoredTagRegexp = anchored(TagRegexp)
-
- // DigestRegexp matches valid digests.
- DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
-
- // anchoredDigestRegexp matches valid digests, anchored at the start and
- // end of the matched string.
- anchoredDigestRegexp = anchored(DigestRegexp)
-
- // NameRegexp is the format for the name component of references. The
- // regexp has capturing groups for the domain and name part omitting
- // the separating forward slash from either.
- NameRegexp = expression(
- optional(DomainRegexp, literal(`/`)),
- nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp)))
-
- // anchoredNameRegexp is used to parse a name value, capturing the
- // domain and trailing components.
- anchoredNameRegexp = anchored(
- optional(capture(DomainRegexp), literal(`/`)),
- capture(nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp))))
-
- // ReferenceRegexp is the full supported format of a reference. The regexp
- // is anchored and has capturing groups for name, tag, and digest
- // components.
- ReferenceRegexp = anchored(capture(NameRegexp),
- optional(literal(":"), capture(TagRegexp)),
- optional(literal("@"), capture(DigestRegexp)))
-
- // IdentifierRegexp is the format for string identifier used as a
- // content addressable identifier using sha256. These identifiers
- // are like digests without the algorithm, since sha256 is used.
- IdentifierRegexp = match(`([a-f0-9]{64})`)
-
- // ShortIdentifierRegexp is the format used to represent a prefix
- // of an identifier. A prefix may be used to match a sha256 identifier
- // within a list of trusted identifiers.
- ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
-
- // anchoredIdentifierRegexp is used to check or match an
- // identifier value, anchored at start and end of string.
- anchoredIdentifierRegexp = anchored(IdentifierRegexp)
-
- // anchoredShortIdentifierRegexp is used to check if a value
- // is a possible identifier prefix, anchored at start and end
- // of string.
- anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
-)
-
-// match compiles the string to a regular expression.
-var match = regexp.MustCompile
-
-// literal compiles s into a literal regular expression, escaping any regexp
-// reserved characters.
-func literal(s string) *regexp.Regexp {
- re := match(regexp.QuoteMeta(s))
-
- if _, complete := re.LiteralPrefix(); !complete {
- panic("must be a literal")
- }
-
- return re
-}
-
-// expression defines a full expression, where each regular expression must
-// follow the previous.
-func expression(res ...*regexp.Regexp) *regexp.Regexp {
- var s string
- for _, re := range res {
- s += re.String()
- }
-
- return match(s)
-}
-
-// optional wraps the expression in a non-capturing group and makes the
-// production optional.
-func optional(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `?`)
-}
-
-// repeated wraps the regexp in a non-capturing group to get one or more
-// matches.
-func repeated(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `+`)
-}
-
-// group wraps the regexp in a non-capturing group.
-func group(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(?:` + expression(res...).String() + `)`)
-}
-
-// capture wraps the expression in a capturing group.
-func capture(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(` + expression(res...).String() + `)`)
-}
-
-// anchored anchors the regular expression by adding start and end delimiters.
-func anchored(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`^` + expression(res...).String() + `$`)
-}
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/.gitignore b/etcd/vendor/github.com/felixge/httpsnoop/.gitignore
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/.travis.yml b/etcd/vendor/github.com/felixge/httpsnoop/.travis.yml
deleted file mode 100644
index bfc421200d..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - 1.8
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/etcd/vendor/github.com/felixge/httpsnoop/LICENSE.txt
deleted file mode 100644
index e028b46a9b..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/Makefile b/etcd/vendor/github.com/felixge/httpsnoop/Makefile
deleted file mode 100644
index 2d84889aed..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-.PHONY: ci generate clean
-
-ci: clean generate
- go test -v ./...
-
-generate:
- go generate .
-
-clean:
- rm -rf *_generated*.go
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/README.md b/etcd/vendor/github.com/felixge/httpsnoop/README.md
deleted file mode 100644
index ddcecd13e7..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/README.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# httpsnoop
-
-Package httpsnoop provides an easy way to capture http related metrics (i.e.
-response time, bytes written, and http status code) from your application's
-http.Handlers.
-
-Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
-which is also exposed for users interested in a more low-level API.
-
-[](https://godoc.org/github.com/felixge/httpsnoop)
-[](https://travis-ci.org/felixge/httpsnoop)
-
-## Usage Example
-
-```go
-// myH is your app's http handler, perhaps a http.ServeMux or similar.
-var myH http.Handler
-// wrappedH wraps myH in order to log every request.
-wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- m := httpsnoop.CaptureMetrics(myH, w, r)
- log.Printf(
- "%s %s (code=%d dt=%s written=%d)",
- r.Method,
- r.URL,
- m.Code,
- m.Duration,
- m.Written,
- )
-})
-http.ListenAndServe(":8080", wrappedH)
-```
-
-## Why this package exists
-
-Instrumenting an application's http.Handler is surprisingly difficult.
-
-However if you google for e.g. "capture ResponseWriter status code" you'll find
-lots of advise and code examples that suggest it to be a fairly trivial
-undertaking. Unfortunately everything I've seen so far has a high chance of
-breaking your application.
-
-The main problem is that a `http.ResponseWriter` often implements additional
-interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
-`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
-in your own struct that also implements the `http.ResponseWriter` interface
-will hide the additional interfaces mentioned above. This has a high change of
-introducing subtle bugs into any non-trivial application.
-
-Another approach I've seen people take is to return a struct that implements
-all of the interfaces above. However, that's also problematic, because it's
-difficult to fake some of these interfaces behaviors when the underlying
-`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
-because an application may choose to operate differently, merely because it
-detects the presence of these additional interfaces.
-
-This package solves this problem by checking which additional interfaces a
-`http.ResponseWriter` implements, returning a wrapped version implementing the
-exact same set of interfaces.
-
-Additionally this package properly handles edge cases such as `WriteHeader` not
-being called, or called more than once, as well as concurrent calls to
-`http.ResponseWriter` methods, and even calls happening after the wrapped
-`ServeHTTP` has already returned.
-
-Unfortunately this package is not perfect either. It's possible that it is
-still missing some interfaces provided by the go core (let me know if you find
-one), and it won't work for applications adding their own interfaces into the
-mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
-`http.ResponseWriter` and type-assert the result to its other interfaces.
-
-However, hopefully the explanation above has sufficiently scared you of rolling
-your own solution to this problem. httpsnoop may still break your application,
-but at least it tries to avoid it as much as possible.
-
-Anyway, the real problem here is that smuggling additional interfaces inside
-`http.ResponseWriter` is a problematic design choice, but it probably goes as
-deep as the Go language specification itself. But that's okay, I still prefer
-Go over the alternatives ;).
-
-## Performance
-
-```
-BenchmarkBaseline-8 20000 94912 ns/op
-BenchmarkCaptureMetrics-8 20000 95461 ns/op
-```
-
-As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
-overhead of ~500 ns per http request on my machine. However, the margin of
-error appears to be larger than that, therefor it should be reasonable to
-assume that the overhead introduced by `CaptureMetrics` is absolutely
-negligible.
-
-## License
-
-MIT
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/etcd/vendor/github.com/felixge/httpsnoop/capture_metrics.go
deleted file mode 100644
index b77cc7c009..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package httpsnoop
-
-import (
- "io"
- "net/http"
- "time"
-)
-
-// Metrics holds metrics captured from CaptureMetrics.
-type Metrics struct {
- // Code is the first http response code passed to the WriteHeader func of
- // the ResponseWriter. If no such call is made, a default code of 200 is
- // assumed instead.
- Code int
- // Duration is the time it took to execute the handler.
- Duration time.Duration
- // Written is the number of bytes successfully written by the Write or
- // ReadFrom function of the ResponseWriter. ResponseWriters may also write
- // data to their underlaying connection directly (e.g. headers), but those
- // are not tracked. Therefor the number of Written bytes will usually match
- // the size of the response body.
- Written int64
-}
-
-// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
-// returns the metrics it captured from it.
-func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
- return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
- hnd.ServeHTTP(ww, r)
- })
-}
-
-// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
-// resulting metrics. This is very similar to CaptureMetrics (which is just
-// sugar on top of this func), but is a more usable interface if your
-// application doesn't use the Go http.Handler interface.
-func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
- m := Metrics{Code: http.StatusOK}
- m.CaptureMetrics(w, fn)
- return m
-}
-
-// CaptureMetrics wraps w and calls fn with the wrapped w and updates
-// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
-// but allows one to customize starting Metrics object.
-func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
- var (
- start = time.Now()
- headerWritten bool
- hooks = Hooks{
- WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
- return func(code int) {
- next(code)
-
- if !headerWritten {
- m.Code = code
- headerWritten = true
- }
- }
- },
-
- Write: func(next WriteFunc) WriteFunc {
- return func(p []byte) (int, error) {
- n, err := next(p)
-
- m.Written += int64(n)
- headerWritten = true
- return n, err
- }
- },
-
- ReadFrom: func(next ReadFromFunc) ReadFromFunc {
- return func(src io.Reader) (int64, error) {
- n, err := next(src)
-
- headerWritten = true
- m.Written += n
- return n, err
- }
- },
- }
- )
-
- fn(Wrap(w, hooks))
- m.Duration += time.Since(start)
-}
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/docs.go b/etcd/vendor/github.com/felixge/httpsnoop/docs.go
deleted file mode 100644
index 203c35b3c6..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/docs.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Package httpsnoop provides an easy way to capture http related metrics (i.e.
-// response time, bytes written, and http status code) from your application's
-// http.Handlers.
-//
-// Doing this requires non-trivial wrapping of the http.ResponseWriter
-// interface, which is also exposed for users interested in a more low-level
-// API.
-package httpsnoop
-
-//go:generate go run codegen/main.go
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/etcd/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
deleted file mode 100644
index 31cbdfb8ef..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
-
-package httpsnoop
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-// HeaderFunc is part of the http.ResponseWriter interface.
-type HeaderFunc func() http.Header
-
-// WriteHeaderFunc is part of the http.ResponseWriter interface.
-type WriteHeaderFunc func(code int)
-
-// WriteFunc is part of the http.ResponseWriter interface.
-type WriteFunc func(b []byte) (int, error)
-
-// FlushFunc is part of the http.Flusher interface.
-type FlushFunc func()
-
-// CloseNotifyFunc is part of the http.CloseNotifier interface.
-type CloseNotifyFunc func() <-chan bool
-
-// HijackFunc is part of the http.Hijacker interface.
-type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
-
-// ReadFromFunc is part of the io.ReaderFrom interface.
-type ReadFromFunc func(src io.Reader) (int64, error)
-
-// PushFunc is part of the http.Pusher interface.
-type PushFunc func(target string, opts *http.PushOptions) error
-
-// Hooks defines a set of method interceptors for methods included in
-// http.ResponseWriter as well as some others. You can think of them as
-// middleware for the function calls they target. See Wrap for more details.
-type Hooks struct {
- Header func(HeaderFunc) HeaderFunc
- WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
- Write func(WriteFunc) WriteFunc
- Flush func(FlushFunc) FlushFunc
- CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
- Hijack func(HijackFunc) HijackFunc
- ReadFrom func(ReadFromFunc) ReadFromFunc
- Push func(PushFunc) PushFunc
-}
-
-// Wrap returns a wrapped version of w that provides the exact same interface
-// as w. Specifically if w implements any combination of:
-//
-// - http.Flusher
-// - http.CloseNotifier
-// - http.Hijacker
-// - io.ReaderFrom
-// - http.Pusher
-//
-// The wrapped version will implement the exact same combination. If no hooks
-// are set, the wrapped version also behaves exactly as w. Hooks targeting
-// methods not supported by w are ignored. Any other hooks will intercept the
-// method they target and may modify the call's arguments and/or return values.
-// The CaptureMetrics implementation serves as a working example for how the
-// hooks can be used.
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
- rw := &rw{w: w, h: hooks}
- _, i0 := w.(http.Flusher)
- _, i1 := w.(http.CloseNotifier)
- _, i2 := w.(http.Hijacker)
- _, i3 := w.(io.ReaderFrom)
- _, i4 := w.(http.Pusher)
- switch {
- // combination 1/32
- case !i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- }{rw, rw}
- // combination 2/32
- case !i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Pusher
- }{rw, rw, rw}
- // combination 3/32
- case !i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- io.ReaderFrom
- }{rw, rw, rw}
- // combination 4/32
- case !i0 && !i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 5/32
- case !i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- }{rw, rw, rw}
- // combination 6/32
- case !i0 && !i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 7/32
- case !i0 && !i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 8/32
- case !i0 && !i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 9/32
- case !i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- }{rw, rw, rw}
- // combination 10/32
- case !i0 && i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 11/32
- case !i0 && i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 12/32
- case !i0 && i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 13/32
- case !i0 && i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 14/32
- case !i0 && i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 15/32
- case !i0 && i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 16/32
- case !i0 && i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 17/32
- case i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- }{rw, rw, rw}
- // combination 18/32
- case i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 19/32
- case i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 20/32
- case i0 && !i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 21/32
- case i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 22/32
- case i0 && !i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 23/32
- case i0 && !i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 24/32
- case i0 && !i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 25/32
- case i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- }{rw, rw, rw, rw}
- // combination 26/32
- case i0 && i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 27/32
- case i0 && i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 28/32
- case i0 && i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 29/32
- case i0 && i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw, rw}
- // combination 30/32
- case i0 && i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 31/32
- case i0 && i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw, rw}
- // combination 32/32
- case i0 && i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw, rw}
- }
- panic("unreachable")
-}
-
-type rw struct {
- w http.ResponseWriter
- h Hooks
-}
-
-func (w *rw) Unwrap() http.ResponseWriter {
- return w.w
-}
-
-func (w *rw) Header() http.Header {
- f := w.w.(http.ResponseWriter).Header
- if w.h.Header != nil {
- f = w.h.Header(f)
- }
- return f()
-}
-
-func (w *rw) WriteHeader(code int) {
- f := w.w.(http.ResponseWriter).WriteHeader
- if w.h.WriteHeader != nil {
- f = w.h.WriteHeader(f)
- }
- f(code)
-}
-
-func (w *rw) Write(b []byte) (int, error) {
- f := w.w.(http.ResponseWriter).Write
- if w.h.Write != nil {
- f = w.h.Write(f)
- }
- return f(b)
-}
-
-func (w *rw) Flush() {
- f := w.w.(http.Flusher).Flush
- if w.h.Flush != nil {
- f = w.h.Flush(f)
- }
- f()
-}
-
-func (w *rw) CloseNotify() <-chan bool {
- f := w.w.(http.CloseNotifier).CloseNotify
- if w.h.CloseNotify != nil {
- f = w.h.CloseNotify(f)
- }
- return f()
-}
-
-func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- f := w.w.(http.Hijacker).Hijack
- if w.h.Hijack != nil {
- f = w.h.Hijack(f)
- }
- return f()
-}
-
-func (w *rw) ReadFrom(src io.Reader) (int64, error) {
- f := w.w.(io.ReaderFrom).ReadFrom
- if w.h.ReadFrom != nil {
- f = w.h.ReadFrom(f)
- }
- return f(src)
-}
-
-func (w *rw) Push(target string, opts *http.PushOptions) error {
- f := w.w.(http.Pusher).Push
- if w.h.Push != nil {
- f = w.h.Push(f)
- }
- return f(target, opts)
-}
-
-type Unwrapper interface {
- Unwrap() http.ResponseWriter
-}
-
-// Unwrap returns the underlying http.ResponseWriter from within zero or more
-// layers of httpsnoop wrappers.
-func Unwrap(w http.ResponseWriter) http.ResponseWriter {
- if rw, ok := w.(Unwrapper); ok {
- // recurse until rw.Unwrap() returns a non-Unwrapper
- return Unwrap(rw.Unwrap())
- } else {
- return w
- }
-}
diff --git a/etcd/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/etcd/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
deleted file mode 100644
index ab99c07c7a..0000000000
--- a/etcd/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
-
-package httpsnoop
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-// HeaderFunc is part of the http.ResponseWriter interface.
-type HeaderFunc func() http.Header
-
-// WriteHeaderFunc is part of the http.ResponseWriter interface.
-type WriteHeaderFunc func(code int)
-
-// WriteFunc is part of the http.ResponseWriter interface.
-type WriteFunc func(b []byte) (int, error)
-
-// FlushFunc is part of the http.Flusher interface.
-type FlushFunc func()
-
-// CloseNotifyFunc is part of the http.CloseNotifier interface.
-type CloseNotifyFunc func() <-chan bool
-
-// HijackFunc is part of the http.Hijacker interface.
-type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
-
-// ReadFromFunc is part of the io.ReaderFrom interface.
-type ReadFromFunc func(src io.Reader) (int64, error)
-
-// Hooks defines a set of method interceptors for methods included in
-// http.ResponseWriter as well as some others. You can think of them as
-// middleware for the function calls they target. See Wrap for more details.
-type Hooks struct {
- Header func(HeaderFunc) HeaderFunc
- WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
- Write func(WriteFunc) WriteFunc
- Flush func(FlushFunc) FlushFunc
- CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
- Hijack func(HijackFunc) HijackFunc
- ReadFrom func(ReadFromFunc) ReadFromFunc
-}
-
-// Wrap returns a wrapped version of w that provides the exact same interface
-// as w. Specifically if w implements any combination of:
-//
-// - http.Flusher
-// - http.CloseNotifier
-// - http.Hijacker
-// - io.ReaderFrom
-//
-// The wrapped version will implement the exact same combination. If no hooks
-// are set, the wrapped version also behaves exactly as w. Hooks targeting
-// methods not supported by w are ignored. Any other hooks will intercept the
-// method they target and may modify the call's arguments and/or return values.
-// The CaptureMetrics implementation serves as a working example for how the
-// hooks can be used.
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
- rw := &rw{w: w, h: hooks}
- _, i0 := w.(http.Flusher)
- _, i1 := w.(http.CloseNotifier)
- _, i2 := w.(http.Hijacker)
- _, i3 := w.(io.ReaderFrom)
- switch {
- // combination 1/16
- case !i0 && !i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- }{rw, rw}
- // combination 2/16
- case !i0 && !i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- io.ReaderFrom
- }{rw, rw, rw}
- // combination 3/16
- case !i0 && !i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- }{rw, rw, rw}
- // combination 4/16
- case !i0 && !i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 5/16
- case !i0 && i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- }{rw, rw, rw}
- // combination 6/16
- case !i0 && i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 7/16
- case !i0 && i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 8/16
- case !i0 && i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 9/16
- case i0 && !i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- }{rw, rw, rw}
- // combination 10/16
- case i0 && !i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 11/16
- case i0 && !i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 12/16
- case i0 && !i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 13/16
- case i0 && i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- }{rw, rw, rw, rw}
- // combination 14/16
- case i0 && i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 15/16
- case i0 && i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw, rw}
- // combination 16/16
- case i0 && i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw, rw}
- }
- panic("unreachable")
-}
-
-type rw struct {
- w http.ResponseWriter
- h Hooks
-}
-
-func (w *rw) Unwrap() http.ResponseWriter {
- return w.w
-}
-
-func (w *rw) Header() http.Header {
- f := w.w.(http.ResponseWriter).Header
- if w.h.Header != nil {
- f = w.h.Header(f)
- }
- return f()
-}
-
-func (w *rw) WriteHeader(code int) {
- f := w.w.(http.ResponseWriter).WriteHeader
- if w.h.WriteHeader != nil {
- f = w.h.WriteHeader(f)
- }
- f(code)
-}
-
-func (w *rw) Write(b []byte) (int, error) {
- f := w.w.(http.ResponseWriter).Write
- if w.h.Write != nil {
- f = w.h.Write(f)
- }
- return f(b)
-}
-
-func (w *rw) Flush() {
- f := w.w.(http.Flusher).Flush
- if w.h.Flush != nil {
- f = w.h.Flush(f)
- }
- f()
-}
-
-func (w *rw) CloseNotify() <-chan bool {
- f := w.w.(http.CloseNotifier).CloseNotify
- if w.h.CloseNotify != nil {
- f = w.h.CloseNotify(f)
- }
- return f()
-}
-
-func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- f := w.w.(http.Hijacker).Hijack
- if w.h.Hijack != nil {
- f = w.h.Hijack(f)
- }
- return f()
-}
-
-func (w *rw) ReadFrom(src io.Reader) (int64, error) {
- f := w.w.(io.ReaderFrom).ReadFrom
- if w.h.ReadFrom != nil {
- f = w.h.ReadFrom(f)
- }
- return f(src)
-}
-
-type Unwrapper interface {
- Unwrap() http.ResponseWriter
-}
-
-// Unwrap returns the underlying http.ResponseWriter from within zero or more
-// layers of httpsnoop wrappers.
-func Unwrap(w http.ResponseWriter) http.ResponseWriter {
- if rw, ok := w.(Unwrapper); ok {
- // recurse until rw.Unwrap() returns a non-Unwrapper
- return Unwrap(rw.Unwrap())
- } else {
- return w
- }
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/.editorconfig b/etcd/vendor/github.com/fsnotify/fsnotify/.editorconfig
deleted file mode 100644
index fad895851e..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/.editorconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-root = true
-
-[*.go]
-indent_style = tab
-indent_size = 4
-insert_final_newline = true
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
-insert_final_newline = true
-trim_trailing_whitespace = true
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/.gitattributes b/etcd/vendor/github.com/fsnotify/fsnotify/.gitattributes
deleted file mode 100644
index 32f1001be0..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-go.sum linguist-generated
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/.gitignore b/etcd/vendor/github.com/fsnotify/fsnotify/.gitignore
deleted file mode 100644
index 1d89d85ce4..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# go test -c output
-*.test
-*.test.exe
-
-# Output of go build ./cmd/fsnotify
-/fsnotify
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/.mailmap b/etcd/vendor/github.com/fsnotify/fsnotify/.mailmap
deleted file mode 100644
index a04f2907fe..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/.mailmap
+++ /dev/null
@@ -1,2 +0,0 @@
-Chris Howey
-Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/etcd/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
deleted file mode 100644
index 77f9593bd5..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ /dev/null
@@ -1,470 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
-Nothing yet.
-
-## [1.6.0] - 2022-10-13
-
-This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
-but not documented). It also increases the minimum Linux version to 2.6.32.
-
-### Additions
-
-- all: add `Event.Has()` and `Op.Has()` ([#477])
-
- This makes checking events a lot easier; for example:
-
- if event.Op&Write == Write && !(event.Op&Remove == Remove) {
- }
-
- Becomes:
-
- if event.Has(Write) && !event.Has(Remove) {
- }
-
-- all: add cmd/fsnotify ([#463])
-
- A command-line utility for testing and some examples.
-
-### Changes and fixes
-
-- inotify: don't ignore events for files that don't exist ([#260], [#470])
-
- Previously the inotify watcher would call `os.Lstat()` to check if a file
- still exists before emitting events.
-
- This was inconsistent with other platforms and resulted in inconsistent event
- reporting (e.g. when a file is quickly removed and re-created), and generally
- a source of confusion. It was added in 2013 to fix a memory leak that no
- longer exists.
-
-- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
- not watched ([#460])
-
-- inotify: replace epoll() with non-blocking inotify ([#434])
-
- Non-blocking inotify was not generally available at the time this library was
- written in 2014, but now it is. As a result, the minimum Linux version is
- bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
-
-- kqueue: don't check for events every 100ms ([#480])
-
- The watcher would wake up every 100ms, even when there was nothing to do. Now
- it waits until there is something to do.
-
-- macos: retry opening files on EINTR ([#475])
-
-- kqueue: skip unreadable files ([#479])
-
- kqueue requires a file descriptor for every file in a directory; this would
- fail if a file was unreadable by the current user. Now these files are simply
- skipped.
-
-- windows: fix renaming a watched directory if the parent is also watched ([#370])
-
-- windows: increase buffer size from 4K to 64K ([#485])
-
-- windows: close file handle on Remove() ([#288])
-
-- kqueue: put pathname in the error if watching a file fails ([#471])
-
-- inotify, windows: calling Close() more than once could race ([#465])
-
-- kqueue: improve Close() performance ([#233])
-
-- all: various documentation additions and clarifications.
-
-[#233]: https://github.com/fsnotify/fsnotify/pull/233
-[#260]: https://github.com/fsnotify/fsnotify/pull/260
-[#288]: https://github.com/fsnotify/fsnotify/pull/288
-[#370]: https://github.com/fsnotify/fsnotify/pull/370
-[#434]: https://github.com/fsnotify/fsnotify/pull/434
-[#460]: https://github.com/fsnotify/fsnotify/pull/460
-[#463]: https://github.com/fsnotify/fsnotify/pull/463
-[#465]: https://github.com/fsnotify/fsnotify/pull/465
-[#470]: https://github.com/fsnotify/fsnotify/pull/470
-[#471]: https://github.com/fsnotify/fsnotify/pull/471
-[#475]: https://github.com/fsnotify/fsnotify/pull/475
-[#477]: https://github.com/fsnotify/fsnotify/pull/477
-[#479]: https://github.com/fsnotify/fsnotify/pull/479
-[#480]: https://github.com/fsnotify/fsnotify/pull/480
-[#485]: https://github.com/fsnotify/fsnotify/pull/485
-
-## [1.5.4] - 2022-04-25
-
-* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
-* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
-* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
-
-## [1.5.3] - 2022-04-22
-
-* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
-
-## [1.5.2] - 2022-04-21
-
-* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
-* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
-* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
-* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
-* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
-
-## [1.5.1] - 2021-08-24
-
-* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
-
-## [1.5.0] - 2021-08-20
-
-* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
-* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
-* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
-* CI: Use GitHub Actions for CI and cover go 1.12-1.17
- [#378](https://github.com/fsnotify/fsnotify/pull/378)
- [#381](https://github.com/fsnotify/fsnotify/pull/381)
- [#385](https://github.com/fsnotify/fsnotify/pull/385)
-* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
-
-## [1.4.9] - 2020-03-11
-
-* Move example usage to the readme #329. This may resolve #328.
-
-## [1.4.8] - 2020-03-10
-
-* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
-* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
-* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
-* CI: Less verbosity (@nathany #267)
-* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
-* Tests: Check if channels are closed in the example (@alexeykazakov #244)
-* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
-* CI: Add windows to travis matrix (@cpuguy83 #284)
-* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
-* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
-* Linux: open files with close-on-exec (@linxiulei #273)
-* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
-* Project: Add go.mod (@nathany #309)
-* Project: Revise editor config (@nathany #309)
-* Project: Update copyright for 2019 (@nathany #309)
-* CI: Drop go1.8 from CI matrix (@nathany #309)
-* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
-
-## [1.4.7] - 2018-01-09
-
-* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
-* Tests: Fix missing verb on format string (thanks @rchiossi)
-* Linux: Fix deadlock in Remove (thanks @aarondl)
-* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
-* Docs: Moved FAQ into the README (thanks @vahe)
-* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
-* Docs: replace references to OS X with macOS
-
-## [1.4.2] - 2016-10-10
-
-* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
-
-## [1.4.1] - 2016-10-04
-
-* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
-
-## [1.4.0] - 2016-10-01
-
-* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
-
-## [1.3.1] - 2016-06-28
-
-* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
-
-## [1.3.0] - 2016-04-19
-
-* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
-
-## [1.2.10] - 2016-03-02
-
-* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
-
-## [1.2.9] - 2016-01-13
-
-kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
-
-## [1.2.8] - 2015-12-17
-
-* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
-* inotify: fix race in test
-* enable race detection for continuous integration (Linux, Mac, Windows)
-
-## [1.2.5] - 2015-10-17
-
-* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
-* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
-* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
-* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
-
-## [1.2.1] - 2015-10-14
-
-* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
-
-## [1.2.0] - 2015-02-08
-
-* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
-* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
-* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
-
-## [1.1.1] - 2015-02-05
-
-* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
-
-## [1.1.0] - 2014-12-12
-
-* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
- * add low-level functions
- * only need to store flags on directories
- * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
- * done can be an unbuffered channel
- * remove calls to os.NewSyscallError
-* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
-* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## [1.0.4] - 2014-09-07
-
-* kqueue: add dragonfly to the build tags.
-* Rename source code files, rearrange code so exported APIs are at the top.
-* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
-
-## [1.0.3] - 2014-08-19
-
-* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
-
-## [1.0.2] - 2014-08-17
-
-* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-* [Fix] Make ./path and path equivalent. (thanks @zhsso)
-
-## [1.0.0] - 2014-08-15
-
-* [API] Remove AddWatch on Windows, use Add.
-* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
-* Minor updates based on feedback from golint.
-
-## dev / 2014-07-09
-
-* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
-* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
-
-## dev / 2014-07-04
-
-* kqueue: fix incorrect mutex used in Close()
-* Update example to demonstrate usage of Op.
-
-## dev / 2014-06-28
-
-* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
-* Fix for String() method on Event (thanks Alex Brainman)
-* Don't build on Plan 9 or Solaris (thanks @4ad)
-
-## dev / 2014-06-21
-
-* Events channel of type Event rather than *Event.
-* [internal] use syscall constants directly for inotify and kqueue.
-* [internal] kqueue: rename events to kevents and fileEvent to event.
-
-## dev / 2014-06-19
-
-* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
-* [internal] remove cookie from Event struct (unused).
-* [internal] Event struct has the same definition across every OS.
-* [internal] remove internal watch and removeWatch methods.
-
-## dev / 2014-06-12
-
-* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
-* [API] Pluralized channel names: Events and Errors.
-* [API] Renamed FileEvent struct to Event.
-* [API] Op constants replace methods like IsCreate().
-
-## dev / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## dev / 2014-05-23
-
-* [API] Remove current implementation of WatchFlags.
- * current implementation doesn't take advantage of OS for efficiency
- * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
- * no tests for the current implementation
- * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
-
-## [0.9.3] - 2014-12-31
-
-* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
-
-## [0.9.2] - 2014-08-17
-
-* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
-
-## [0.9.1] - 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## [0.9.0] - 2014-01-17
-
-* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
-* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
-* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
-
-## [0.8.12] - 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## [0.8.11] - 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
-
-## [0.8.10] - 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## [0.8.9] - 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
-
-## [0.8.8] - 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## [0.8.7] - 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## [0.8.6] - 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## [0.8.5] - 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## [0.8.4] - 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## [0.8.3] - 2013-03-13
-
-* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## [0.8.2] - 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## [0.8.1] - 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## [0.8.0] - 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## [0.7.4] - 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## [0.7.3] - 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## [0.7.2] - 2012-09-01
-
-* kqueue: events for created directories
-
-## [0.7.1] - 2012-07-14
-
-* [Fix] for renaming files
-
-## [0.7.0] - 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## [0.6.0] - 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## [0.5.1] - 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## [0.5.0] - 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## [0.4.0] - 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## [0.3.0] - 2012-02-19
-
-* kqueue: add files when watch directory
-
-## [0.2.0] - 2011-12-30
-
-* update to latest Go weekly code
-
-## [0.1.0] - 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/etcd/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
deleted file mode 100644
index ea379759d5..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ /dev/null
@@ -1,26 +0,0 @@
-Thank you for your interest in contributing to fsnotify! We try to review and
-merge PRs in a reasonable timeframe, but please be aware that:
-
-- To avoid "wasted" work, please discus changes on the issue tracker first. You
- can just send PRs, but they may end up being rejected for one reason or the
- other.
-
-- fsnotify is a cross-platform library, and changes must work reasonably well on
- all supported platforms.
-
-- Changes will need to be compatible; old code should still compile, and the
- runtime behaviour can't change in ways that are likely to lead to problems for
- users.
-
-Testing
--------
-Just `go test ./...` runs all the tests; the CI runs this on all supported
-platforms. Testing different platforms locally can be done with something like
-[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
-
-Use the `-short` flag to make the "stress test" run faster.
-
-
-[goon]: https://github.com/arp242/goon
-[Vagrant]: https://www.vagrantup.com/
-[integration_test.go]: /integration_test.go
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/LICENSE b/etcd/vendor/github.com/fsnotify/fsnotify/LICENSE
deleted file mode 100644
index fb03ade750..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright © 2012 The Go Authors. All rights reserved.
-Copyright © fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this
- list of conditions and the following disclaimer in the documentation and/or
- other materials provided with the distribution.
-* Neither the name of Google Inc. nor the names of its contributors may be used
- to endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/README.md b/etcd/vendor/github.com/fsnotify/fsnotify/README.md
deleted file mode 100644
index d4e6080feb..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/README.md
+++ /dev/null
@@ -1,161 +0,0 @@
-fsnotify is a Go library to provide cross-platform filesystem notifications on
-Windows, Linux, macOS, and BSD systems.
-
-Go 1.16 or newer is required; the full documentation is at
-https://pkg.go.dev/github.com/fsnotify/fsnotify
-
-**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
-released version, whereas this README is for the last development version which
-may include additions/changes.**
-
----
-
-Platform support:
-
-| Adapter | OS | Status |
-| --------------------- | ---------------| -------------------------------------------------------------|
-| inotify | Linux 2.6.32+ | Supported |
-| kqueue | BSD, macOS | Supported |
-| ReadDirectoryChangesW | Windows | Supported |
-| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
-| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
-| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
-| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
-| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
-
-Linux and macOS should include Android and iOS, but these are currently untested.
-
-Usage
------
-A basic example:
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/fsnotify/fsnotify"
-)
-
-func main() {
- // Create new watcher.
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- log.Fatal(err)
- }
- defer watcher.Close()
-
- // Start listening for events.
- go func() {
- for {
- select {
- case event, ok := <-watcher.Events:
- if !ok {
- return
- }
- log.Println("event:", event)
- if event.Has(fsnotify.Write) {
- log.Println("modified file:", event.Name)
- }
- case err, ok := <-watcher.Errors:
- if !ok {
- return
- }
- log.Println("error:", err)
- }
- }
- }()
-
- // Add a path.
- err = watcher.Add("/tmp")
- if err != nil {
- log.Fatal(err)
- }
-
- // Block main goroutine forever.
- <-make(chan struct{})
-}
-```
-
-Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
-run with:
-
- % go run ./cmd/fsnotify
-
-FAQ
----
-### Will a file still be watched when it's moved to another directory?
-No, not unless you are watching the location it was moved to.
-
-### Are subdirectories watched too?
-No, you must add watches for any directory you want to watch (a recursive
-watcher is on the roadmap: [#18]).
-
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-
-### Do I have to watch the Error and Event channels in a goroutine?
-As of now, yes (you can read both channels in the same goroutine using `select`,
-you don't need a separate goroutine for both channels; see the example).
-
-### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
-fsnotify requires support from underlying OS to work. The current NFS and SMB
-protocols does not provide network level support for file notifications, and
-neither do the /proc and /sys virtual filesystems.
-
-This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
-
-[#9]: https://github.com/fsnotify/fsnotify/issues/9
-
-Platform-specific notes
------------------------
-### Linux
-When a file is removed a REMOVE event won't be emitted until all file
-descriptors are closed; it will emit a CHMOD instead:
-
- fp := os.Open("file")
- os.Remove("file") // CHMOD
- fp.Close() // REMOVE
-
-This is the event that inotify sends, so not much can be changed about this.
-
-The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
-the number of watches per user, and `fs.inotify.max_user_instances` specifies
-the maximum number of inotify instances per user. Every Watcher you create is an
-"instance", and every path you add is a "watch".
-
-These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
-`/proc/sys/fs/inotify/max_user_instances`
-
-To increase them you can use `sysctl` or write the value to proc file:
-
- # The default values on Linux 5.18
- sysctl fs.inotify.max_user_watches=124983
- sysctl fs.inotify.max_user_instances=128
-
-To make the changes persist on reboot edit `/etc/sysctl.conf` or
-`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
-distro's documentation):
-
- fs.inotify.max_user_watches=124983
- fs.inotify.max_user_instances=128
-
-Reaching the limit will result in a "no space left on device" or "too many open
-files" error.
-
-### kqueue (macOS, all BSD systems)
-kqueue requires opening a file descriptor for every file that's being watched;
-so if you're watching a directory with five files then that's six file
-descriptors. You will run in to your system's "max open files" limit faster on
-these platforms.
-
-The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
-control the maximum number of open files.
-
-### macOS
-Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
-workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
-have a native FSEvents implementation (see [#11]).
-
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#15]: https://github.com/fsnotify/fsnotify/issues/15
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/etcd/vendor/github.com/fsnotify/fsnotify/backend_fen.go
deleted file mode 100644
index 1a95ad8e7c..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/backend_fen.go
+++ /dev/null
@@ -1,162 +0,0 @@
-//go:build solaris
-// +build solaris
-
-package fsnotify
-
-import (
- "errors"
-)
-
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # macOS notes
-//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
-//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
- Events chan Event
-
- // Errors sends any errors.
- Errors chan error
-}
-
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- return nil
-}
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
-//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- return nil
-}
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- return nil
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/etcd/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
deleted file mode 100644
index 54c77fbb0e..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
+++ /dev/null
@@ -1,459 +0,0 @@
-//go:build linux
-// +build linux
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # macOS notes
-//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
-//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
- Events chan Event
-
- // Errors sends any errors.
- Errors chan error
-
- // Store fd here as os.File.Read() will no longer return on close after
- // calling Fd(). See: https://github.com/golang/go/issues/26439
- fd int
- mu sync.Mutex // Map access
- inotifyFile *os.File
- watches map[string]*watch // Map of inotify watches (key: path)
- paths map[int]string // Map of watched paths (key: watch descriptor)
- done chan struct{} // Channel for sending a "quit message" to the reader goroutine
- doneResp chan struct{} // Channel to respond to Close
-}
-
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- // Create inotify fd
- // Need to set the FD to nonblocking mode in order for SetDeadline methods to work
- // Otherwise, blocking i/o operations won't terminate on close
- fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
- if fd == -1 {
- return nil, errno
- }
-
- w := &Watcher{
- fd: fd,
- inotifyFile: os.NewFile(uintptr(fd), ""),
- watches: make(map[string]*watch),
- paths: make(map[int]string),
- Events: make(chan Event),
- Errors: make(chan error),
- done: make(chan struct{}),
- doneResp: make(chan struct{}),
- }
-
- go w.readEvents()
- return w, nil
-}
-
-// Returns true if the event was sent, or false if watcher is closed.
-func (w *Watcher) sendEvent(e Event) bool {
- select {
- case w.Events <- e:
- return true
- case <-w.done:
- }
- return false
-}
-
-// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
- select {
- case w.Errors <- err:
- return true
- case <-w.done:
- return false
- }
-}
-
-func (w *Watcher) isClosed() bool {
- select {
- case <-w.done:
- return true
- default:
- return false
- }
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed() {
- w.mu.Unlock()
- return nil
- }
-
- // Send 'close' signal to goroutine, and set the Watcher to closed.
- close(w.done)
- w.mu.Unlock()
-
- // Causes any blocking reads to return with an error, provided the file
- // still supports deadline operations.
- err := w.inotifyFile.Close()
- if err != nil {
- return err
- }
-
- // Wait for goroutine to close
- <-w.doneResp
-
- return nil
-}
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
-//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- name = filepath.Clean(name)
- if w.isClosed() {
- return errors.New("inotify instance already closed")
- }
-
- var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
- unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
- unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
-
- w.mu.Lock()
- defer w.mu.Unlock()
- watchEntry := w.watches[name]
- if watchEntry != nil {
- flags |= watchEntry.flags | unix.IN_MASK_ADD
- }
- wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
- if wd == -1 {
- return errno
- }
-
- if watchEntry == nil {
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- } else {
- watchEntry.wd = uint32(wd)
- watchEntry.flags = flags
- }
-
- return nil
-}
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
-
- // Fetch the watch.
- w.mu.Lock()
- defer w.mu.Unlock()
- watch, ok := w.watches[name]
-
- // Remove it from inotify.
- if !ok {
- return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
- }
-
- // We successfully removed the watch if InotifyRmWatch doesn't return an
- // error, we need to clean up our internal state to ensure it matches
- // inotify's kernel state.
- delete(w.paths, int(watch.wd))
- delete(w.watches, name)
-
- // inotify_rm_watch will return EINVAL if the file has been deleted;
- // the inotify will already have been removed.
- // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
- // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
- // so that EINVAL means that the wd is being rm_watch()ed or its file removed
- // by another thread and we have not received IN_IGNORE event.
- success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
- if success == -1 {
- // TODO: Perhaps it's not helpful to return an error here in every case;
- // The only two possible errors are:
- //
- // - EBADF, which happens when w.fd is not a valid file descriptor
- // of any kind.
- // - EINVAL, which is when fd is not an inotify descriptor or wd
- // is not a valid watch descriptor. Watch descriptors are
- // invalidated when they are removed explicitly or implicitly;
- // explicitly by inotify_rm_watch, implicitly when the file they
- // are watching is deleted.
- return errno
- }
-
- return nil
-}
-
-// WatchList returns all paths added with [Add] (and are not yet removed).
-func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- entries := make([]string, 0, len(w.watches))
- for pathname := range w.watches {
- entries = append(entries, pathname)
- }
-
- return entries
-}
-
-type watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
-// readEvents reads from the inotify file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
- defer func() {
- close(w.doneResp)
- close(w.Errors)
- close(w.Events)
- }()
-
- var (
- buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
- errno error // Syscall errno
- )
- for {
- // See if we have been closed.
- if w.isClosed() {
- return
- }
-
- n, err := w.inotifyFile.Read(buf[:])
- switch {
- case errors.Unwrap(err) == os.ErrClosed:
- return
- case err != nil:
- if !w.sendError(err) {
- return
- }
- continue
- }
-
- if n < unix.SizeofInotifyEvent {
- var err error
- if n == 0 {
- // If EOF is received. This should really never happen.
- err = io.EOF
- } else if n < 0 {
- // If an error occurred while reading.
- err = errno
- } else {
- // Read was too short.
- err = errors.New("notify: short read in readEvents()")
- }
- if !w.sendError(err) {
- return
- }
- continue
- }
-
- var offset uint32
- // We don't know how many events we just read into the buffer
- // While the offset points to at least one whole event...
- for offset <= uint32(n-unix.SizeofInotifyEvent) {
- var (
- // Point "raw" to the event in the buffer
- raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
- mask = uint32(raw.Mask)
- nameLen = uint32(raw.Len)
- )
-
- if mask&unix.IN_Q_OVERFLOW != 0 {
- if !w.sendError(ErrEventOverflow) {
- return
- }
- }
-
- // If the event happened to the watched directory or the watched file, the kernel
- // doesn't append the filename to the event, but we would like to always fill the
- // the "Name" field with a valid filename. We retrieve the path of the watch from
- // the "paths" map.
- w.mu.Lock()
- name, ok := w.paths[int(raw.Wd)]
- // IN_DELETE_SELF occurs when the file/directory being watched is removed.
- // This is a sign to clean up the maps, otherwise we are no longer in sync
- // with the inotify kernel state which has already deleted the watch
- // automatically.
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
- delete(w.paths, int(raw.Wd))
- delete(w.watches, name)
- }
- w.mu.Unlock()
-
- if nameLen > 0 {
- // Point "bytes" at the first byte of the filename
- bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
- // The filename is padded with NULL bytes. TrimRight() gets rid of those.
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
- }
-
- event := w.newEvent(name, mask)
-
- // Send the events that are not ignored on the events channel
- if mask&unix.IN_IGNORED == 0 {
- if !w.sendEvent(event) {
- return
- }
- }
-
- // Move to the next event in the buffer
- offset += unix.SizeofInotifyEvent + nameLen
- }
- }
-}
-
-// newEvent returns an platform-independent Event based on an inotify mask.
-func (w *Watcher) newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
- e.Op |= Create
- }
- if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
- e.Op |= Remove
- }
- if mask&unix.IN_MODIFY == unix.IN_MODIFY {
- e.Op |= Write
- }
- if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
- e.Op |= Rename
- }
- if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/etcd/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
deleted file mode 100644
index 29087469bf..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
+++ /dev/null
@@ -1,707 +0,0 @@
-//go:build freebsd || openbsd || netbsd || dragonfly || darwin
-// +build freebsd openbsd netbsd dragonfly darwin
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
-
- "golang.org/x/sys/unix"
-)
-
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # macOS notes
-//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
-//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
- Events chan Event
-
- // Errors sends any errors.
- Errors chan error
-
- done chan struct{}
- kq int // File descriptor (as returned by the kqueue() syscall).
- closepipe [2]int // Pipe used for closing.
- mu sync.Mutex // Protects access to watcher data
- watches map[string]int // Watched file descriptors (key: path).
- watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
- userWatches map[string]struct{} // Watches added with Watcher.Add()
- dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
- paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
- fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
- isClosed bool // Set to true when Close() is first called
-}
-
-type pathInfo struct {
- name string
- isDir bool
-}
-
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- kq, closepipe, err := newKqueue()
- if err != nil {
- return nil, err
- }
-
- w := &Watcher{
- kq: kq,
- closepipe: closepipe,
- watches: make(map[string]int),
- watchesByDir: make(map[string]map[int]struct{}),
- dirFlags: make(map[string]uint32),
- paths: make(map[int]pathInfo),
- fileExists: make(map[string]struct{}),
- userWatches: make(map[string]struct{}),
- Events: make(chan Event),
- Errors: make(chan error),
- done: make(chan struct{}),
- }
-
- go w.readEvents()
- return w, nil
-}
-
-// newKqueue creates a new kernel event queue and returns a descriptor.
-//
-// This registers a new event on closepipe, which will trigger an event when
-// it's closed. This way we can use kevent() without timeout/polling; without
-// the closepipe, it would block forever and we wouldn't be able to stop it at
-// all.
-func newKqueue() (kq int, closepipe [2]int, err error) {
- kq, err = unix.Kqueue()
- if kq == -1 {
- return kq, closepipe, err
- }
-
- // Register the close pipe.
- err = unix.Pipe(closepipe[:])
- if err != nil {
- unix.Close(kq)
- return kq, closepipe, err
- }
-
- // Register changes to listen on the closepipe.
- changes := make([]unix.Kevent_t, 1)
- // SetKevent converts int to the platform-specific types.
- unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
- unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
-
- ok, err := unix.Kevent(kq, changes, nil, nil)
- if ok == -1 {
- unix.Close(kq)
- unix.Close(closepipe[0])
- unix.Close(closepipe[1])
- return kq, closepipe, err
- }
- return kq, closepipe, nil
-}
-
-// Returns true if the event was sent, or false if watcher is closed.
-func (w *Watcher) sendEvent(e Event) bool {
- select {
- case w.Events <- e:
- return true
- case <-w.done:
- }
- return false
-}
-
-// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
- select {
- case w.Errors <- err:
- return true
- case <-w.done:
- }
- return false
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return nil
- }
- w.isClosed = true
-
- // copy paths to remove while locked
- pathsToRemove := make([]string, 0, len(w.watches))
- for name := range w.watches {
- pathsToRemove = append(pathsToRemove, name)
- }
- w.mu.Unlock() // Unlock before calling Remove, which also locks
- for _, name := range pathsToRemove {
- w.Remove(name)
- }
-
- // Send "quit" message to the reader goroutine.
- unix.Close(w.closepipe[1])
- close(w.done)
-
- return nil
-}
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
-//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- w.mu.Lock()
- w.userWatches[name] = struct{}{}
- w.mu.Unlock()
- _, err := w.addWatch(name, noteAllEvents)
- return err
-}
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
- w.mu.Lock()
- watchfd, ok := w.watches[name]
- w.mu.Unlock()
- if !ok {
- return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
- }
-
- err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
- if err != nil {
- return err
- }
-
- unix.Close(watchfd)
-
- w.mu.Lock()
- isDir := w.paths[watchfd].isDir
- delete(w.watches, name)
- delete(w.userWatches, name)
-
- parentName := filepath.Dir(name)
- delete(w.watchesByDir[parentName], watchfd)
-
- if len(w.watchesByDir[parentName]) == 0 {
- delete(w.watchesByDir, parentName)
- }
-
- delete(w.paths, watchfd)
- delete(w.dirFlags, name)
- delete(w.fileExists, name)
- w.mu.Unlock()
-
- // Find all watched paths that are in this directory that are not external.
- if isDir {
- var pathsToRemove []string
- w.mu.Lock()
- for fd := range w.watchesByDir[name] {
- path := w.paths[fd]
- if _, ok := w.userWatches[path.name]; !ok {
- pathsToRemove = append(pathsToRemove, path.name)
- }
- }
- w.mu.Unlock()
- for _, name := range pathsToRemove {
- // Since these are internal, not much sense in propagating error
- // to the user, as that will just confuse them with an error about
- // a path they did not explicitly watch themselves.
- w.Remove(name)
- }
- }
-
- return nil
-}
-
-// WatchList returns all paths added with [Add] (and are not yet removed).
-func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- entries := make([]string, 0, len(w.userWatches))
- for pathname := range w.userWatches {
- entries = append(entries, pathname)
- }
-
- return entries
-}
-
-// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
-const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
-
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
- var isDir bool
- // Make ./name and name equivalent
- name = filepath.Clean(name)
-
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return "", errors.New("kevent instance already closed")
- }
- watchfd, alreadyWatching := w.watches[name]
- // We already have a watch, but we can still override flags.
- if alreadyWatching {
- isDir = w.paths[watchfd].isDir
- }
- w.mu.Unlock()
-
- if !alreadyWatching {
- fi, err := os.Lstat(name)
- if err != nil {
- return "", err
- }
-
- // Don't watch sockets or named pipes
- if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
- return "", nil
- }
-
- // Follow Symlinks
- //
- // Linux can add unresolvable symlinks to the watch list without issue,
- // and Windows can't do symlinks period. To maintain consistency, we
- // will act like everything is fine if the link can't be resolved.
- // There will simply be no file events for broken symlinks. Hence the
- // returns of nil on errors.
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
- name, err = filepath.EvalSymlinks(name)
- if err != nil {
- return "", nil
- }
-
- w.mu.Lock()
- _, alreadyWatching = w.watches[name]
- w.mu.Unlock()
-
- if alreadyWatching {
- return name, nil
- }
-
- fi, err = os.Lstat(name)
- if err != nil {
- return "", nil
- }
- }
-
- // Retry on EINTR; open() can return EINTR in practice on macOS.
- // See #354, and go issues 11180 and 39237.
- for {
- watchfd, err = unix.Open(name, openMode, 0)
- if err == nil {
- break
- }
- if errors.Is(err, unix.EINTR) {
- continue
- }
-
- return "", err
- }
-
- isDir = fi.IsDir()
- }
-
- err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
- if err != nil {
- unix.Close(watchfd)
- return "", err
- }
-
- if !alreadyWatching {
- w.mu.Lock()
- parentName := filepath.Dir(name)
- w.watches[name] = watchfd
-
- watchesByDir, ok := w.watchesByDir[parentName]
- if !ok {
- watchesByDir = make(map[int]struct{}, 1)
- w.watchesByDir[parentName] = watchesByDir
- }
- watchesByDir[watchfd] = struct{}{}
-
- w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
- w.mu.Unlock()
- }
-
- if isDir {
- // Watch the directory if it has not been watched before,
- // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
- w.mu.Lock()
-
- watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
- (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
- // Store flags so this watch can be updated later
- w.dirFlags[name] = flags
- w.mu.Unlock()
-
- if watchDir {
- if err := w.watchDirectoryFiles(name); err != nil {
- return "", err
- }
- }
- }
- return name, nil
-}
-
-// readEvents reads from kqueue and converts the received kevents into
-// Event values that it sends down the Events channel.
-func (w *Watcher) readEvents() {
- defer func() {
- err := unix.Close(w.kq)
- if err != nil {
- w.Errors <- err
- }
- unix.Close(w.closepipe[0])
- close(w.Events)
- close(w.Errors)
- }()
-
- eventBuffer := make([]unix.Kevent_t, 10)
- for closed := false; !closed; {
- kevents, err := w.read(eventBuffer)
- // EINTR is okay, the syscall was interrupted before timeout expired.
- if err != nil && err != unix.EINTR {
- if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
- closed = true
- }
- continue
- }
-
- // Flush the events we received to the Events channel
- for _, kevent := range kevents {
- var (
- watchfd = int(kevent.Ident)
- mask = uint32(kevent.Fflags)
- )
-
- // Shut down the loop when the pipe is closed, but only after all
- // other events have been processed.
- if watchfd == w.closepipe[0] {
- closed = true
- continue
- }
-
- w.mu.Lock()
- path := w.paths[watchfd]
- w.mu.Unlock()
-
- event := w.newEvent(path.name, mask)
-
- if path.isDir && !event.Has(Remove) {
- // Double check to make sure the directory exists. This can
- // happen when we do a rm -fr on a recursively watched folders
- // and we receive a modification event first but the folder has
- // been deleted and later receive the delete event.
- if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
- event.Op |= Remove
- }
- }
-
- if event.Has(Rename) || event.Has(Remove) {
- w.Remove(event.Name)
- w.mu.Lock()
- delete(w.fileExists, event.Name)
- w.mu.Unlock()
- }
-
- if path.isDir && event.Has(Write) && !event.Has(Remove) {
- w.sendDirectoryChangeEvents(event.Name)
- } else {
- if !w.sendEvent(event) {
- closed = true
- continue
- }
- }
-
- if event.Has(Remove) {
- // Look for a file that may have overwritten this.
- // For example, mv f1 f2 will delete f2, then create f2.
- if path.isDir {
- fileDir := filepath.Clean(event.Name)
- w.mu.Lock()
- _, found := w.watches[fileDir]
- w.mu.Unlock()
- if found {
- // make sure the directory exists before we watch for changes. When we
- // do a recursive watch and perform rm -fr, the parent directory might
- // have gone missing, ignore the missing directory and let the
- // upcoming delete event remove the watch from the parent directory.
- if _, err := os.Lstat(fileDir); err == nil {
- w.sendDirectoryChangeEvents(fileDir)
- }
- }
- } else {
- filePath := filepath.Clean(event.Name)
- if fileInfo, err := os.Lstat(filePath); err == nil {
- w.sendFileCreatedEventIfNew(filePath, fileInfo)
- }
- }
- }
- }
- }
-}
-
-// newEvent returns an platform-independent Event based on kqueue Fflags.
-func (w *Watcher) newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
- e.Op |= Remove
- }
- if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
- e.Op |= Write
- }
- if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
- e.Op |= Rename
- }
- if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-// watchDirectoryFiles to mimic inotify when adding a watch on a directory
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
- // Get all files
- files, err := ioutil.ReadDir(dirPath)
- if err != nil {
- return err
- }
-
- for _, fileInfo := range files {
- path := filepath.Join(dirPath, fileInfo.Name())
-
- cleanPath, err := w.internalWatch(path, fileInfo)
- if err != nil {
- // No permission to read the file; that's not a problem: just skip.
- // But do add it to w.fileExists to prevent it from being picked up
- // as a "new" file later (it still shows up in the directory
- // listing).
- switch {
- case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
- cleanPath = filepath.Clean(path)
- default:
- return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
- }
- }
-
- w.mu.Lock()
- w.fileExists[cleanPath] = struct{}{}
- w.mu.Unlock()
- }
-
- return nil
-}
-
-// Search the directory for new files and send an event for them.
-//
-// This functionality is to have the BSD watcher match the inotify, which sends
-// a create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dir string) {
- // Get all files
- files, err := ioutil.ReadDir(dir)
- if err != nil {
- if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
- return
- }
- }
-
- // Search for new files
- for _, fi := range files {
- err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
- if err != nil {
- return
- }
- }
-}
-
-// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
- w.mu.Lock()
- _, doesExist := w.fileExists[filePath]
- w.mu.Unlock()
- if !doesExist {
- if !w.sendEvent(Event{Name: filePath, Op: Create}) {
- return
- }
- }
-
- // like watchDirectoryFiles (but without doing another ReadDir)
- filePath, err = w.internalWatch(filePath, fileInfo)
- if err != nil {
- return err
- }
-
- w.mu.Lock()
- w.fileExists[filePath] = struct{}{}
- w.mu.Unlock()
-
- return nil
-}
-
-func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
- if fileInfo.IsDir() {
- // mimic Linux providing delete events for subdirectories
- // but preserve the flags used if currently watching subdirectory
- w.mu.Lock()
- flags := w.dirFlags[name]
- w.mu.Unlock()
-
- flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
- return w.addWatch(name, flags)
- }
-
- // watch file to mimic Linux inotify
- return w.addWatch(name, noteAllEvents)
-}
-
-// Register events with the queue.
-func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
- changes := make([]unix.Kevent_t, len(fds))
- for i, fd := range fds {
- // SetKevent converts int to the platform-specific types.
- unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
- changes[i].Fflags = fflags
- }
-
- // Register the events.
- success, err := unix.Kevent(w.kq, changes, nil, nil)
- if success == -1 {
- return err
- }
- return nil
-}
-
-// read retrieves pending events, or waits until an event occurs.
-func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
- n, err := unix.Kevent(w.kq, nil, events, nil)
- if err != nil {
- return nil, err
- }
- return events[0:n], nil
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/backend_other.go b/etcd/vendor/github.com/fsnotify/fsnotify/backend_other.go
deleted file mode 100644
index a9bb1c3c4d..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/backend_other.go
+++ /dev/null
@@ -1,66 +0,0 @@
-//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
-// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
-
-package fsnotify
-
-import (
- "fmt"
- "runtime"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct{}
-
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- return nil
-}
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
-//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- return nil
-}
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- return nil
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/etcd/vendor/github.com/fsnotify/fsnotify/backend_windows.go
deleted file mode 100644
index ae392867c0..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/backend_windows.go
+++ /dev/null
@@ -1,746 +0,0 @@
-//go:build windows
-// +build windows
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-// Watcher watches a set of paths, delivering events on a channel.
-//
-// A watcher should not be copied (e.g. pass it by pointer, rather than by
-// value).
-//
-// # Linux notes
-//
-// When a file is removed a Remove event won't be emitted until all file
-// descriptors are closed, and deletes will always emit a Chmod. For example:
-//
-// fp := os.Open("file")
-// os.Remove("file") // Triggers Chmod
-// fp.Close() // Triggers Remove
-//
-// This is the event that inotify sends, so not much can be changed about this.
-//
-// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
-// for the number of watches per user, and fs.inotify.max_user_instances
-// specifies the maximum number of inotify instances per user. Every Watcher you
-// create is an "instance", and every path you add is a "watch".
-//
-// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
-// /proc/sys/fs/inotify/max_user_instances
-//
-// To increase them you can use sysctl or write the value to the /proc file:
-//
-// # Default values on Linux 5.18
-// sysctl fs.inotify.max_user_watches=124983
-// sysctl fs.inotify.max_user_instances=128
-//
-// To make the changes persist on reboot edit /etc/sysctl.conf or
-// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
-// your distro's documentation):
-//
-// fs.inotify.max_user_watches=124983
-// fs.inotify.max_user_instances=128
-//
-// Reaching the limit will result in a "no space left on device" or "too many open
-// files" error.
-//
-// # kqueue notes (macOS, BSD)
-//
-// kqueue requires opening a file descriptor for every file that's being watched;
-// so if you're watching a directory with five files then that's six file
-// descriptors. You will run in to your system's "max open files" limit faster on
-// these platforms.
-//
-// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
-// control the maximum number of open files, as well as /etc/login.conf on BSD
-// systems.
-//
-// # macOS notes
-//
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
-//
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
-type Watcher struct {
- // Events sends the filesystem change events.
- //
- // fsnotify can send the following events; a "path" here can refer to a
- // file, directory, symbolic link, or special file like a FIFO.
- //
- // fsnotify.Create A new path was created; this may be followed by one
- // or more Write events if data also gets written to a
- // file.
- //
- // fsnotify.Remove A path was removed.
- //
- // fsnotify.Rename A path was renamed. A rename is always sent with the
- // old path as Event.Name, and a Create event will be
- // sent with the new name. Renames are only sent for
- // paths that are currently watched; e.g. moving an
- // unmonitored file into a monitored directory will
- // show up as just a Create. Similarly, renaming a file
- // to outside a monitored directory will show up as
- // only a Rename.
- //
- // fsnotify.Write A file or named pipe was written to. A Truncate will
- // also trigger a Write. A single "write action"
- // initiated by the user may show up as one or multiple
- // writes, depending on when the system syncs things to
- // disk. For example when compiling a large Go program
- // you may get hundreds of Write events, so you
- // probably want to wait until you've stopped receiving
- // them (see the dedup example in cmd/fsnotify).
- //
- // fsnotify.Chmod Attributes were changed. On Linux this is also sent
- // when a file is removed (or more accurately, when a
- // link to an inode is removed). On kqueue it's sent
- // and on kqueue when a file is truncated. On Windows
- // it's never sent.
- Events chan Event
-
- // Errors sends any errors.
- Errors chan error
-
- port windows.Handle // Handle to completion port
- input chan *input // Inputs to the reader are sent on this channel
- quit chan chan<- error
-
- mu sync.Mutex // Protects access to watches, isClosed
- watches watchMap // Map of watches (key: i-number)
- isClosed bool // Set to true when Close() is first called
-}
-
-// NewWatcher creates a new Watcher.
-func NewWatcher() (*Watcher, error) {
- port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
- if err != nil {
- return nil, os.NewSyscallError("CreateIoCompletionPort", err)
- }
- w := &Watcher{
- port: port,
- watches: make(watchMap),
- input: make(chan *input, 1),
- Events: make(chan Event, 50),
- Errors: make(chan error),
- quit: make(chan chan<- error, 1),
- }
- go w.readEvents()
- return w, nil
-}
-
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
- if mask == 0 {
- return false
- }
-
- event := w.newEvent(name, uint32(mask))
- select {
- case ch := <-w.quit:
- w.quit <- ch
- case w.Events <- event:
- }
- return true
-}
-
-// Returns true if the error was sent, or false if watcher is closed.
-func (w *Watcher) sendError(err error) bool {
- select {
- case w.Errors <- err:
- return true
- case <-w.quit:
- }
- return false
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return nil
- }
- w.isClosed = true
- w.mu.Unlock()
-
- // Send "quit" message to the reader goroutine
- ch := make(chan error)
- w.quit <- ch
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-ch
-}
-
-// Add starts monitoring the path for changes.
-//
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
-//
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
-//
-// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
-// filesystems (/proc, /sys, etc.) generally don't work.
-//
-// # Watching directories
-//
-// All files in a directory are monitored, including new files that are created
-// after the watcher is started. Subdirectories are not watched (i.e. it's
-// non-recursive).
-//
-// # Watching files
-//
-// Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
-//
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return errors.New("watcher already closed")
- }
- w.mu.Unlock()
-
- in := &input{
- op: opAddWatch,
- path: filepath.Clean(name),
- flags: sysFSALLEVENTS,
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-// Remove stops monitoring the path for changes.
-//
-// Directories are always removed non-recursively. For example, if you added
-// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
-//
-// Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
- in := &input{
- op: opRemoveWatch,
- path: filepath.Clean(name),
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-// WatchList returns all paths added with [Add] (and are not yet removed).
-func (w *Watcher) WatchList() []string {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- entries := make([]string, 0, len(w.watches))
- for _, entry := range w.watches {
- for _, watchEntry := range entry {
- entries = append(entries, watchEntry.path)
- }
- }
-
- return entries
-}
-
-// These options are from the old golang.org/x/exp/winfsnotify, where you could
-// add various options to the watch. This has long since been removed.
-//
-// The "sys" in the name is misleading as they're not part of any "system".
-//
-// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
-const (
- sysFSALLEVENTS = 0xfff
- sysFSATTRIB = 0x4
- sysFSCREATE = 0x100
- sysFSDELETE = 0x200
- sysFSDELETESELF = 0x400
- sysFSMODIFY = 0x2
- sysFSMOVE = 0xc0
- sysFSMOVEDFROM = 0x40
- sysFSMOVEDTO = 0x80
- sysFSMOVESELF = 0x800
- sysFSIGNORED = 0x8000
-)
-
-func (w *Watcher) newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
- e.Op |= Create
- }
- if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
- e.Op |= Remove
- }
- if mask&sysFSMODIFY == sysFSMODIFY {
- e.Op |= Write
- }
- if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
- e.Op |= Rename
- }
- if mask&sysFSATTRIB == sysFSATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-const (
- opAddWatch = iota
- opRemoveWatch
-)
-
-const (
- provisional uint64 = 1 << (32 + iota)
-)
-
-type input struct {
- op int
- path string
- flags uint32
- reply chan error
-}
-
-type inode struct {
- handle windows.Handle
- volume uint32
- index uint64
-}
-
-type watch struct {
- ov windows.Overlapped
- ino *inode // i-number
- path string // Directory path
- mask uint64 // Directory itself is being watched with these notify flags
- names map[string]uint64 // Map of names being watched and their notify flags
- rename string // Remembers the old name while renaming a file
- buf [65536]byte // 64K buffer
-}
-
-type (
- indexMap map[uint64]*watch
- watchMap map[uint32]indexMap
-)
-
-func (w *Watcher) wakeupReader() error {
- err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
- if err != nil {
- return os.NewSyscallError("PostQueuedCompletionStatus", err)
- }
- return nil
-}
-
-func (w *Watcher) getDir(pathname string) (dir string, err error) {
- attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
- if err != nil {
- return "", os.NewSyscallError("GetFileAttributes", err)
- }
- if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
- dir = pathname
- } else {
- dir, _ = filepath.Split(pathname)
- dir = filepath.Clean(dir)
- }
- return
-}
-
-func (w *Watcher) getIno(path string) (ino *inode, err error) {
- h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
- windows.FILE_LIST_DIRECTORY,
- windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
- nil, windows.OPEN_EXISTING,
- windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
- if err != nil {
- return nil, os.NewSyscallError("CreateFile", err)
- }
-
- var fi windows.ByHandleFileInformation
- err = windows.GetFileInformationByHandle(h, &fi)
- if err != nil {
- windows.CloseHandle(h)
- return nil, os.NewSyscallError("GetFileInformationByHandle", err)
- }
- ino = &inode{
- handle: h,
- volume: fi.VolumeSerialNumber,
- index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
- }
- return ino, nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) get(ino *inode) *watch {
- if i := m[ino.volume]; i != nil {
- return i[ino.index]
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) set(ino *inode, watch *watch) {
- i := m[ino.volume]
- if i == nil {
- i = make(indexMap)
- m[ino.volume] = i
- }
- i[ino.index] = watch
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
- dir, err := w.getDir(pathname)
- if err != nil {
- return err
- }
-
- ino, err := w.getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watchEntry := w.watches.get(ino)
- w.mu.Unlock()
- if watchEntry == nil {
- _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
- if err != nil {
- windows.CloseHandle(ino.handle)
- return os.NewSyscallError("CreateIoCompletionPort", err)
- }
- watchEntry = &watch{
- ino: ino,
- path: dir,
- names: make(map[string]uint64),
- }
- w.mu.Lock()
- w.watches.set(ino, watchEntry)
- w.mu.Unlock()
- flags |= provisional
- } else {
- windows.CloseHandle(ino.handle)
- }
- if pathname == dir {
- watchEntry.mask |= flags
- } else {
- watchEntry.names[filepath.Base(pathname)] |= flags
- }
-
- err = w.startRead(watchEntry)
- if err != nil {
- return err
- }
-
- if pathname == dir {
- watchEntry.mask &= ^provisional
- } else {
- watchEntry.names[filepath.Base(pathname)] &= ^provisional
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
- dir, err := w.getDir(pathname)
- if err != nil {
- return err
- }
- ino, err := w.getIno(dir)
- if err != nil {
- return err
- }
-
- w.mu.Lock()
- watch := w.watches.get(ino)
- w.mu.Unlock()
-
- err = windows.CloseHandle(ino.handle)
- if err != nil {
- w.sendError(os.NewSyscallError("CloseHandle", err))
- }
- if watch == nil {
- return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
- }
- if pathname == dir {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
- watch.mask = 0
- } else {
- name := filepath.Base(pathname)
- w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
- delete(watch.names, name)
- }
-
- return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
- for name, mask := range watch.names {
- if mask&provisional == 0 {
- w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
- }
- delete(watch.names, name)
- }
- if watch.mask != 0 {
- if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
- }
- watch.mask = 0
- }
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
- err := windows.CancelIo(watch.ino.handle)
- if err != nil {
- w.sendError(os.NewSyscallError("CancelIo", err))
- w.deleteWatch(watch)
- }
- mask := w.toWindowsFlags(watch.mask)
- for _, m := range watch.names {
- mask |= w.toWindowsFlags(m)
- }
- if mask == 0 {
- err := windows.CloseHandle(watch.ino.handle)
- if err != nil {
- w.sendError(os.NewSyscallError("CloseHandle", err))
- }
- w.mu.Lock()
- delete(w.watches[watch.ino.volume], watch.ino.index)
- w.mu.Unlock()
- return nil
- }
-
- rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
- uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
- if rdErr != nil {
- err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
- if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
- // Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
- err = nil
- }
- w.deleteWatch(watch)
- w.startRead(watch)
- return err
- }
- return nil
-}
-
-// readEvents reads from the I/O completion port, converts the
-// received events into Event objects and sends them via the Events channel.
-// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
- var (
- n uint32
- key uintptr
- ov *windows.Overlapped
- )
- runtime.LockOSThread()
-
- for {
- qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
- // This error is handled after the watch == nil check below. NOTE: this
- // seems odd, note sure if it's correct.
-
- watch := (*watch)(unsafe.Pointer(ov))
- if watch == nil {
- select {
- case ch := <-w.quit:
- w.mu.Lock()
- var indexes []indexMap
- for _, index := range w.watches {
- indexes = append(indexes, index)
- }
- w.mu.Unlock()
- for _, index := range indexes {
- for _, watch := range index {
- w.deleteWatch(watch)
- w.startRead(watch)
- }
- }
-
- err := windows.CloseHandle(w.port)
- if err != nil {
- err = os.NewSyscallError("CloseHandle", err)
- }
- close(w.Events)
- close(w.Errors)
- ch <- err
- return
- case in := <-w.input:
- switch in.op {
- case opAddWatch:
- in.reply <- w.addWatch(in.path, uint64(in.flags))
- case opRemoveWatch:
- in.reply <- w.remWatch(in.path)
- }
- default:
- }
- continue
- }
-
- switch qErr {
- case windows.ERROR_MORE_DATA:
- if watch == nil {
- w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
- } else {
- // The i/o succeeded but the buffer is full.
- // In theory we should be building up a full packet.
- // In practice we can get away with just carrying on.
- n = uint32(unsafe.Sizeof(watch.buf))
- }
- case windows.ERROR_ACCESS_DENIED:
- // Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
- w.deleteWatch(watch)
- w.startRead(watch)
- continue
- case windows.ERROR_OPERATION_ABORTED:
- // CancelIo was called on this handle
- continue
- default:
- w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
- continue
- case nil:
- }
-
- var offset uint32
- for {
- if n == 0 {
- w.sendError(errors.New("short read in readEvents()"))
- break
- }
-
- // Point "raw" to the event in the buffer
- raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
-
- // Create a buf that is the size of the path name
- size := int(raw.FileNameLength / 2)
- var buf []uint16
- // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
- sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
- sh.Len = size
- sh.Cap = size
- name := windows.UTF16ToString(buf)
- fullname := filepath.Join(watch.path, name)
-
- var mask uint64
- switch raw.Action {
- case windows.FILE_ACTION_REMOVED:
- mask = sysFSDELETESELF
- case windows.FILE_ACTION_MODIFIED:
- mask = sysFSMODIFY
- case windows.FILE_ACTION_RENAMED_OLD_NAME:
- watch.rename = name
- case windows.FILE_ACTION_RENAMED_NEW_NAME:
- // Update saved path of all sub-watches.
- old := filepath.Join(watch.path, watch.rename)
- w.mu.Lock()
- for _, watchMap := range w.watches {
- for _, ww := range watchMap {
- if strings.HasPrefix(ww.path, old) {
- ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
- }
- }
- }
- w.mu.Unlock()
-
- if watch.names[watch.rename] != 0 {
- watch.names[name] |= watch.names[watch.rename]
- delete(watch.names, watch.rename)
- mask = sysFSMOVESELF
- }
- }
-
- sendNameEvent := func() {
- w.sendEvent(fullname, watch.names[name]&mask)
- }
- if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
- sendNameEvent()
- }
- if raw.Action == windows.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
- delete(watch.names, name)
- }
-
- w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
- if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
- fullname = filepath.Join(watch.path, watch.rename)
- sendNameEvent()
- }
-
- // Move to the next event in the buffer
- if raw.NextEntryOffset == 0 {
- break
- }
- offset += raw.NextEntryOffset
-
- // Error!
- if offset >= n {
- w.sendError(errors.New(
- "Windows system assumed buffer larger than it is, events have likely been missed."))
- break
- }
- }
-
- if err := w.startRead(watch); err != nil {
- w.sendError(err)
- }
- }
-}
-
-func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
- var m uint32
- if mask&sysFSMODIFY != 0 {
- m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
- }
- if mask&sysFSATTRIB != 0 {
- m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
- }
- if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
- m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
- }
- return m
-}
-
-func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
- switch action {
- case windows.FILE_ACTION_ADDED:
- return sysFSCREATE
- case windows.FILE_ACTION_REMOVED:
- return sysFSDELETE
- case windows.FILE_ACTION_MODIFIED:
- return sysFSMODIFY
- case windows.FILE_ACTION_RENAMED_OLD_NAME:
- return sysFSMOVEDFROM
- case windows.FILE_ACTION_RENAMED_NEW_NAME:
- return sysFSMOVEDTO
- }
- return 0
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/etcd/vendor/github.com/fsnotify/fsnotify/fsnotify.go
deleted file mode 100644
index 30a5bf0f07..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ /dev/null
@@ -1,81 +0,0 @@
-//go:build !plan9
-// +build !plan9
-
-// Package fsnotify provides a cross-platform interface for file system
-// notifications.
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-// Event represents a file system notification.
-type Event struct {
- // Path to the file or directory.
- //
- // Paths are relative to the input; for example with Add("dir") the Name
- // will be set to "dir/file" if you create that file, but if you use
- // Add("/path/to/dir") it will be "/path/to/dir/file".
- Name string
-
- // File operation that triggered the event.
- //
- // This is a bitmask and some systems may send multiple operations at once.
- // Use the Event.Has() method instead of comparing with ==.
- Op Op
-}
-
-// Op describes a set of file operations.
-type Op uint32
-
-// The operations fsnotify can trigger; see the documentation on [Watcher] for a
-// full description, and check them with [Event.Has].
-const (
- Create Op = 1 << iota
- Write
- Remove
- Rename
- Chmod
-)
-
-// Common errors that can be reported by a watcher
-var (
- ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
- ErrEventOverflow = errors.New("fsnotify queue overflow")
-)
-
-func (op Op) String() string {
- var b strings.Builder
- if op.Has(Create) {
- b.WriteString("|CREATE")
- }
- if op.Has(Remove) {
- b.WriteString("|REMOVE")
- }
- if op.Has(Write) {
- b.WriteString("|WRITE")
- }
- if op.Has(Rename) {
- b.WriteString("|RENAME")
- }
- if op.Has(Chmod) {
- b.WriteString("|CHMOD")
- }
- if b.Len() == 0 {
- return "[no events]"
- }
- return b.String()[1:]
-}
-
-// Has reports if this operation has the given operation.
-func (o Op) Has(h Op) bool { return o&h == h }
-
-// Has reports if this event has the given operation.
-func (e Event) Has(op Op) bool { return e.Op.Has(op) }
-
-// String returns a string representation of the event with their path.
-func (e Event) String() string {
- return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
-}
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/etcd/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
deleted file mode 100644
index b09ef76834..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/bin/env zsh
-[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
-setopt err_exit no_unset pipefail extended_glob
-
-# Simple script to update the godoc comments on all watchers. Probably took me
-# more time to write this than doing it manually, but ah well 🙃
-
-watcher=$(</tmp/x
- print -r -- $cmt >>/tmp/x
- tail -n+$(( end + 1 )) $file >>/tmp/x
- mv /tmp/x $file
- done
-}
-
-set-cmt '^type Watcher struct ' $watcher
-set-cmt '^func NewWatcher(' $new
-set-cmt '^func (w \*Watcher) Add(' $add
-set-cmt '^func (w \*Watcher) Remove(' $remove
-set-cmt '^func (w \*Watcher) Close(' $close
-set-cmt '^func (w \*Watcher) WatchList(' $watchlist
-set-cmt '^[[:space:]]*Events *chan Event$' $events
-set-cmt '^[[:space:]]*Errors *chan error$' $errors
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/etcd/vendor/github.com/fsnotify/fsnotify/system_bsd.go
deleted file mode 100644
index 4322b0b885..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/system_bsd.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build freebsd || openbsd || netbsd || dragonfly
-// +build freebsd openbsd netbsd dragonfly
-
-package fsnotify
-
-import "golang.org/x/sys/unix"
-
-const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/etcd/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/etcd/vendor/github.com/fsnotify/fsnotify/system_darwin.go
deleted file mode 100644
index 5da5ffa78f..0000000000
--- a/etcd/vendor/github.com/fsnotify/fsnotify/system_darwin.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build darwin
-// +build darwin
-
-package fsnotify
-
-import "golang.org/x/sys/unix"
-
-// note: this constant is not defined on BSD
-const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/etcd/vendor/github.com/golang/groupcache/LICENSE b/etcd/vendor/github.com/golang/groupcache/LICENSE
deleted file mode 100644
index 37ec93a14f..0000000000
--- a/etcd/vendor/github.com/golang/groupcache/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/golang/groupcache/lru/lru.go b/etcd/vendor/github.com/golang/groupcache/lru/lru.go
deleted file mode 100644
index eac1c7664f..0000000000
--- a/etcd/vendor/github.com/golang/groupcache/lru/lru.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package lru implements an LRU cache.
-package lru
-
-import "container/list"
-
-// Cache is an LRU cache. It is not safe for concurrent access.
-type Cache struct {
- // MaxEntries is the maximum number of cache entries before
- // an item is evicted. Zero means no limit.
- MaxEntries int
-
- // OnEvicted optionally specifies a callback function to be
- // executed when an entry is purged from the cache.
- OnEvicted func(key Key, value interface{})
-
- ll *list.List
- cache map[interface{}]*list.Element
-}
-
-// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
-type Key interface{}
-
-type entry struct {
- key Key
- value interface{}
-}
-
-// New creates a new Cache.
-// If maxEntries is zero, the cache has no limit and it's assumed
-// that eviction is done by the caller.
-func New(maxEntries int) *Cache {
- return &Cache{
- MaxEntries: maxEntries,
- ll: list.New(),
- cache: make(map[interface{}]*list.Element),
- }
-}
-
-// Add adds a value to the cache.
-func (c *Cache) Add(key Key, value interface{}) {
- if c.cache == nil {
- c.cache = make(map[interface{}]*list.Element)
- c.ll = list.New()
- }
- if ee, ok := c.cache[key]; ok {
- c.ll.MoveToFront(ee)
- ee.Value.(*entry).value = value
- return
- }
- ele := c.ll.PushFront(&entry{key, value})
- c.cache[key] = ele
- if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
- c.RemoveOldest()
- }
-}
-
-// Get looks up a key's value from the cache.
-func (c *Cache) Get(key Key) (value interface{}, ok bool) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.ll.MoveToFront(ele)
- return ele.Value.(*entry).value, true
- }
- return
-}
-
-// Remove removes the provided key from the cache.
-func (c *Cache) Remove(key Key) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.removeElement(ele)
- }
-}
-
-// RemoveOldest removes the oldest item from the cache.
-func (c *Cache) RemoveOldest() {
- if c.cache == nil {
- return
- }
- ele := c.ll.Back()
- if ele != nil {
- c.removeElement(ele)
- }
-}
-
-func (c *Cache) removeElement(e *list.Element) {
- c.ll.Remove(e)
- kv := e.Value.(*entry)
- delete(c.cache, kv.key)
- if c.OnEvicted != nil {
- c.OnEvicted(kv.key, kv.value)
- }
-}
-
-// Len returns the number of items in the cache.
-func (c *Cache) Len() int {
- if c.cache == nil {
- return 0
- }
- return c.ll.Len()
-}
-
-// Clear purges all stored items from the cache.
-func (c *Cache) Clear() {
- if c.OnEvicted != nil {
- for _, e := range c.cache {
- kv := e.Value.(*entry)
- c.OnEvicted(kv.key, kv.value)
- }
- }
- c.ll = nil
- c.cache = nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/LICENSE b/etcd/vendor/github.com/google/cel-go/LICENSE
deleted file mode 100644
index 2493ed2eb4..0000000000
--- a/etcd/vendor/github.com/google/cel-go/LICENSE
+++ /dev/null
@@ -1,233 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-===========================================================================
-The common/types/pb/equal.go modification of proto.Equal logic
-===========================================================================
-Copyright (c) 2018 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/etcd/vendor/github.com/google/cel-go/cel/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/cel/BUILD.bazel
deleted file mode 100644
index e973abfc54..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ /dev/null
@@ -1,76 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "cel.go",
- "decls.go",
- "env.go",
- "io.go",
- "library.go",
- "macro.go",
- "options.go",
- "program.go",
- ],
- importpath = "github.com/google/cel-go/cel",
- visibility = ["//visibility:public"],
- deps = [
- "//checker:go_default_library",
- "//checker/decls:go_default_library",
- "//common:go_default_library",
- "//common/containers:go_default_library",
- "//common/overloads:go_default_library",
- "//common/types:go_default_library",
- "//common/types/pb:go_default_library",
- "//common/types/ref:go_default_library",
- "//common/types/traits:go_default_library",
- "//interpreter:go_default_library",
- "//interpreter/functions:go_default_library",
- "//parser:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
- "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
- "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
- "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
- "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
- "@org_golang_google_protobuf//types/known/anypb:go_default_library",
- "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
- "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- srcs = [
- "cel_example_test.go",
- "cel_test.go",
- "decls_test.go",
- "env_test.go",
- "io_test.go",
- ],
- data = [
- "//cel/testdata:gen_test_fds",
- ],
- embed = [
- ":go_default_library",
- ],
- deps = [
- "//common/operators:go_default_library",
- "//common/overloads:go_default_library",
- "//common/types:go_default_library",
- "//common/types/ref:go_default_library",
- "//common/types/traits:go_default_library",
- "//test:go_default_library",
- "//test/proto2pb:go_default_library",
- "//test/proto3pb:go_default_library",
- "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/cel/cel.go b/etcd/vendor/github.com/google/cel-go/cel/cel.go
deleted file mode 100644
index eb5a9f4cc5..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/cel.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cel defines the top-level interface for the Common Expression Language (CEL).
-//
-// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
-// expressions against user-defined environments.
-package cel
diff --git a/etcd/vendor/github.com/google/cel-go/cel/decls.go b/etcd/vendor/github.com/google/cel-go/cel/decls.go
deleted file mode 100644
index f2df721d07..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/decls.go
+++ /dev/null
@@ -1,1179 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "fmt"
- "strings"
-
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "github.com/google/cel-go/interpreter/functions"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types.
-type Kind uint
-
-const (
- // DynKind represents a dynamic type. This kind only exists at type-check time.
- DynKind Kind = iota
-
- // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
- AnyKind
-
- // BoolKind represents a boolean type.
- BoolKind
-
- // BytesKind represents a bytes type.
- BytesKind
-
- // DoubleKind represents a double type.
- DoubleKind
-
- // DurationKind represents a CEL duration type.
- DurationKind
-
- // IntKind represents an integer type.
- IntKind
-
- // ListKind represents a list type.
- ListKind
-
- // MapKind represents a map type.
- MapKind
-
- // NullTypeKind represents a null type.
- NullTypeKind
-
- // OpaqueKind represents an abstract type which has no accessible fields.
- OpaqueKind
-
- // StringKind represents a string type.
- StringKind
-
- // StructKind represents a structured object with typed fields.
- StructKind
-
- // TimestampKind represents a a CEL time type.
- TimestampKind
-
- // TypeKind represents the CEL type.
- TypeKind
-
- // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
- TypeParamKind
-
- // UintKind represents a uint type.
- UintKind
-)
-
-var (
- // AnyType represents the google.protobuf.Any type.
- AnyType = &Type{
- kind: AnyKind,
- runtimeType: types.NewTypeValue("google.protobuf.Any"),
- }
- // BoolType represents the bool type.
- BoolType = &Type{
- kind: BoolKind,
- runtimeType: types.BoolType,
- }
- // BytesType represents the bytes type.
- BytesType = &Type{
- kind: BytesKind,
- runtimeType: types.BytesType,
- }
- // DoubleType represents the double type.
- DoubleType = &Type{
- kind: DoubleKind,
- runtimeType: types.DoubleType,
- }
- // DurationType represents the CEL duration type.
- DurationType = &Type{
- kind: DurationKind,
- runtimeType: types.DurationType,
- }
- // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
- DynType = &Type{
- kind: DynKind,
- runtimeType: types.NewTypeValue("dyn"),
- }
- // IntType represents the int type.
- IntType = &Type{
- kind: IntKind,
- runtimeType: types.IntType,
- }
- // NullType represents the type of a null value.
- NullType = &Type{
- kind: NullTypeKind,
- runtimeType: types.NullType,
- }
- // StringType represents the string type.
- StringType = &Type{
- kind: StringKind,
- runtimeType: types.StringType,
- }
- // TimestampType represents the time type.
- TimestampType = &Type{
- kind: TimestampKind,
- runtimeType: types.TimestampType,
- }
- // TypeType represents a CEL type
- TypeType = &Type{
- kind: TypeKind,
- runtimeType: types.TypeType,
- }
- //UintType represents a uint type.
- UintType = &Type{
- kind: UintKind,
- runtimeType: types.UintType,
- }
-)
-
-// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
-type Type struct {
- // kind indicates general category of the type.
- kind Kind
-
- // runtimeType is the runtime type of the declaration.
- runtimeType ref.Type
-
- // parameters holds the optional type-checked set of type parameters that are used during static analysis.
- parameters []*Type
-
- // isAssignableType function determines whether one type is assignable to this type.
- // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters.
- isAssignableType func(other *Type) bool
-
- // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type.
- // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name.
- isAssignableRuntimeType func(other ref.Val) bool
-}
-
-// IsAssignableType determines whether the current type is type-check assignable from the input fromType.
-func (t *Type) IsAssignableType(fromType *Type) bool {
- if t.isAssignableType != nil {
- return t.isAssignableType(fromType)
- }
- return t.defaultIsAssignableType(fromType)
-}
-
-// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType.
-//
-// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string)
-// will have a runtime assignable type of a map.
-func (t *Type) IsAssignableRuntimeType(val ref.Val) bool {
- if t.isAssignableRuntimeType != nil {
- return t.isAssignableRuntimeType(val)
- }
- return t.defaultIsAssignableRuntimeType(val)
-}
-
-// String returns a human-readable definition of the type name.
-func (t *Type) String() string {
- if len(t.parameters) == 0 {
- return t.runtimeType.TypeName()
- }
- params := make([]string, len(t.parameters))
- for i, p := range t.parameters {
- params[i] = p.String()
- }
- return fmt.Sprintf("%s(%s)", t.runtimeType.TypeName(), strings.Join(params, ", "))
-}
-
-// isDyn indicates whether the type is dynamic in any way.
-func (t *Type) isDyn() bool {
- return t.kind == DynKind || t.kind == AnyKind || t.kind == TypeParamKind
-}
-
-// equals indicates whether two types have the same kind, type name, and parameters.
-func (t *Type) equals(other *Type) bool {
- if t.kind != other.kind ||
- t.runtimeType.TypeName() != other.runtimeType.TypeName() ||
- len(t.parameters) != len(other.parameters) {
- return false
- }
- for i, p := range t.parameters {
- if !p.equals(other.parameters[i]) {
- return false
- }
- }
- return true
-}
-
-// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
-// where any of the following may return a true result:
-// - The from types are the same instance
-// - The target type is dynamic
-// - The fromType has the same kind and type name as the target type, and all parameters of the target type
-// are IsAssignableType() from the parameters of the fromType.
-func (t *Type) defaultIsAssignableType(fromType *Type) bool {
- if t == fromType || t.isDyn() {
- return true
- }
- if t.kind != fromType.kind ||
- t.runtimeType.TypeName() != fromType.runtimeType.TypeName() ||
- len(t.parameters) != len(fromType.parameters) {
- return false
- }
- for i, tp := range t.parameters {
- fp := fromType.parameters[i]
- if !tp.IsAssignableType(fp) {
- return false
- }
- }
- return true
-}
-
-// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types
-// to determine whether a ref.Val is assignable to the declared type for a function signature.
-func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
- valType := val.Type()
- if !(t.runtimeType == valType || t.isDyn() || t.runtimeType.TypeName() == valType.TypeName()) {
- return false
- }
- switch t.runtimeType {
- case types.ListType:
- elemType := t.parameters[0]
- l := val.(traits.Lister)
- if l.Size() == types.IntZero {
- return true
- }
- it := l.Iterator()
- for it.HasNext() == types.True {
- elemVal := it.Next()
- return elemType.IsAssignableRuntimeType(elemVal)
- }
- case types.MapType:
- keyType := t.parameters[0]
- elemType := t.parameters[1]
- m := val.(traits.Mapper)
- if m.Size() == types.IntZero {
- return true
- }
- it := m.Iterator()
- for it.HasNext() == types.True {
- keyVal := it.Next()
- elemVal := m.Get(keyVal)
- return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
- }
- }
- return true
-}
-
-// ListType creates an instances of a list type value with the provided element type.
-func ListType(elemType *Type) *Type {
- return &Type{
- kind: ListKind,
- runtimeType: types.ListType,
- parameters: []*Type{elemType},
- }
-}
-
-// MapType creates an instance of a map type value with the provided key and value types.
-func MapType(keyType, valueType *Type) *Type {
- return &Type{
- kind: MapKind,
- runtimeType: types.MapType,
- parameters: []*Type{keyType, valueType},
- }
-}
-
-// NullableType creates an instance of a nullable type with the provided wrapped type.
-//
-// Note: only primitive types are supported as wrapped types.
-func NullableType(wrapped *Type) *Type {
- return &Type{
- kind: wrapped.kind,
- runtimeType: wrapped.runtimeType,
- parameters: wrapped.parameters,
- isAssignableType: func(other *Type) bool {
- return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
- },
- isAssignableRuntimeType: func(other ref.Val) bool {
- return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
- },
- }
-}
-
-// OpaqueType creates an abstract parameterized type with a given name.
-func OpaqueType(name string, params ...*Type) *Type {
- return &Type{
- kind: OpaqueKind,
- runtimeType: types.NewTypeValue(name),
- parameters: params,
- }
-}
-
-// ObjectType creates a type references to an externally defined type, e.g. a protobuf message type.
-func ObjectType(typeName string) *Type {
- return &Type{
- kind: StructKind,
- runtimeType: types.NewObjectTypeValue(typeName),
- }
-}
-
-// TypeParamType creates a parameterized type instance.
-func TypeParamType(paramName string) *Type {
- return &Type{
- kind: TypeParamKind,
- runtimeType: types.NewTypeValue(paramName),
- }
-}
-
-// Variable creates an instance of a variable declaration with a variable name and type.
-func Variable(name string, t *Type) EnvOption {
- return func(e *Env) (*Env, error) {
- et, err := TypeToExprType(t)
- if err != nil {
- return nil, err
- }
- e.declarations = append(e.declarations, decls.NewVar(name, et))
- return e, nil
- }
-}
-
-// Function defines a function and overloads with optional singleton or per-overload bindings.
-//
-// Using Function is roughly equivalent to calling Declarations() to declare the function signatures
-// and Functions() to define the function bindings, if they have been defined. Specifying the
-// same function name more than once will result in the aggregation of the function overloads. If any
-// signatures conflict between the existing and new function definition an error will be raised.
-// However, if the signatures are identical and the overload ids are the same, the redefinition will
-// be considered a no-op.
-//
-// One key difference with using Function() is that each FunctionDecl provided will handle dynamic
-// dispatch based on the type-signatures of the overloads provided which means overload resolution at
-// runtime is handled out of the box rather than via a custom binding for overload resolution via
-// Functions():
-//
-// - Overloads are searched in the order they are declared
-// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
-// at runtime. Empty lists and maps will result in a 'default dispatch'
-// - In the event that a default dispatch occurs, the first overload provided is the one invoked
-//
-// If you intend to use overloads which differentiate based on the key or element type of a list or
-// map, consider using a generic function instead: e.g. func(list(T)) or func(map(K, V)) as this
-// will allow your implementation to determine how best to handle dispatch and the default behavior
-// for empty lists and maps whose contents cannot be inspected.
-//
-// For functions which use parameterized opaque types (abstract types), consider using a singleton
-// function which is capable of inspecting the contents of the type and resolving the appropriate
-// overload as CEL can only make inferences by type-name regarding such types.
-func Function(name string, opts ...FunctionOpt) EnvOption {
- return func(e *Env) (*Env, error) {
- fn := &functionDecl{
- name: name,
- overloads: []*overloadDecl{},
- options: opts,
- }
- err := fn.init()
- if err != nil {
- return nil, err
- }
- _, err = functionDeclToExprDecl(fn)
- if err != nil {
- return nil, err
- }
- if existing, found := e.functions[fn.name]; found {
- fn, err = existing.merge(fn)
- if err != nil {
- return nil, err
- }
- }
- e.functions[name] = fn
- return e, nil
- }
-}
-
-// FunctionOpt defines a functional option for configuring a function declaration.
-type FunctionOpt func(*functionDecl) (*functionDecl, error)
-
-// SingletonUnaryBinding creates a singleton function defintion to be used for all function overloads.
-//
-// Note, this approach works well if operand is expected to have a specific trait which it implements,
-// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
-func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
- trait := 0
- for _, t := range traits {
- trait = trait | t
- }
- return func(f *functionDecl) (*functionDecl, error) {
- if f.singleton != nil {
- return nil, fmt.Errorf("function already has a singleton binding: %s", f.name)
- }
- f.singleton = &functions.Overload{
- Operator: f.name,
- Unary: fn,
- OperandTrait: trait,
- }
- return f, nil
- }
-}
-
-// SingletonBinaryImpl creates a singleton function definition to be used with all function overloads.
-//
-// Note, this approach works well if operand is expected to have a specific trait which it implements,
-// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
-func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
- trait := 0
- for _, t := range traits {
- trait = trait | t
- }
- return func(f *functionDecl) (*functionDecl, error) {
- if f.singleton != nil {
- return nil, fmt.Errorf("function already has a singleton binding: %s", f.name)
- }
- f.singleton = &functions.Overload{
- Operator: f.name,
- Binary: fn,
- OperandTrait: trait,
- }
- return f, nil
- }
-}
-
-// SingletonFunctionImpl creates a singleton function definition to be used with all function overloads.
-//
-// Note, this approach works well if operand is expected to have a specific trait which it implements,
-// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
-func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
- trait := 0
- for _, t := range traits {
- trait = trait | t
- }
- return func(f *functionDecl) (*functionDecl, error) {
- if f.singleton != nil {
- return nil, fmt.Errorf("function already has a singleton binding: %s", f.name)
- }
- f.singleton = &functions.Overload{
- Operator: f.name,
- Function: fn,
- OperandTrait: trait,
- }
- return f, nil
- }
-}
-
-// Overload defines a new global overload with an overload id, argument types, and result type. Through the
-// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
-// be non-strict.
-//
-// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
-// strict-ness should be rare occurrences.
-func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
- return newOverload(overloadID, false, args, resultType, opts...)
-}
-
-// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
-// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
-// an operand trait, and to be non-strict.
-//
-// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
-// strict-ness should be rare occurrences.
-func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
- return newOverload(overloadID, true, args, resultType, opts...)
-}
-
-// OverloadOpt is a functional option for configuring a function overload.
-type OverloadOpt func(*overloadDecl) (*overloadDecl, error)
-
-// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
-// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
-func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
- return func(o *overloadDecl) (*overloadDecl, error) {
- if o.hasBinding() {
- return nil, fmt.Errorf("overload already has a binding: %s", o.id)
- }
- if len(o.argTypes) != 1 {
- return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.id)
- }
- o.unaryOp = binding
- return o, nil
- }
-}
-
-// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
-// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
-func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
- return func(o *overloadDecl) (*overloadDecl, error) {
- if o.hasBinding() {
- return nil, fmt.Errorf("overload already has a binding: %s", o.id)
- }
- if len(o.argTypes) != 2 {
- return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.id)
- }
- o.binaryOp = binding
- return o, nil
- }
-}
-
-// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
-// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
-func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
- return func(o *overloadDecl) (*overloadDecl, error) {
- if o.hasBinding() {
- return nil, fmt.Errorf("overload already has a binding: %s", o.id)
- }
- o.functionOp = binding
- return o, nil
- }
-}
-
-// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
-//
-// Note: do not use this option unless absoluately necessary as it should be an uncommon feature.
-func OverloadIsNonStrict() OverloadOpt {
- return func(o *overloadDecl) (*overloadDecl, error) {
- o.nonStrict = true
- return o, nil
- }
-}
-
-// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
-// successfully invoked.
-func OverloadOperandTrait(trait int) OverloadOpt {
- return func(o *overloadDecl) (*overloadDecl, error) {
- o.operandTrait = trait
- return o, nil
- }
-}
-
-type functionDecl struct {
- name string
- overloads []*overloadDecl
- options []FunctionOpt
- singleton *functions.Overload
- initialized bool
-}
-
-// init ensures that a function's options have been applied.
-//
-// This function is used in both the environment configuration and internally for function merges.
-func (f *functionDecl) init() error {
- if f.initialized {
- return nil
- }
- f.initialized = true
-
- var err error
- for _, opt := range f.options {
- f, err = opt(f)
- if err != nil {
- return err
- }
- }
- if len(f.overloads) == 0 {
- return fmt.Errorf("function %s must have at least one overload", f.name)
- }
- return nil
-}
-
-// bindings produces a set of function bindings, if any are defined.
-func (f *functionDecl) bindings() ([]*functions.Overload, error) {
- overloads := []*functions.Overload{}
- nonStrict := false
- for _, o := range f.overloads {
- if o.hasBinding() {
- overload := &functions.Overload{
- Operator: o.id,
- Unary: o.guardedUnaryOp(f.name),
- Binary: o.guardedBinaryOp(f.name),
- Function: o.guardedFunctionOp(f.name),
- OperandTrait: o.operandTrait,
- NonStrict: o.nonStrict,
- }
- overloads = append(overloads, overload)
- nonStrict = nonStrict || o.nonStrict
- }
- }
- if f.singleton != nil {
- if len(overloads) != 0 {
- return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.name)
- }
- return []*functions.Overload{
- {
- Operator: f.name,
- Unary: f.singleton.Unary,
- Binary: f.singleton.Binary,
- Function: f.singleton.Function,
- OperandTrait: f.singleton.OperandTrait,
- },
- }, nil
- }
- if len(overloads) == 0 {
- return overloads, nil
- }
- // Single overload. Replicate an entry for it using the function name as well.
- if len(overloads) == 1 {
- if overloads[0].Operator == f.name {
- return overloads, nil
- }
- return append(overloads, &functions.Overload{
- Operator: f.name,
- Unary: overloads[0].Unary,
- Binary: overloads[0].Binary,
- Function: overloads[0].Function,
- NonStrict: overloads[0].NonStrict,
- OperandTrait: overloads[0].OperandTrait,
- }), nil
- }
- // All of the defined overloads are wrapped into a top-level function which
- // performs dynamic dispatch to the proper overload based on the argument types.
- bindings := append([]*functions.Overload{}, overloads...)
- funcDispatch := func(args ...ref.Val) ref.Val {
- for _, o := range f.overloads {
- if !o.matchesRuntimeSignature(args...) {
- continue
- }
- switch len(args) {
- case 1:
- if o.unaryOp != nil {
- return o.unaryOp(args[0])
- }
- case 2:
- if o.binaryOp != nil {
- return o.binaryOp(args[0], args[1])
- }
- }
- if o.functionOp != nil {
- return o.functionOp(args...)
- }
- // eventually this will fall through to the noSuchOverload below.
- }
- return noSuchOverload(f.name, args...)
- }
- function := &functions.Overload{
- Operator: f.name,
- Function: funcDispatch,
- NonStrict: nonStrict,
- }
- return append(bindings, function), nil
-}
-
-// merge one function declaration with another.
-//
-// If a function is extended, by say adding new overloads to an existing function, then it is merged with the
-// prior definition of the function at which point its overloads must not collide with pre-existing overloads
-// and its bindings (singleton, or per-overload) must not conflict with previous definitions either.
-func (f *functionDecl) merge(other *functionDecl) (*functionDecl, error) {
- if f.name != other.name {
- return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.name, other.name)
- }
- err := f.init()
- if err != nil {
- return nil, err
- }
- err = other.init()
- if err != nil {
- return nil, err
- }
- merged := &functionDecl{
- name: f.name,
- overloads: make([]*overloadDecl, len(f.overloads)),
- options: []FunctionOpt{},
- initialized: true,
- singleton: f.singleton,
- }
- copy(merged.overloads, f.overloads)
- for _, o := range other.overloads {
- err := merged.addOverload(o)
- if err != nil {
- return nil, fmt.Errorf("function declaration merge failed: %v", err)
- }
- }
- if other.singleton != nil {
- if merged.singleton != nil {
- return nil, fmt.Errorf("function already has a binding: %s", f.name)
- }
- merged.singleton = other.singleton
- }
- return merged, nil
-}
-
-// addOverload ensures that the new overload does not collide with an existing overload signature;
-// however, if the function signatures are identical, the implementation may be rewritten as its
-// difficult to compare functions by object identity.
-func (f *functionDecl) addOverload(overload *overloadDecl) error {
- for index, o := range f.overloads {
- if o.id != overload.id && o.signatureOverlaps(overload) {
- return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.name, o.id, overload.id)
- }
- if o.id == overload.id {
- if o.signatureEquals(overload) && o.nonStrict == overload.nonStrict {
- // Allow redefinition of an overload implementation so long as the signatures match.
- f.overloads[index] = overload
- return nil
- } else {
- return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
- }
- }
- }
- f.overloads = append(f.overloads, overload)
- return nil
-}
-
-func noSuchOverload(funcName string, args ...ref.Val) ref.Val {
- argTypes := make([]string, len(args))
- for i, arg := range args {
- argTypes[i] = arg.Type().TypeName()
- }
- signature := strings.Join(argTypes, ", ")
- return types.NewErr("no such overload: %s(%s)", funcName, signature)
-}
-
-// overloadDecl contains all of the relevant information regarding a specific function overload.
-type overloadDecl struct {
- id string
- argTypes []*Type
- resultType *Type
- memberFunction bool
-
- // binding options, optional but encouraged.
- unaryOp functions.UnaryOp
- binaryOp functions.BinaryOp
- functionOp functions.FunctionOp
-
- // behavioral options, uncommon
- nonStrict bool
- operandTrait int
-}
-
-func (o *overloadDecl) hasBinding() bool {
- return o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil
-}
-
-// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
-func (o *overloadDecl) guardedUnaryOp(funcName string) functions.UnaryOp {
- if o.unaryOp == nil {
- return nil
- }
- return func(arg ref.Val) ref.Val {
- if !o.matchesRuntimeUnarySignature(arg) {
- return noSuchOverload(funcName, arg)
- }
- return o.unaryOp(arg)
- }
-}
-
-// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
-func (o *overloadDecl) guardedBinaryOp(funcName string) functions.BinaryOp {
- if o.binaryOp == nil {
- return nil
- }
- return func(arg1, arg2 ref.Val) ref.Val {
- if !o.matchesRuntimeBinarySignature(arg1, arg2) {
- return noSuchOverload(funcName, arg1, arg2)
- }
- return o.binaryOp(arg1, arg2)
- }
-}
-
-// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
-func (o *overloadDecl) guardedFunctionOp(funcName string) functions.FunctionOp {
- if o.functionOp == nil {
- return nil
- }
- return func(args ...ref.Val) ref.Val {
- if !o.matchesRuntimeSignature(args...) {
- return noSuchOverload(funcName, args...)
- }
- return o.functionOp(args...)
- }
-}
-
-// matchesRuntimeUnarySignature indicates whether the argument type is runtime assiganble to the overload's expected argument.
-func (o *overloadDecl) matchesRuntimeUnarySignature(arg ref.Val) bool {
- if o.nonStrict && types.IsUnknownOrError(arg) {
- return true
- }
- return o.argTypes[0].IsAssignableRuntimeType(arg) && (o.operandTrait == 0 || arg.Type().HasTrait(o.operandTrait))
-}
-
-// matchesRuntimeBinarySignature indicates whether the argument types are runtime assiganble to the overload's expected arguments.
-func (o *overloadDecl) matchesRuntimeBinarySignature(arg1, arg2 ref.Val) bool {
- if o.nonStrict {
- if types.IsUnknownOrError(arg1) {
- return types.IsUnknownOrError(arg2) || o.argTypes[1].IsAssignableRuntimeType(arg2)
- }
- } else if !o.argTypes[1].IsAssignableRuntimeType(arg2) {
- return false
- }
- return o.argTypes[0].IsAssignableRuntimeType(arg1) && (o.operandTrait == 0 || arg1.Type().HasTrait(o.operandTrait))
-}
-
-// matchesRuntimeSignature indicates whether the argument types are runtime assiganble to the overload's expected arguments.
-func (o *overloadDecl) matchesRuntimeSignature(args ...ref.Val) bool {
- if len(args) != len(o.argTypes) {
- return false
- }
- if len(args) == 0 {
- return true
- }
- allArgsMatch := true
- for i, arg := range args {
- if o.nonStrict && types.IsUnknownOrError(arg) {
- continue
- }
- allArgsMatch = allArgsMatch && o.argTypes[i].IsAssignableRuntimeType(arg)
- }
-
- arg := args[0]
- return allArgsMatch && (o.operandTrait == 0 || (o.nonStrict && types.IsUnknownOrError(arg)) || arg.Type().HasTrait(o.operandTrait))
-}
-
-// signatureEquals indicates whether one overload has an identical signature to another overload.
-//
-// Providing a duplicate signature is not an issue, but an overloapping signature is problematic.
-func (o *overloadDecl) signatureEquals(other *overloadDecl) bool {
- if o.id != other.id || o.memberFunction != other.memberFunction || len(o.argTypes) != len(other.argTypes) {
- return false
- }
- for i, at := range o.argTypes {
- oat := other.argTypes[i]
- if !at.equals(oat) {
- return false
- }
- }
- return o.resultType.equals(other.resultType)
-}
-
-// signatureOverlaps indicates whether one overload has an overlapping signature with another overload.
-//
-// The 'other' overload must first be checked for equality before determining whether it overlaps in order to be completely accurate.
-func (o *overloadDecl) signatureOverlaps(other *overloadDecl) bool {
- if o.memberFunction != other.memberFunction || len(o.argTypes) != len(other.argTypes) {
- return false
- }
- argsOverlap := true
- for i, argType := range o.argTypes {
- otherArgType := other.argTypes[i]
- argsOverlap = argsOverlap &&
- (argType.IsAssignableType(otherArgType) ||
- otherArgType.IsAssignableType(argType))
- }
- return argsOverlap
-}
-
-func newOverload(overloadID string, memberFunction bool, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
- return func(f *functionDecl) (*functionDecl, error) {
- overload := &overloadDecl{
- id: overloadID,
- argTypes: args,
- resultType: resultType,
- memberFunction: memberFunction,
- }
- var err error
- for _, opt := range opts {
- overload, err = opt(overload)
- if err != nil {
- return nil, err
- }
- }
- err = f.addOverload(overload)
- if err != nil {
- return nil, err
- }
- return f, nil
- }
-}
-
-func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type {
- if t.IsAssignableType(NullType) {
- return decls.NewWrapperType(pbType)
- }
- return pbType
-}
-
-// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
-func TypeToExprType(t *Type) (*exprpb.Type, error) {
- switch t.kind {
- case AnyKind:
- return decls.Any, nil
- case BoolKind:
- return maybeWrapper(t, decls.Bool), nil
- case BytesKind:
- return maybeWrapper(t, decls.Bytes), nil
- case DoubleKind:
- return maybeWrapper(t, decls.Double), nil
- case DurationKind:
- return decls.Duration, nil
- case DynKind:
- return decls.Dyn, nil
- case IntKind:
- return maybeWrapper(t, decls.Int), nil
- case ListKind:
- et, err := TypeToExprType(t.parameters[0])
- if err != nil {
- return nil, err
- }
- return decls.NewListType(et), nil
- case MapKind:
- kt, err := TypeToExprType(t.parameters[0])
- if err != nil {
- return nil, err
- }
- vt, err := TypeToExprType(t.parameters[1])
- if err != nil {
- return nil, err
- }
- return decls.NewMapType(kt, vt), nil
- case NullTypeKind:
- return decls.Null, nil
- case OpaqueKind:
- params := make([]*exprpb.Type, len(t.parameters))
- for i, p := range t.parameters {
- pt, err := TypeToExprType(p)
- if err != nil {
- return nil, err
- }
- params[i] = pt
- }
- return decls.NewAbstractType(t.runtimeType.TypeName(), params...), nil
- case StringKind:
- return maybeWrapper(t, decls.String), nil
- case StructKind:
- switch t.runtimeType.TypeName() {
- case "google.protobuf.Any":
- return decls.Any, nil
- case "google.protobuf.Duration":
- return decls.Duration, nil
- case "google.protobuf.Timestamp":
- return decls.Timestamp, nil
- case "google.protobuf.Value":
- return decls.Dyn, nil
- case "google.protobuf.ListValue":
- return decls.NewListType(decls.Dyn), nil
- case "google.protobuf.Struct":
- return decls.NewMapType(decls.String, decls.Dyn), nil
- case "google.protobuf.BoolValue":
- return decls.NewWrapperType(decls.Bool), nil
- case "google.protobuf.BytesValue":
- return decls.NewWrapperType(decls.Bytes), nil
- case "google.protobuf.DoubleValue", "google.protobuf.FloatValue":
- return decls.NewWrapperType(decls.Double), nil
- case "google.protobuf.Int32Value", "google.protobuf.Int64Value":
- return decls.NewWrapperType(decls.Int), nil
- case "google.protobuf.StringValue":
- return decls.NewWrapperType(decls.String), nil
- case "google.protobuf.UInt32Value", "google.protobuf.UInt64Value":
- return decls.NewWrapperType(decls.Uint), nil
- default:
- return decls.NewObjectType(t.runtimeType.TypeName()), nil
- }
- case TimestampKind:
- return decls.Timestamp, nil
- case TypeParamKind:
- return decls.NewTypeParamType(t.runtimeType.TypeName()), nil
- case TypeKind:
- return decls.NewTypeType(decls.Dyn), nil
- case UintKind:
- return maybeWrapper(t, decls.Uint), nil
- }
- return nil, fmt.Errorf("missing type conversion to proto: %v", t)
-}
-
-// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
-func ExprTypeToType(t *exprpb.Type) (*Type, error) {
- switch t.GetTypeKind().(type) {
- case *exprpb.Type_Dyn:
- return DynType, nil
- case *exprpb.Type_AbstractType_:
- paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes()))
- for i, p := range t.GetAbstractType().GetParameterTypes() {
- pt, err := ExprTypeToType(p)
- if err != nil {
- return nil, err
- }
- paramTypes[i] = pt
- }
- return OpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil
- case *exprpb.Type_ListType_:
- et, err := ExprTypeToType(t.GetListType().GetElemType())
- if err != nil {
- return nil, err
- }
- return ListType(et), nil
- case *exprpb.Type_MapType_:
- kt, err := ExprTypeToType(t.GetMapType().GetKeyType())
- if err != nil {
- return nil, err
- }
- vt, err := ExprTypeToType(t.GetMapType().GetValueType())
- if err != nil {
- return nil, err
- }
- return MapType(kt, vt), nil
- case *exprpb.Type_MessageType:
- switch t.GetMessageType() {
- case "google.protobuf.Any":
- return AnyType, nil
- case "google.protobuf.Duration":
- return DurationType, nil
- case "google.protobuf.Timestamp":
- return TimestampType, nil
- case "google.protobuf.Value":
- return DynType, nil
- case "google.protobuf.ListValue":
- return ListType(DynType), nil
- case "google.protobuf.Struct":
- return MapType(StringType, DynType), nil
- case "google.protobuf.BoolValue":
- return NullableType(BoolType), nil
- case "google.protobuf.BytesValue":
- return NullableType(BytesType), nil
- case "google.protobuf.DoubleValue", "google.protobuf.FloatValue":
- return NullableType(DoubleType), nil
- case "google.protobuf.Int32Value", "google.protobuf.Int64Value":
- return NullableType(IntType), nil
- case "google.protobuf.StringValue":
- return NullableType(StringType), nil
- case "google.protobuf.UInt32Value", "google.protobuf.UInt64Value":
- return NullableType(UintType), nil
- default:
- return ObjectType(t.GetMessageType()), nil
- }
- case *exprpb.Type_Null:
- return NullType, nil
- case *exprpb.Type_Primitive:
- switch t.GetPrimitive() {
- case exprpb.Type_BOOL:
- return BoolType, nil
- case exprpb.Type_BYTES:
- return BytesType, nil
- case exprpb.Type_DOUBLE:
- return DoubleType, nil
- case exprpb.Type_INT64:
- return IntType, nil
- case exprpb.Type_STRING:
- return StringType, nil
- case exprpb.Type_UINT64:
- return UintType, nil
- default:
- return nil, fmt.Errorf("unsupported primitive type: %v", t)
- }
- case *exprpb.Type_TypeParam:
- return TypeParamType(t.GetTypeParam()), nil
- case *exprpb.Type_Type:
- return TypeType, nil
- case *exprpb.Type_WellKnown:
- switch t.GetWellKnown() {
- case exprpb.Type_ANY:
- return AnyType, nil
- case exprpb.Type_DURATION:
- return DurationType, nil
- case exprpb.Type_TIMESTAMP:
- return TimestampType, nil
- default:
- return nil, fmt.Errorf("unsupported well-known type: %v", t)
- }
- case *exprpb.Type_Wrapper:
- t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}})
- if err != nil {
- return nil, err
- }
- return NullableType(t), nil
- default:
- return nil, fmt.Errorf("unsupported type: %v", t)
- }
-}
-
-// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
-func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
- switch d.GetDeclKind().(type) {
- case *exprpb.Decl_Function:
- overloads := d.GetFunction().GetOverloads()
- opts := make([]FunctionOpt, len(overloads))
- for i, o := range overloads {
- args := make([]*Type, len(o.GetParams()))
- for j, p := range o.GetParams() {
- a, err := ExprTypeToType(p)
- if err != nil {
- return nil, err
- }
- args[j] = a
- }
- res, err := ExprTypeToType(o.GetResultType())
- if err != nil {
- return nil, err
- }
- opts[i] = Overload(o.GetOverloadId(), args, res)
- }
- return Function(d.GetName(), opts...), nil
- case *exprpb.Decl_Ident:
- t, err := ExprTypeToType(d.GetIdent().GetType())
- if err != nil {
- return nil, err
- }
- return Variable(d.GetName(), t), nil
- default:
- return nil, fmt.Errorf("unsupported decl: %v", d)
- }
-
-}
-
-func functionDeclToExprDecl(f *functionDecl) (*exprpb.Decl, error) {
- overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads))
- i := 0
- for _, o := range f.overloads {
- paramNames := map[string]struct{}{}
- argTypes := make([]*exprpb.Type, len(o.argTypes))
- for j, a := range o.argTypes {
- collectParamNames(paramNames, a)
- at, err := TypeToExprType(a)
- if err != nil {
- return nil, err
- }
- argTypes[j] = at
- }
- collectParamNames(paramNames, o.resultType)
- resultType, err := TypeToExprType(o.resultType)
- if err != nil {
- return nil, err
- }
- if len(paramNames) == 0 {
- if o.memberFunction {
- overloads[i] = decls.NewInstanceOverload(o.id, argTypes, resultType)
- } else {
- overloads[i] = decls.NewOverload(o.id, argTypes, resultType)
- }
- } else {
- params := []string{}
- for pn := range paramNames {
- params = append(params, pn)
- }
- if o.memberFunction {
- overloads[i] = decls.NewParameterizedInstanceOverload(o.id, argTypes, resultType, params)
- } else {
- overloads[i] = decls.NewParameterizedOverload(o.id, argTypes, resultType, params)
- }
- }
- i++
- }
- return decls.NewFunction(f.name, overloads...), nil
-}
-
-func collectParamNames(paramNames map[string]struct{}, arg *Type) {
- if arg.kind == TypeParamKind {
- paramNames[arg.runtimeType.TypeName()] = struct{}{}
- }
- for _, param := range arg.parameters {
- collectParamNames(paramNames, param)
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/cel/env.go b/etcd/vendor/github.com/google/cel-go/cel/env.go
deleted file mode 100644
index 4e9ecdd648..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/env.go
+++ /dev/null
@@ -1,613 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "github.com/google/cel-go/checker"
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/interpreter"
- "github.com/google/cel-go/parser"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Source interface representing a user-provided expression.
-type Source = common.Source
-
-// Ast representing the checked or unchecked expression, its source, and related metadata such as
-// source position information.
-type Ast struct {
- expr *exprpb.Expr
- info *exprpb.SourceInfo
- source Source
- refMap map[int64]*exprpb.Reference
- typeMap map[int64]*exprpb.Type
-}
-
-// Expr returns the proto serializable instance of the parsed/checked expression.
-func (ast *Ast) Expr() *exprpb.Expr {
- return ast.expr
-}
-
-// IsChecked returns whether the Ast value has been successfully type-checked.
-func (ast *Ast) IsChecked() bool {
- return ast.typeMap != nil && len(ast.typeMap) > 0
-}
-
-// SourceInfo returns character offset and newline position information about expression elements.
-func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
- return ast.info
-}
-
-// ResultType returns the output type of the expression if the Ast has been type-checked, else
-// returns decls.Dyn as the parse step cannot infer the type.
-//
-// Deprecated: use OutputType
-func (ast *Ast) ResultType() *exprpb.Type {
- if !ast.IsChecked() {
- return decls.Dyn
- }
- return ast.typeMap[ast.expr.GetId()]
-}
-
-// OutputType returns the output type of the expression if the Ast has been type-checked, else
-// returns cel.DynType as the parse step cannot infer types.
-func (ast *Ast) OutputType() *Type {
- t, err := ExprTypeToType(ast.ResultType())
- if err != nil {
- return DynType
- }
- return t
-}
-
-// Source returns a view of the input used to create the Ast. This source may be complete or
-// constructed from the SourceInfo.
-func (ast *Ast) Source() Source {
- return ast.source
-}
-
-// FormatType converts a type message into a string representation.
-func FormatType(t *exprpb.Type) string {
- return checker.FormatCheckedType(t)
-}
-
-// Env encapsulates the context necessary to perform parsing, type checking, or generation of
-// evaluable programs for different expressions.
-type Env struct {
- Container *containers.Container
- functions map[string]*functionDecl
- declarations []*exprpb.Decl
- macros []parser.Macro
- adapter ref.TypeAdapter
- provider ref.TypeProvider
- features map[int]bool
- appliedFeatures map[int]bool
-
- // Internal parser representation
- prsr *parser.Parser
-
- // Internal checker representation
- chk *checker.Env
- chkErr error
- chkOnce sync.Once
- chkOpts []checker.Option
-
- // Program options tied to the environment
- progOpts []ProgramOption
-}
-
-// NewEnv creates a program environment configured with the standard library of CEL functions and
-// macros. The Env value returned can parse and check any CEL program which builds upon the core
-// features documented in the CEL specification.
-//
-// See the EnvOption helper functions for the options that can be used to configure the
-// environment.
-func NewEnv(opts ...EnvOption) (*Env, error) {
- // Extend the statically configured standard environment, disabling eager validation to ensure
- // the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier
- // releases. The user provided options can easily re-enable the eager validation as they are
- // processed after this default option.
- stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...)
- env, err := getStdEnv()
- if err != nil {
- return nil, err
- }
- return env.Extend(stdOpts...)
-}
-
-// NewCustomEnv creates a custom program environment which is not automatically configured with the
-// standard library of functions and macros documented in the CEL spec.
-//
-// The purpose for using a custom environment might be for subsetting the standard library produced
-// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
-// limit the compute and memory impact of a CEL program by controlling the functions and macros
-// that may appear in a given expression.
-//
-// See the EnvOption helper functions for the options that can be used to configure the
-// environment.
-func NewCustomEnv(opts ...EnvOption) (*Env, error) {
- registry, err := types.NewRegistry()
- if err != nil {
- return nil, err
- }
- return (&Env{
- declarations: []*exprpb.Decl{},
- functions: map[string]*functionDecl{},
- macros: []parser.Macro{},
- Container: containers.DefaultContainer,
- adapter: registry,
- provider: registry,
- features: map[int]bool{},
- appliedFeatures: map[int]bool{},
- progOpts: []ProgramOption{},
- }).configure(opts)
-}
-
-// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
-//
-// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil.
-// Issues should be inspected if they are non-nil, but may not represent a fatal error.
-//
-// It is possible to have both non-nil Ast and Issues values returned from this call: however,
-// the mere presence of an Ast does not imply that it is valid for use.
-func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
- // Note, errors aren't currently possible on the Ast to ParsedExpr conversion.
- pe, _ := AstToParsedExpr(ast)
-
- // Construct the internal checker env, erroring if there is an issue adding the declarations.
- err := e.initChecker()
- if err != nil {
- errs := common.NewErrors(ast.Source())
- errs.ReportError(common.NoLocation, e.chkErr.Error())
- return nil, NewIssues(errs)
- }
-
- res, errs := checker.Check(pe, ast.Source(), e.chk)
- if len(errs.GetErrors()) > 0 {
- return nil, NewIssues(errs)
- }
- // Manually create the Ast to ensure that the Ast source information (which may be more
- // detailed than the information provided by Check), is returned to the caller.
- return &Ast{
- source: ast.Source(),
- expr: res.GetExpr(),
- info: res.GetSourceInfo(),
- refMap: res.GetReferenceMap(),
- typeMap: res.GetTypeMap()}, nil
-}
-
-// Compile combines the Parse and Check phases CEL program compilation to produce an Ast and
-// associated issues.
-//
-// If an error is encountered during parsing the Compile step will not continue with the Check
-// phase. If non-error issues are encountered during Parse, they may be combined with any issues
-// discovered during Check.
-//
-// Note, for parse-only uses of CEL use Parse.
-func (e *Env) Compile(txt string) (*Ast, *Issues) {
- return e.CompileSource(common.NewTextSource(txt))
-}
-
-// CompileSource combines the Parse and Check phases CEL program compilation to produce an Ast and
-// associated issues.
-//
-// If an error is encountered during parsing the CompileSource step will not continue with the
-// Check phase. If non-error issues are encountered during Parse, they may be combined with any
-// issues discovered during Check.
-//
-// Note, for parse-only uses of CEL use Parse.
-func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
- ast, iss := e.ParseSource(src)
- if iss.Err() != nil {
- return nil, iss
- }
- checked, iss2 := e.Check(ast)
- if iss2.Err() != nil {
- return nil, iss2
- }
- return checked, iss2
-}
-
-// Extend the current environment with additional options to produce a new Env.
-//
-// Note, the extended Env value should not share memory with the original. It is possible, however,
-// that a CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
-// To ensure separation of state between extended environments either make sure the TypeAdapter and
-// TypeProvider are immutable, or that their underlying implementations are based on the
-// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
-func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
- if e.chkErr != nil {
- return nil, e.chkErr
- }
-
- // The type-checker is configured with Declarations. The declarations may either be provided
- // as options which have not yet been validated, or may come from a previous checker instance
- // whose types have already been validated.
- chkOptsCopy := make([]checker.Option, len(e.chkOpts))
- copy(chkOptsCopy, e.chkOpts)
-
- // Copy the declarations if needed.
- decsCopy := []*exprpb.Decl{}
- if e.chk != nil {
- // If the type-checker has already been instantiated, then the e.declarations have been
- // valdiated within the chk instance.
- chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(e.chk))
- } else {
- // If the type-checker has not been instantiated, ensure the unvalidated declarations are
- // provided to the extended Env instance.
- decsCopy = make([]*exprpb.Decl, len(e.declarations))
- copy(decsCopy, e.declarations)
- }
-
- // Copy macros and program options
- macsCopy := make([]parser.Macro, len(e.macros))
- progOptsCopy := make([]ProgramOption, len(e.progOpts))
- copy(macsCopy, e.macros)
- copy(progOptsCopy, e.progOpts)
-
- // Copy the adapter / provider if they appear to be mutable.
- adapter := e.adapter
- provider := e.provider
- adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry)
- providerReg, isProviderReg := e.provider.(ref.TypeRegistry)
- // In most cases the provider and adapter will be a ref.TypeRegistry;
- // however, in the rare cases where they are not, they are assumed to
- // be immutable. Since it is possible to set the TypeProvider separately
- // from the TypeAdapter, the possible configurations which could use a
- // TypeRegistry as the base implementation are captured below.
- if isAdapterReg && isProviderReg {
- reg := providerReg.Copy()
- provider = reg
- // If the adapter and provider are the same object, set the adapter
- // to the same ref.TypeRegistry as the provider.
- if adapterReg == providerReg {
- adapter = reg
- } else {
- // Otherwise, make a copy of the adapter.
- adapter = adapterReg.Copy()
- }
- } else if isProviderReg {
- provider = providerReg.Copy()
- } else if isAdapterReg {
- adapter = adapterReg.Copy()
- }
-
- featuresCopy := make(map[int]bool, len(e.features))
- for k, v := range e.features {
- featuresCopy[k] = v
- }
- appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
- for k, v := range e.appliedFeatures {
- appliedFeaturesCopy[k] = v
- }
- funcsCopy := make(map[string]*functionDecl, len(e.functions))
- for k, v := range e.functions {
- funcsCopy[k] = v
- }
-
- // TODO: functions copy needs to happen here.
- ext := &Env{
- Container: e.Container,
- declarations: decsCopy,
- functions: funcsCopy,
- macros: macsCopy,
- progOpts: progOptsCopy,
- adapter: adapter,
- features: featuresCopy,
- appliedFeatures: appliedFeaturesCopy,
- provider: provider,
- chkOpts: chkOptsCopy,
- }
- return ext.configure(opts)
-}
-
-// HasFeature checks whether the environment enables the given feature
-// flag, as enumerated in options.go.
-func (e *Env) HasFeature(flag int) bool {
- enabled, has := e.features[flag]
- return has && enabled
-}
-
-// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
-//
-// This form of Parse creates a Source value for the input `txt` and forwards to the
-// ParseSource method.
-func (e *Env) Parse(txt string) (*Ast, *Issues) {
- src := common.NewTextSource(txt)
- return e.ParseSource(src)
-}
-
-// ParseSource parses the input source to an Ast and/or set of Issues.
-//
-// Parsing has failed if the returned Issues value and its Issues.Err() value is non-nil.
-// Issues should be inspected if they are non-nil, but may not represent a fatal error.
-//
-// It is possible to have both non-nil Ast and Issues values returned from this call; however,
-// the mere presence of an Ast does not imply that it is valid for use.
-func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
- res, errs := e.prsr.Parse(src)
- if len(errs.GetErrors()) > 0 {
- return nil, &Issues{errs: errs}
- }
- // Manually create the Ast to ensure that the text source information is propagated on
- // subsequent calls to Check.
- return &Ast{
- source: src,
- expr: res.GetExpr(),
- info: res.GetSourceInfo()}, nil
-}
-
-// Program generates an evaluable instance of the Ast within the environment (Env).
-func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
- optSet := e.progOpts
- if len(opts) != 0 {
- mergedOpts := []ProgramOption{}
- mergedOpts = append(mergedOpts, e.progOpts...)
- mergedOpts = append(mergedOpts, opts...)
- optSet = mergedOpts
- }
- return newProgram(e, ast, optSet)
-}
-
-// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
-func (e *Env) TypeAdapter() ref.TypeAdapter {
- return e.adapter
-}
-
-// TypeProvider returns the `ref.TypeProvider` configured for the environment.
-func (e *Env) TypeProvider() ref.TypeProvider {
- return e.provider
-}
-
-// UnknownVars returns an interpreter.PartialActivation which marks all variables
-// declared in the Env as unknown AttributePattern values.
-//
-// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation
-// unless the PartialAttributes option is provided as a ProgramOption.
-func (e *Env) UnknownVars() interpreter.PartialActivation {
- var unknownPatterns []*interpreter.AttributePattern
- for _, d := range e.declarations {
- switch d.GetDeclKind().(type) {
- case *exprpb.Decl_Ident:
- unknownPatterns = append(unknownPatterns,
- interpreter.NewAttributePattern(d.GetName()))
- }
- }
- part, _ := PartialVars(
- interpreter.EmptyActivation(),
- unknownPatterns...)
- return part
-}
-
-// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
-// attribute references which are unknown.
-//
-// Residual expressions are beneficial in a few scenarios:
-//
-// - Optimizing constant expression evaluations away.
-// - Indexing and pruning expressions based on known input arguments.
-// - Surfacing additional requirements that are needed in order to complete an evaluation.
-// - Sharing the evaluation of an expression across multiple machines/nodes.
-//
-// For example, if an expression targets a 'resource' and 'request' attribute and the possible
-// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
-// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
-// of the expression that reference the 'request'.
-//
-// Note, the expression ids within the residual AST generated through this method have no
-// correlation to the expression ids of the original AST.
-//
-// See the PartialVars helper for how to construct a PartialActivation.
-//
-// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
-// Ast format and then Program again.
-func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
- pruned := interpreter.PruneAst(a.Expr(), details.State())
- expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned}))
- if err != nil {
- return nil, err
- }
- parsed, iss := e.Parse(expr)
- if iss != nil && iss.Err() != nil {
- return nil, iss.Err()
- }
- if !a.IsChecked() {
- return parsed, nil
- }
- checked, iss := e.Check(parsed)
- if iss != nil && iss.Err() != nil {
- return nil, iss.Err()
- }
- return checked, nil
-}
-
-// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
-// extension functions provided by estimator.
-func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator) (checker.CostEstimate, error) {
- checked, err := AstToCheckedExpr(ast)
- if err != nil {
- return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err)
- }
- return checker.Cost(checked, estimator), nil
-}
-
-// configure applies a series of EnvOptions to the current environment.
-func (e *Env) configure(opts []EnvOption) (*Env, error) {
- // Customized the environment using the provided EnvOption values. If an error is
- // generated at any step this, will be returned as a nil Env with a non-nil error.
- var err error
- for _, opt := range opts {
- e, err = opt(e)
- if err != nil {
- return nil, err
- }
- }
-
- // If the default UTC timezone fix has been enabled, make sure the library is configured
- if e.HasFeature(featureDefaultUTCTimeZone) {
- if _, found := e.appliedFeatures[featureDefaultUTCTimeZone]; !found {
- e, err = Lib(timeUTCLibrary{})(e)
- if err != nil {
- return nil, err
- }
- // record that the feature has been applied since it will generate declarations
- // and functions which will be propagated on Extend() calls and which should only
- // be registered once.
- e.appliedFeatures[featureDefaultUTCTimeZone] = true
- }
- }
-
- // Initialize all of the functions configured within the environment.
- for _, fn := range e.functions {
- err = fn.init()
- if err != nil {
- return nil, err
- }
- }
-
- // Configure the parser.
- prsrOpts := []parser.Option{parser.Macros(e.macros...)}
- if e.HasFeature(featureEnableMacroCallTracking) {
- prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
- }
- e.prsr, err = parser.NewParser(prsrOpts...)
- if err != nil {
- return nil, err
- }
-
- // Ensure that the checker init happens eagerly rather than lazily.
- if e.HasFeature(featureEagerlyValidateDeclarations) {
- err := e.initChecker()
- if err != nil {
- return nil, err
- }
- }
-
- return e, nil
-}
-
-func (e *Env) initChecker() error {
- e.chkOnce.Do(func() {
- chkOpts := []checker.Option{}
- chkOpts = append(chkOpts, e.chkOpts...)
- chkOpts = append(chkOpts,
- checker.HomogeneousAggregateLiterals(
- e.HasFeature(featureDisableDynamicAggregateLiterals)),
- checker.CrossTypeNumericComparisons(
- e.HasFeature(featureCrossTypeNumericComparisons)))
-
- ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
- if err != nil {
- e.chkErr = err
- return
- }
- // Add the statically configured declarations.
- err = ce.Add(e.declarations...)
- if err != nil {
- e.chkErr = err
- return
- }
- // Add the function declarations which are derived from the FunctionDecl instances.
- for _, fn := range e.functions {
- fnDecl, err := functionDeclToExprDecl(fn)
- if err != nil {
- e.chkErr = err
- return
- }
- err = ce.Add(fnDecl)
- if err != nil {
- e.chkErr = err
- return
- }
- }
- // Add function declarations here separately.
- e.chk = ce
- })
- return e.chkErr
-}
-
-// Issues defines methods for inspecting the error details of parse and check calls.
-//
-// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
-type Issues struct {
- errs *common.Errors
-}
-
-// NewIssues returns an Issues struct from a common.Errors object.
-func NewIssues(errs *common.Errors) *Issues {
- return &Issues{
- errs: errs,
- }
-}
-
-// Err returns an error value if the issues list contains one or more errors.
-func (i *Issues) Err() error {
- if i == nil {
- return nil
- }
- if len(i.Errors()) > 0 {
- return errors.New(i.String())
- }
- return nil
-}
-
-// Errors returns the collection of errors encountered in more granular detail.
-func (i *Issues) Errors() []common.Error {
- if i == nil {
- return []common.Error{}
- }
- return i.errs.GetErrors()
-}
-
-// Append collects the issues from another Issues struct into a new Issues object.
-func (i *Issues) Append(other *Issues) *Issues {
- if i == nil {
- return other
- }
- if other == nil {
- return i
- }
- return NewIssues(i.errs.Append(other.errs.GetErrors()))
-}
-
-// String converts the issues to a suitable display string.
-func (i *Issues) String() string {
- if i == nil {
- return ""
- }
- return i.errs.ToDisplayString()
-}
-
-// getStdEnv lazy initializes the CEL standard environment.
-func getStdEnv() (*Env, error) {
- stdEnvInit.Do(func() {
- stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
- })
- return stdEnv, stdEnvErr
-}
-
-var (
- stdEnvInit sync.Once
- stdEnv *Env
- stdEnvErr error
-)
diff --git a/etcd/vendor/github.com/google/cel-go/cel/io.go b/etcd/vendor/github.com/google/cel-go/cel/io.go
deleted file mode 100644
index e721c97f66..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/io.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "errors"
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "github.com/google/cel-go/parser"
-
- "google.golang.org/protobuf/proto"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- anypb "google.golang.org/protobuf/types/known/anypb"
-)
-
-// CheckedExprToAst converts a checked expression proto message to an Ast.
-func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
- return CheckedExprToAstWithSource(checkedExpr, nil)
-}
-
-// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
-// using the provided Source as the textual contents.
-//
-// In general the source is not necessary unless the AST has been modified between the
-// `Parse` and `Check` calls as an `Ast` created from the `Parse` step will carry the source
-// through future calls.
-//
-// Prefer CheckedExprToAst if loading expressions from storage.
-func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) *Ast {
- refMap := checkedExpr.GetReferenceMap()
- if refMap == nil {
- refMap = map[int64]*exprpb.Reference{}
- }
- typeMap := checkedExpr.GetTypeMap()
- if typeMap == nil {
- typeMap = map[int64]*exprpb.Type{}
- }
- si := checkedExpr.GetSourceInfo()
- if si == nil {
- si = &exprpb.SourceInfo{}
- }
- if src == nil {
- src = common.NewInfoSource(si)
- }
- return &Ast{
- expr: checkedExpr.GetExpr(),
- info: si,
- source: src,
- refMap: refMap,
- typeMap: typeMap,
- }
-}
-
-// AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value.
-//
-// If the Ast.IsChecked() returns false, this conversion method will return an error.
-func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
- if !a.IsChecked() {
- return nil, fmt.Errorf("cannot convert unchecked ast")
- }
- return &exprpb.CheckedExpr{
- Expr: a.Expr(),
- SourceInfo: a.SourceInfo(),
- ReferenceMap: a.refMap,
- TypeMap: a.typeMap,
- }, nil
-}
-
-// ParsedExprToAst converts a parsed expression proto message to an Ast.
-func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
- return ParsedExprToAstWithSource(parsedExpr, nil)
-}
-
-// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
-// using the provided Source as the textual contents.
-//
-// In general you only need this if you need to recheck a previously checked
-// expression, or if you need to separately check a subset of an expression.
-//
-// Prefer ParsedExprToAst if loading expressions from storage.
-func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
- si := parsedExpr.GetSourceInfo()
- if si == nil {
- si = &exprpb.SourceInfo{}
- }
- if src == nil {
- src = common.NewInfoSource(si)
- }
- return &Ast{
- expr: parsedExpr.GetExpr(),
- info: si,
- source: src,
- }
-}
-
-// AstToParsedExpr converts an Ast to an protobuf ParsedExpr value.
-func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
- return &exprpb.ParsedExpr{
- Expr: a.Expr(),
- SourceInfo: a.SourceInfo(),
- }, nil
-}
-
-// AstToString converts an Ast back to a string if possible.
-//
-// Note, the conversion may not be an exact replica of the original expression, but will produce
-// a string that is semantically equivalent and whose textual representation is stable.
-func AstToString(a *Ast) (string, error) {
- expr := a.Expr()
- info := a.SourceInfo()
- return parser.Unparse(expr, info)
-}
-
-// RefValueToValue converts between ref.Val and api.expr.Value.
-// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
-func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
- switch res.Type() {
- case types.BoolType:
- return &exprpb.Value{
- Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
- case types.BytesType:
- return &exprpb.Value{
- Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
- case types.DoubleType:
- return &exprpb.Value{
- Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
- case types.IntType:
- return &exprpb.Value{
- Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
- case types.ListType:
- l := res.(traits.Lister)
- sz := l.Size().(types.Int)
- elts := make([]*exprpb.Value, 0, int64(sz))
- for i := types.Int(0); i < sz; i++ {
- v, err := RefValueToValue(l.Get(i))
- if err != nil {
- return nil, err
- }
- elts = append(elts, v)
- }
- return &exprpb.Value{
- Kind: &exprpb.Value_ListValue{
- ListValue: &exprpb.ListValue{Values: elts}}}, nil
- case types.MapType:
- mapper := res.(traits.Mapper)
- sz := mapper.Size().(types.Int)
- entries := make([]*exprpb.MapValue_Entry, 0, int64(sz))
- for it := mapper.Iterator(); it.HasNext().(types.Bool); {
- k := it.Next()
- v := mapper.Get(k)
- kv, err := RefValueToValue(k)
- if err != nil {
- return nil, err
- }
- vv, err := RefValueToValue(v)
- if err != nil {
- return nil, err
- }
- entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv})
- }
- return &exprpb.Value{
- Kind: &exprpb.Value_MapValue{
- MapValue: &exprpb.MapValue{Entries: entries}}}, nil
- case types.NullType:
- return &exprpb.Value{
- Kind: &exprpb.Value_NullValue{}}, nil
- case types.StringType:
- return &exprpb.Value{
- Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
- case types.TypeType:
- typeName := res.(ref.Type).TypeName()
- return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil
- case types.UintType:
- return &exprpb.Value{
- Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
- default:
- any, err := res.ConvertToNative(anyPbType)
- if err != nil {
- return nil, err
- }
- return &exprpb.Value{
- Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
- }
-}
-
-var (
- typeNameToTypeValue = map[string]*types.TypeValue{
- "bool": types.BoolType,
- "bytes": types.BytesType,
- "double": types.DoubleType,
- "null_type": types.NullType,
- "int": types.IntType,
- "list": types.ListType,
- "map": types.MapType,
- "string": types.StringType,
- "type": types.TypeType,
- "uint": types.UintType,
- }
-
- anyPbType = reflect.TypeOf(&anypb.Any{})
-)
-
-// ValueToRefValue converts between exprpb.Value and ref.Val.
-func ValueToRefValue(adapter ref.TypeAdapter, v *exprpb.Value) (ref.Val, error) {
- switch v.Kind.(type) {
- case *exprpb.Value_NullValue:
- return types.NullValue, nil
- case *exprpb.Value_BoolValue:
- return types.Bool(v.GetBoolValue()), nil
- case *exprpb.Value_Int64Value:
- return types.Int(v.GetInt64Value()), nil
- case *exprpb.Value_Uint64Value:
- return types.Uint(v.GetUint64Value()), nil
- case *exprpb.Value_DoubleValue:
- return types.Double(v.GetDoubleValue()), nil
- case *exprpb.Value_StringValue:
- return types.String(v.GetStringValue()), nil
- case *exprpb.Value_BytesValue:
- return types.Bytes(v.GetBytesValue()), nil
- case *exprpb.Value_ObjectValue:
- any := v.GetObjectValue()
- msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
- if err != nil {
- return nil, err
- }
- return adapter.NativeToValue(msg), nil
- case *exprpb.Value_MapValue:
- m := v.GetMapValue()
- entries := make(map[ref.Val]ref.Val)
- for _, entry := range m.Entries {
- key, err := ValueToRefValue(adapter, entry.Key)
- if err != nil {
- return nil, err
- }
- pb, err := ValueToRefValue(adapter, entry.Value)
- if err != nil {
- return nil, err
- }
- entries[key] = pb
- }
- return adapter.NativeToValue(entries), nil
- case *exprpb.Value_ListValue:
- l := v.GetListValue()
- elts := make([]ref.Val, len(l.Values))
- for i, e := range l.Values {
- rv, err := ValueToRefValue(adapter, e)
- if err != nil {
- return nil, err
- }
- elts[i] = rv
- }
- return adapter.NativeToValue(elts), nil
- case *exprpb.Value_TypeValue:
- typeName := v.GetTypeValue()
- tv, ok := typeNameToTypeValue[typeName]
- if ok {
- return tv, nil
- }
- return types.NewObjectTypeValue(typeName), nil
- }
- return nil, errors.New("unknown value")
-}
diff --git a/etcd/vendor/github.com/google/cel-go/cel/library.go b/etcd/vendor/github.com/google/cel-go/cel/library.go
deleted file mode 100644
index 5ca528459a..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/library.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "strconv"
- "strings"
- "time"
-
- "github.com/google/cel-go/checker"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/interpreter/functions"
-)
-
-// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
-// environment for a particular use case or with a related set of functionality.
-//
-// Note, the ProgramOption values provided by a library are expected to be static and not vary
-// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to
-// configure these options outside the Library and within the Env.Program() call directly.
-type Library interface {
- // CompileOptions returns a collection of functional options for configuring the Parse / Check
- // environment.
- CompileOptions() []EnvOption
-
- // ProgramOptions returns a collection of functional options which should be included in every
- // Program generated from the Env.Program() call.
- ProgramOptions() []ProgramOption
-}
-
-// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
-// and to be linked to each other.
-func Lib(l Library) EnvOption {
- return func(e *Env) (*Env, error) {
- var err error
- for _, opt := range l.CompileOptions() {
- e, err = opt(e)
- if err != nil {
- return nil, err
- }
- }
- e.progOpts = append(e.progOpts, l.ProgramOptions()...)
- return e, nil
- }
-}
-
-// StdLib returns an EnvOption for the standard library of CEL functions and macros.
-func StdLib() EnvOption {
- return Lib(stdLibrary{})
-}
-
-// stdLibrary implements the Library interface and provides functional options for the core CEL
-// features documented in the specification.
-type stdLibrary struct{}
-
-// EnvOptions returns options for the standard CEL function declarations and macros.
-func (stdLibrary) CompileOptions() []EnvOption {
- return []EnvOption{
- Declarations(checker.StandardDeclarations()...),
- Macros(StandardMacros...),
- }
-}
-
-// ProgramOptions returns function implementations for the standard CEL functions.
-func (stdLibrary) ProgramOptions() []ProgramOption {
- return []ProgramOption{
- Functions(functions.StandardOverloads()...),
- }
-}
-
-type timeUTCLibrary struct{}
-
-func (timeUTCLibrary) CompileOptions() []EnvOption {
- return timeOverloadDeclarations
-}
-
-func (timeUTCLibrary) ProgramOptions() []ProgramOption {
- return []ProgramOption{}
-}
-
-// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified
-// in the CEL expression.
-var (
- utcTZ = types.String("UTC")
-
- timeOverloadDeclarations = []EnvOption{
- Function(overloads.TimeGetHours,
- MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
- UnaryBinding(func(dur ref.Val) ref.Val {
- d := dur.(types.Duration)
- return types.Int(d.Hours())
- }))),
- Function(overloads.TimeGetMinutes,
- MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
- UnaryBinding(func(dur ref.Val) ref.Val {
- d := dur.(types.Duration)
- return types.Int(d.Minutes())
- }))),
- Function(overloads.TimeGetSeconds,
- MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
- UnaryBinding(func(dur ref.Val) ref.Val {
- d := dur.(types.Duration)
- return types.Int(d.Seconds())
- }))),
- Function(overloads.TimeGetMilliseconds,
- MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
- UnaryBinding(func(dur ref.Val) ref.Val {
- d := dur.(types.Duration)
- return types.Int(d.Milliseconds())
- }))),
- Function(overloads.TimeGetFullYear,
- MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetFullYear(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetFullYear),
- ),
- ),
- Function(overloads.TimeGetMonth,
- MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetMonth(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetMonth),
- ),
- ),
- Function(overloads.TimeGetDayOfYear,
- MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfYear(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(func(ts, tz ref.Val) ref.Val {
- return timestampGetDayOfYear(ts, tz)
- }),
- ),
- ),
- Function(overloads.TimeGetDayOfMonth,
- MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfMonthZeroBased(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetDayOfMonthZeroBased),
- ),
- ),
- Function(overloads.TimeGetDate,
- MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfMonthOneBased(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetDayOfMonthOneBased),
- ),
- ),
- Function(overloads.TimeGetDayOfWeek,
- MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetDayOfWeek(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetDayOfWeek),
- ),
- ),
- Function(overloads.TimeGetHours,
- MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetHours(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetHours),
- ),
- ),
- Function(overloads.TimeGetMinutes,
- MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetMinutes(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetMinutes),
- ),
- ),
- Function(overloads.TimeGetSeconds,
- MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetSeconds(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetSeconds),
- ),
- ),
- Function(overloads.TimeGetMilliseconds,
- MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
- UnaryBinding(func(ts ref.Val) ref.Val {
- return timestampGetMilliseconds(ts, utcTZ)
- }),
- ),
- MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
- BinaryBinding(timestampGetMilliseconds),
- ),
- ),
- }
-)
-
-func timestampGetFullYear(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Year())
-}
-
-func timestampGetMonth(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- // CEL spec indicates that the month should be 0-based, but the Time value
- // for Month() is 1-based.
- return types.Int(t.Month() - 1)
-}
-
-func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.YearDay() - 1)
-}
-
-func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Day() - 1)
-}
-
-func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Day())
-}
-
-func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Weekday())
-}
-
-func timestampGetHours(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Hour())
-}
-
-func timestampGetMinutes(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Minute())
-}
-
-func timestampGetSeconds(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Second())
-}
-
-func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
- t, err := inTimeZone(ts, tz)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(t.Nanosecond() / 1000000)
-}
-
-func inTimeZone(ts, tz ref.Val) (time.Time, error) {
- t := ts.(types.Timestamp)
- val := string(tz.(types.String))
- ind := strings.Index(val, ":")
- if ind == -1 {
- loc, err := time.LoadLocation(val)
- if err != nil {
- return time.Time{}, err
- }
- return t.In(loc), nil
- }
-
- // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
- // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
- hr, err := strconv.Atoi(string(val[0:ind]))
- if err != nil {
- return time.Time{}, err
- }
- min, err := strconv.Atoi(string(val[ind+1:]))
- if err != nil {
- return time.Time{}, err
- }
- var offset int
- if string(val[0]) == "-" {
- offset = hr*60 - min
- } else {
- offset = hr*60 + min
- }
- secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
- timezone := time.FixedZone("", secondsEastOfUTC)
- return t.In(timezone), nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/cel/macro.go b/etcd/vendor/github.com/google/cel-go/cel/macro.go
deleted file mode 100644
index e43cb4eeea..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/macro.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/parser"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Macro describes a function signature to match and the MacroExpander to apply.
-//
-// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
-// a Macro should be created per arg-count or as a var arg macro.
-type Macro = parser.Macro
-
-// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree, or an error
-// if the input arguments are not suitable for the expansion requirements for the macro in question.
-//
-// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
-// and produces as output an Expr ast node.
-//
-// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
-type MacroExpander = parser.MacroExpander
-
-// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
-type MacroExprHelper = parser.ExprHelper
-
-// NewGlobalMacro creates a Macro for a global function with the specified arg count.
-func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
- return parser.NewGlobalMacro(function, argCount, expander)
-}
-
-// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
-func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
- return parser.NewReceiverMacro(function, argCount, expander)
-}
-
-// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
-func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
- return parser.NewGlobalVarArgMacro(function, expander)
-}
-
-// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
-func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
- return parser.NewReceiverVarArgMacro(function, expander)
-}
-
-// HasMacroExpander expands the input call arguments into a presence test, e.g. has(.field)
-func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return parser.MakeHas(meh, target, args)
-}
-
-// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
-// elements in the range match the predicate expressions:
-// .exists(, )
-func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return parser.MakeExists(meh, target, args)
-}
-
-// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
-// one of the elements in the range match the predicate expressions:
-// .exists_one(, )
-func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return parser.MakeExistsOne(meh, target, args)
-}
-
-// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
-// input to produce an output list.
-//
-// There are two call patterns supported by map:
-// .map(, )
-// .map(, , )
-// In the second form only iterVar values which return true when provided to the predicate expression
-// are transformed.
-func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return parser.MakeMap(meh, target, args)
-}
-
-// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
-// only elements which match the provided predicate expression:
-// .filter(, )
-func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return parser.MakeFilter(meh, target, args)
-}
-
-var (
- // Aliases to each macro in the CEL standard environment.
- // Note: reassigning these macro variables may result in undefined behavior.
-
- // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
- // specify the field as a string.
- HasMacro = parser.HasMacro
-
- // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
- // elements in the range satisfy the predicate.
- AllMacro = parser.AllMacro
-
- // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
- // some element in the range satisfies the predicate.
- ExistsMacro = parser.ExistsMacro
-
- // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
- // element in range the predicate holds.
- ExistsOneMacro = parser.ExistsOneMacro
-
- // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
- // to each element in the range to produce a new list.
- MapMacro = parser.MapMacro
-
- // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
- // first filters the elements in the range by the predicate, then applies the transform function
- // to produce a new list.
- MapFilterMacro = parser.MapFilterMacro
-
- // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
- // elements in the range, producing a new list from the elements that satisfy the predicate.
- FilterMacro = parser.FilterMacro
-
- // StandardMacros provides an alias to all the CEL macros defined in the standard environment.
- StandardMacros = []Macro{
- HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
- }
-
- // NoMacros provides an alias to an empty list of macros
- NoMacros = []Macro{}
-)
diff --git a/etcd/vendor/github.com/google/cel-go/cel/options.go b/etcd/vendor/github.com/google/cel-go/cel/options.go
deleted file mode 100644
index 21c7570106..0000000000
--- a/etcd/vendor/github.com/google/cel-go/cel/options.go
+++ /dev/null
@@ -1,543 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cel
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protodesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/types/dynamicpb"
-
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/interpreter"
- "github.com/google/cel-go/interpreter/functions"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- descpb "google.golang.org/protobuf/types/descriptorpb"
-)
-
-// These constants beginning with "Feature" enable optional behavior in
-// the library. See the documentation for each constant to see its
-// effects, compatibility restrictions, and standard conformance.
-const (
- _ = iota
-
- // Disallow heterogeneous aggregate (list, map) literals.
- // Note, it is still possible to have heterogeneous aggregates when
- // provided as variables to the expression, as well as via conversion
- // of well-known dynamic types, or with unchecked expressions.
- // Affects checking. Provides a subset of standard behavior.
- featureDisableDynamicAggregateLiterals
-
- // Enable the tracking of function call expressions replaced by macros.
- featureEnableMacroCallTracking
-
- // Enable the use of cross-type numeric comparisons at the type-checker.
- featureCrossTypeNumericComparisons
-
- // Enable eager validation of declarations to ensure that Env values created
- // with `Extend` inherit a validated list of declarations from the parent Env.
- featureEagerlyValidateDeclarations
-
- // Enable the use of the default UTC timezone when a timezone is not specified
- // on a CEL timestamp operation. This fixes the scenario where the input time
- // is not already in UTC.
- featureDefaultUTCTimeZone
-)
-
-// EnvOption is a functional interface for configuring the environment.
-type EnvOption func(e *Env) (*Env, error)
-
-// ClearMacros options clears all parser macros.
-//
-// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
-// comprehensions such as `all` and `exists` are enabled only via macros.
-func ClearMacros() EnvOption {
- return func(e *Env) (*Env, error) {
- e.macros = NoMacros
- return e, nil
- }
-}
-
-// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one.
-//
-// Note: This option must be specified before the Types and TypeDescs options when used together.
-func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption {
- return func(e *Env) (*Env, error) {
- e.adapter = adapter
- return e, nil
- }
-}
-
-// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one.
-//
-// Note: This option must be specified before the Types and TypeDescs options when used together.
-func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
- return func(e *Env) (*Env, error) {
- e.provider = provider
- return e, nil
- }
-}
-
-// Declarations option extends the declaration set configured in the environment.
-//
-// Note: Declarations will by default be appended to the pre-existing declaration set configured
-// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
-// purely custom set of declarations use NewCustomEnv.
-func Declarations(decls ...*exprpb.Decl) EnvOption {
- return func(e *Env) (*Env, error) {
- e.declarations = append(e.declarations, decls...)
- return e, nil
- }
-}
-
-// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
-// at the time of the `NewEnv` call.
-//
-// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
-// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated
-// as declarations will be collision-checked at most once and only incrementally by way of `Extend`
-//
-// Disabled by default as not all environments are used for type-checking.
-func EagerlyValidateDeclarations(enabled bool) EnvOption {
- return features(featureEagerlyValidateDeclarations, enabled)
-}
-
-// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree
-// during type-checking.
-//
-// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
-// expression, as well as via conversion of well-known dynamic types, or with unchecked
-// expressions.
-func HomogeneousAggregateLiterals() EnvOption {
- return features(featureDisableDynamicAggregateLiterals, true)
-}
-
-// Macros option extends the macro set configured in the environment.
-//
-// Note: This option must be specified after ClearMacros if used together.
-func Macros(macros ...Macro) EnvOption {
- return func(e *Env) (*Env, error) {
- e.macros = append(e.macros, macros...)
- return e, nil
- }
-}
-
-// Container sets the container for resolving variable names. Defaults to an empty container.
-//
-// If all references within an expression are relative to a protocol buffer package, then
-// specifying a container of `google.type` would make it possible to write expressions such as
-// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
-func Container(name string) EnvOption {
- return func(e *Env) (*Env, error) {
- cont, err := e.Container.Extend(containers.Name(name))
- if err != nil {
- return nil, err
- }
- e.Container = cont
- return e, nil
- }
-}
-
-// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
-//
-// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
-// Abbreviations can be useful when working with variables, functions, and especially types from
-// multiple namespaces:
-//
-// // CEL object construction
-// qual.pkg.version.ObjTypeName{
-// field: alt.container.ver.FieldTypeName{value: ...}
-// }
-//
-// Only one the qualified names above may be used as the CEL container, so at least one of these
-// references must be a long qualified name within an otherwise short CEL program. Using the
-// following abbreviations, the program becomes much simpler:
-//
-// // CEL Go option
-// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
-// // Simplified Object construction
-// ObjTypeName{field: FieldTypeName{value: ...}}
-//
-// There are a few rules for the qualified names and the simple abbreviations generated from them:
-// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
-// - The last element in the qualified name is the abbreviation.
-// - Abbreviations must not collide with each other.
-// - The abbreviation must not collide with unqualified names in use.
-//
-// Abbreviations are distinct from container-based references in the following important ways:
-// - Abbreviations must expand to a fully-qualified name.
-// - Expanded abbreviations do not participate in namespace resolution.
-// - Abbreviation expansion is done instead of the container search for a matching identifier.
-// - Containers follow C++ namespace resolution rules with searches from the most qualified name
-// to the least qualified name.
-// - Container references within the CEL program may be relative, and are resolved to fully
-// qualified names at either type-check time or program plan time, whichever comes first.
-//
-// If there is ever a case where an identifier could be in both the container and as an
-// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
-// preserved between compilations even as the container evolves.
-func Abbrevs(qualifiedNames ...string) EnvOption {
- return func(e *Env) (*Env, error) {
- cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...))
- if err != nil {
- return nil, err
- }
- e.Container = cont
- return e, nil
- }
-}
-
-// Types adds one or more type declarations to the environment, allowing for construction of
-// type-literals whose definitions are included in the common expression built-in set.
-//
-// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
-// provided to this option will result in an error.
-//
-// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
-// environment by default.
-//
-// Note: This option must be specified after the CustomTypeProvider option when used together.
-func Types(addTypes ...interface{}) EnvOption {
- return func(e *Env) (*Env, error) {
- reg, isReg := e.provider.(ref.TypeRegistry)
- if !isReg {
- return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
- }
- for _, t := range addTypes {
- switch v := t.(type) {
- case proto.Message:
- fdMap := pb.CollectFileDescriptorSet(v)
- for _, fd := range fdMap {
- err := reg.RegisterDescriptor(fd)
- if err != nil {
- return nil, err
- }
- }
- case ref.Type:
- err := reg.RegisterType(v)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("unsupported type: %T", t)
- }
- }
- return e, nil
- }
-}
-
-// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files,
-// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided.
-//
-// Note that messages instantiated from these descriptors will be *dynamicpb.Message values
-// rather than the concrete message type.
-//
-// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
-// extension or by re-using the same EnvOption with another NewEnv() call.
-func TypeDescs(descs ...interface{}) EnvOption {
- return func(e *Env) (*Env, error) {
- reg, isReg := e.provider.(ref.TypeRegistry)
- if !isReg {
- return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
- }
- // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a
- // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other
- // and will not resolve properly unless they are part of the same set.
- var fds *descpb.FileDescriptorSet
- for _, d := range descs {
- switch f := d.(type) {
- case *descpb.FileDescriptorProto:
- if fds == nil {
- fds = &descpb.FileDescriptorSet{
- File: []*descpb.FileDescriptorProto{},
- }
- }
- fds.File = append(fds.File, f)
- }
- }
- if fds != nil {
- if err := registerFileSet(reg, fds); err != nil {
- return nil, err
- }
- }
- for _, d := range descs {
- switch f := d.(type) {
- case *protoregistry.Files:
- if err := registerFiles(reg, f); err != nil {
- return nil, err
- }
- case protoreflect.FileDescriptor:
- if err := reg.RegisterDescriptor(f); err != nil {
- return nil, err
- }
- case *descpb.FileDescriptorSet:
- if err := registerFileSet(reg, f); err != nil {
- return nil, err
- }
- case *descpb.FileDescriptorProto:
- // skip, handled as a synthetic file descriptor set.
- default:
- return nil, fmt.Errorf("unsupported type descriptor: %T", d)
- }
- }
- return e, nil
- }
-}
-
-func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error {
- files, err := protodesc.NewFiles(fileSet)
- if err != nil {
- return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
- }
- return registerFiles(reg, files)
-}
-
-func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error {
- var err error
- files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
- err = reg.RegisterDescriptor(fd)
- return err == nil
- })
- return err
-}
-
-// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
-type ProgramOption func(p *prog) (*prog, error)
-
-// CustomDecorator appends an InterpreterDecorator to the program.
-//
-// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
-func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
- return func(p *prog) (*prog, error) {
- p.decorators = append(p.decorators, dec)
- return p, nil
- }
-}
-
-// Functions adds function overloads that extend or override the set of CEL built-ins.
-//
-// Deprecated: use Function() instead to declare the function, its overload signatures,
-// and the overload implementations.
-func Functions(funcs ...*functions.Overload) ProgramOption {
- return func(p *prog) (*prog, error) {
- if err := p.dispatcher.Add(funcs...); err != nil {
- return nil, err
- }
- return p, nil
- }
-}
-
-// Globals sets the global variable values for a given program. These values may be shadowed by
-// variables with the same name provided to the Eval() call. If Globals is used in a Library with
-// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
-//
-// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
-func Globals(vars interface{}) ProgramOption {
- return func(p *prog) (*prog, error) {
- defaultVars, err := interpreter.NewActivation(vars)
- if err != nil {
- return nil, err
- }
- if p.defaultVars != nil {
- defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars)
- }
- p.defaultVars = defaultVars
- return p, nil
- }
-}
-
-// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used
-// to compile regex string constants at program creation time and report any errors and then use the
-// compiled regex for all regex function invocations.
-func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption {
- return func(p *prog) (*prog, error) {
- p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...)
- return p, nil
- }
-}
-
-// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
-// in the output result.
-type EvalOption int
-
-const (
- // OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
- OptTrackState EvalOption = 1 << iota
-
- // OptExhaustiveEval causes the runtime to disable short-circuits and track state.
- OptExhaustiveEval EvalOption = 1< 0 {
- decorators = append(decorators, interpreter.InterruptableEval())
- }
- // Enable constant folding first.
- if p.evalOpts&OptOptimize == OptOptimize {
- decorators = append(decorators, interpreter.Optimize())
- p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
- }
- // Enable regex compilation of constants immediately after folding constants.
- if len(p.regexOptimizations) > 0 {
- decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
- }
-
- // Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
- if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
- factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
- costTracker.Estimator = p.callCostEstimator
- costTracker.Limit = p.costLimit
- // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
- // prevents the underlying memory from being shared between factory function calls causing
- // undesired mutations.
- decs := decorators[:len(decorators):len(decorators)]
- var observers []interpreter.EvalObserver
-
- if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
- // EvalStateObserver is required for OptExhaustiveEval.
- observers = append(observers, interpreter.EvalStateObserver(state))
- }
- if p.evalOpts&OptTrackCost == OptTrackCost {
- observers = append(observers, interpreter.CostObserver(costTracker))
- }
-
- // Enable exhaustive eval over a basic observer since it offers a superset of features.
- if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
- decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
- } else if len(observers) > 0 {
- decs = append(decs, interpreter.Observe(observers...))
- }
-
- return p.clone().initInterpretable(ast, decs)
- }
- return newProgGen(factory)
- }
- return p.initInterpretable(ast, decorators)
-}
-
-func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
- // Unchecked programs do not contain type and reference information and may be slower to execute.
- if !ast.IsChecked() {
- interpretable, err :=
- p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...)
- if err != nil {
- return nil, err
- }
- p.interpretable = interpretable
- return p, nil
- }
-
- // When the AST has been checked it contains metadata that can be used to speed up program execution.
- var checked *exprpb.CheckedExpr
- checked, err := AstToCheckedExpr(ast)
- if err != nil {
- return nil, err
- }
- interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
- if err != nil {
- return nil, err
- }
- p.interpretable = interpretable
- return p, nil
-}
-
-// Eval implements the Program interface method.
-func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error) {
- // Configure error recovery for unexpected panics during evaluation. Note, the use of named
- // return values makes it possible to modify the error response during the recovery
- // function.
- defer func() {
- if r := recover(); r != nil {
- switch t := r.(type) {
- case interpreter.EvalCancelledError:
- err = t
- default:
- err = fmt.Errorf("internal error: %v", r)
- }
- }
- }()
- // Build a hierarchical activation if there are default vars set.
- var vars interpreter.Activation
- switch v := input.(type) {
- case interpreter.Activation:
- vars = v
- case map[string]interface{}:
- vars = activationPool.Setup(v)
- defer activationPool.Put(vars)
- default:
- return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
- }
- if p.defaultVars != nil {
- vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
- }
- v = p.interpretable.Eval(vars)
- // The output of an internal Eval may have a value (`v`) that is a types.Err. This step
- // translates the CEL value to a Go error response. This interface does not quite match the
- // RPC signature which allows for multiple errors to be returned, but should be sufficient.
- if types.IsError(v) {
- err = v.(*types.Err)
- }
- return
-}
-
-// ContextEval implements the Program interface.
-func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
- if ctx == nil {
- return nil, nil, fmt.Errorf("context can not be nil")
- }
- // Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
- // exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
- var vars interpreter.Activation
- switch v := input.(type) {
- case interpreter.Activation:
- vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
- defer ctxActivationPool.Put(vars)
- case map[string]interface{}:
- rawVars := activationPool.Setup(v)
- defer activationPool.Put(rawVars)
- vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
- defer ctxActivationPool.Put(vars)
- default:
- return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
- }
- return p.Eval(vars)
-}
-
-// Cost implements the Coster interface method.
-func (p *prog) Cost() (min, max int64) {
- return estimateCost(p.interpretable)
-}
-
-// progFactory is a helper alias for marking a program creation factory function.
-type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
-
-// progGen holds a reference to a progFactory instance and implements the Program interface.
-type progGen struct {
- factory progFactory
-}
-
-// newProgGen tests the factory object by calling it once and returns a factory-based Program if
-// the test is successful.
-func newProgGen(factory progFactory) (Program, error) {
- // Test the factory to make sure that configuration errors are spotted at config
- _, err := factory(interpreter.NewEvalState(), &interpreter.CostTracker{})
- if err != nil {
- return nil, err
- }
- return &progGen{factory: factory}, nil
-}
-
-// Eval implements the Program interface method.
-func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
- // The factory based Eval() differs from the standard evaluation model in that it generates a
- // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
- // results.
- state := interpreter.NewEvalState()
- costTracker := &interpreter.CostTracker{}
- det := &EvalDetails{state: state, costTracker: costTracker}
-
- // Generate a new instance of the interpretable using the factory configured during the call to
- // newProgram(). It is incredibly unlikely that the factory call will generate an error given
- // the factory test performed within the Program() call.
- p, err := gen.factory(state, costTracker)
- if err != nil {
- return nil, det, err
- }
-
- // Evaluate the input, returning the result and the 'state' within EvalDetails.
- v, _, err := p.Eval(input)
- if err != nil {
- return v, det, err
- }
- return v, det, nil
-}
-
-// ContextEval implements the Program interface method.
-func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
- if ctx == nil {
- return nil, nil, fmt.Errorf("context can not be nil")
- }
- // The factory based Eval() differs from the standard evaluation model in that it generates a
- // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
- // results.
- state := interpreter.NewEvalState()
- costTracker := &interpreter.CostTracker{}
- det := &EvalDetails{state: state, costTracker: costTracker}
-
- // Generate a new instance of the interpretable using the factory configured during the call to
- // newProgram(). It is incredibly unlikely that the factory call will generate an error given
- // the factory test performed within the Program() call.
- p, err := gen.factory(state, costTracker)
- if err != nil {
- return nil, det, err
- }
-
- // Evaluate the input, returning the result and the 'state' within EvalDetails.
- v, _, err := p.ContextEval(ctx, input)
- if err != nil {
- return v, det, err
- }
- return v, det, nil
-}
-
-// Cost implements the Coster interface method.
-func (gen *progGen) Cost() (min, max int64) {
- // Use an empty state value since no evaluation is performed.
- p, err := gen.factory(emptyEvalState, nil)
- if err != nil {
- return 0, math.MaxInt64
- }
- return estimateCost(p)
-}
-
-// EstimateCost returns the heuristic cost interval for the program.
-func EstimateCost(p Program) (min, max int64) {
- return estimateCost(p)
-}
-
-func estimateCost(i interface{}) (min, max int64) {
- c, ok := i.(interpreter.Coster)
- if !ok {
- return 0, math.MaxInt64
- }
- return c.Cost()
-}
-
-type ctxEvalActivation struct {
- parent interpreter.Activation
- interrupt <-chan struct{}
- interruptCheckCount uint
- interruptCheckFrequency uint
-}
-
-// ResolveName implements the Activation interface method, but adds a special #interrupted variable
-// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
-func (a *ctxEvalActivation) ResolveName(name string) (interface{}, bool) {
- if name == "#interrupted" {
- a.interruptCheckCount++
- if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
- select {
- case <-a.interrupt:
- return true, true
- default:
- return nil, false
- }
- }
- return nil, false
- }
- return a.parent.ResolveName(name)
-}
-
-func (a *ctxEvalActivation) Parent() interpreter.Activation {
- return a.parent
-}
-
-func newCtxEvalActivationPool() *ctxEvalActivationPool {
- return &ctxEvalActivationPool{
- Pool: sync.Pool{
- New: func() interface{} {
- return &ctxEvalActivation{}
- },
- },
- }
-}
-
-type ctxEvalActivationPool struct {
- sync.Pool
-}
-
-// Setup initializes a pooled Activation with the ability check for context.Context cancellation
-func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
- a := p.Pool.Get().(*ctxEvalActivation)
- a.parent = vars
- a.interrupt = done
- a.interruptCheckCount = 0
- a.interruptCheckFrequency = interruptCheckRate
- return a
-}
-
-type evalActivation struct {
- vars map[string]interface{}
- lazyVars map[string]interface{}
-}
-
-// ResolveName looks up the value of the input variable name, if found.
-//
-// Lazy bindings may be supplied within the map-based input in either of the following forms:
-// - func() interface{}
-// - func() ref.Val
-//
-// The lazy binding will only be invoked once per evaluation.
-//
-// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
-// the ref.TypeAdapter configured in the environment.
-func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
- v, found := a.vars[name]
- if !found {
- return nil, false
- }
- switch obj := v.(type) {
- case func() ref.Val:
- if resolved, found := a.lazyVars[name]; found {
- return resolved, true
- }
- lazy := obj()
- a.lazyVars[name] = lazy
- return lazy, true
- case func() interface{}:
- if resolved, found := a.lazyVars[name]; found {
- return resolved, true
- }
- lazy := obj()
- a.lazyVars[name] = lazy
- return lazy, true
- default:
- return obj, true
- }
-}
-
-// Parent implements the interpreter.Activation interface
-func (a *evalActivation) Parent() interpreter.Activation {
- return nil
-}
-
-func newEvalActivationPool() *evalActivationPool {
- return &evalActivationPool{
- Pool: sync.Pool{
- New: func() interface{} {
- return &evalActivation{lazyVars: make(map[string]interface{})}
- },
- },
- }
-}
-
-type evalActivationPool struct {
- sync.Pool
-}
-
-// Setup initializes a pooled Activation object with the map input.
-func (p *evalActivationPool) Setup(vars map[string]interface{}) *evalActivation {
- a := p.Pool.Get().(*evalActivation)
- a.vars = vars
- return a
-}
-
-func (p *evalActivationPool) Put(value interface{}) {
- a := value.(*evalActivation)
- for k := range a.lazyVars {
- delete(a.lazyVars, k)
- }
- p.Pool.Put(a)
-}
-
-var (
- emptyEvalState = interpreter.NewEvalState()
-
- // activationPool is an internally managed pool of Activation values that wrap map[string]interface{} inputs
- activationPool = newEvalActivationPool()
-
- // ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
- ctxActivationPool = newCtxEvalActivationPool()
-)
diff --git a/etcd/vendor/github.com/google/cel-go/checker/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/checker/BUILD.bazel
deleted file mode 100644
index bec40b6e69..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/BUILD.bazel
+++ /dev/null
@@ -1,60 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "checker.go",
- "cost.go",
- "env.go",
- "errors.go",
- "mapping.go",
- "options.go",
- "printer.go",
- "standard.go",
- "types.go",
- ],
- importpath = "github.com/google/cel-go/checker",
- visibility = ["//visibility:public"],
- deps = [
- "//checker/decls:go_default_library",
- "//common:go_default_library",
- "//common/containers:go_default_library",
- "//common/debug:go_default_library",
- "//common/operators:go_default_library",
- "//common/overloads:go_default_library",
- "//common/types:go_default_library",
- "//common/types/pb:go_default_library",
- "//common/types/ref:go_default_library",
- "//parser:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "checker_test.go",
- "cost_test.go",
- "env_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
- deps = [
- "//common/types:go_default_library",
- "//parser:go_default_library",
- "//test:go_default_library",
- "//test/proto2pb:go_default_library",
- "//test/proto3pb:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/checker/checker.go b/etcd/vendor/github.com/google/cel-go/checker/checker.go
deleted file mode 100644
index fcddb1b2c2..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/checker.go
+++ /dev/null
@@ -1,641 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checker defines functions to type-checked a parsed expression
-// against a set of identifier and function declarations.
-package checker
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/types/ref"
-
- "google.golang.org/protobuf/proto"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-type checker struct {
- env *Env
- errors *typeErrors
- mappings *mapping
- freeTypeVarCounter int
- sourceInfo *exprpb.SourceInfo
- types map[int64]*exprpb.Type
- references map[int64]*exprpb.Reference
-}
-
-// Check performs type checking, giving a typed AST.
-// The input is a ParsedExpr proto and an env which encapsulates
-// type binding of variables, declarations of built-in functions,
-// descriptions of protocol buffers, and a registry for errors.
-// Returns a CheckedExpr proto, which might not be usable if
-// there are errors in the error registry.
-func Check(parsedExpr *exprpb.ParsedExpr,
- source common.Source,
- env *Env) (*exprpb.CheckedExpr, *common.Errors) {
- c := checker{
- env: env,
- errors: &typeErrors{common.NewErrors(source)},
- mappings: newMapping(),
- freeTypeVarCounter: 0,
- sourceInfo: parsedExpr.GetSourceInfo(),
- types: make(map[int64]*exprpb.Type),
- references: make(map[int64]*exprpb.Reference),
- }
- c.check(parsedExpr.GetExpr())
-
- // Walk over the final type map substituting any type parameters either by their bound value or
- // by DYN.
- m := make(map[int64]*exprpb.Type)
- for k, v := range c.types {
- m[k] = substitute(c.mappings, v, true)
- }
-
- return &exprpb.CheckedExpr{
- Expr: parsedExpr.GetExpr(),
- SourceInfo: parsedExpr.GetSourceInfo(),
- TypeMap: m,
- ReferenceMap: c.references,
- }, c.errors.Errors
-}
-
-func (c *checker) check(e *exprpb.Expr) {
- if e == nil {
- return
- }
-
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- literal := e.GetConstExpr()
- switch literal.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- c.checkBoolLiteral(e)
- case *exprpb.Constant_BytesValue:
- c.checkBytesLiteral(e)
- case *exprpb.Constant_DoubleValue:
- c.checkDoubleLiteral(e)
- case *exprpb.Constant_Int64Value:
- c.checkInt64Literal(e)
- case *exprpb.Constant_NullValue:
- c.checkNullLiteral(e)
- case *exprpb.Constant_StringValue:
- c.checkStringLiteral(e)
- case *exprpb.Constant_Uint64Value:
- c.checkUint64Literal(e)
- }
- case *exprpb.Expr_IdentExpr:
- c.checkIdent(e)
- case *exprpb.Expr_SelectExpr:
- c.checkSelect(e)
- case *exprpb.Expr_CallExpr:
- c.checkCall(e)
- case *exprpb.Expr_ListExpr:
- c.checkCreateList(e)
- case *exprpb.Expr_StructExpr:
- c.checkCreateStruct(e)
- case *exprpb.Expr_ComprehensionExpr:
- c.checkComprehension(e)
- default:
- c.errors.ReportError(
- c.location(e), "Unrecognized ast type: %v", reflect.TypeOf(e))
- }
-}
-
-func (c *checker) checkInt64Literal(e *exprpb.Expr) {
- c.setType(e, decls.Int)
-}
-
-func (c *checker) checkUint64Literal(e *exprpb.Expr) {
- c.setType(e, decls.Uint)
-}
-
-func (c *checker) checkStringLiteral(e *exprpb.Expr) {
- c.setType(e, decls.String)
-}
-
-func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
- c.setType(e, decls.Bytes)
-}
-
-func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
- c.setType(e, decls.Double)
-}
-
-func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
- c.setType(e, decls.Bool)
-}
-
-func (c *checker) checkNullLiteral(e *exprpb.Expr) {
- c.setType(e, decls.Null)
-}
-
-func (c *checker) checkIdent(e *exprpb.Expr) {
- identExpr := e.GetIdentExpr()
- // Check to see if the identifier is declared.
- if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
- c.setType(e, ident.GetIdent().GetType())
- c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue()))
- // Overwrite the identifier with its fully qualified name.
- identExpr.Name = ident.GetName()
- return
- }
-
- c.setType(e, decls.Error)
- c.errors.undeclaredReference(
- c.location(e), c.env.container.Name(), identExpr.GetName())
-}
-
-func (c *checker) checkSelect(e *exprpb.Expr) {
- sel := e.GetSelectExpr()
- // Before traversing down the tree, try to interpret as qualified name.
- qname, found := containers.ToQualifiedName(e)
- if found {
- ident := c.env.LookupIdent(qname)
- if ident != nil {
- // We don't check for a TestOnly expression here since the `found` result is
- // always going to be false for TestOnly expressions.
-
- // Rewrite the node to be a variable reference to the resolved fully-qualified
- // variable name.
- c.setType(e, ident.GetIdent().Type)
- c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().Value))
- identName := ident.GetName()
- e.ExprKind = &exprpb.Expr_IdentExpr{
- IdentExpr: &exprpb.Expr_Ident{
- Name: identName,
- },
- }
- return
- }
- }
-
- // Interpret as field selection, first traversing down the operand.
- c.check(sel.GetOperand())
- targetType := substitute(c.mappings, c.getType(sel.GetOperand()), false)
- // Assume error type by default as most types do not support field selection.
- resultType := decls.Error
- switch kindOf(targetType) {
- case kindMap:
- // Maps yield their value type as the selection result type.
- mapType := targetType.GetMapType()
- resultType = mapType.GetValueType()
- case kindObject:
- // Objects yield their field type declaration as the selection result type, but only if
- // the field is defined.
- messageType := targetType
- if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), sel.GetField()); found {
- resultType = fieldType.Type
- }
- case kindTypeParam:
- // Set the operand type to DYN to prevent assignment to a potentially incorrect type
- // at a later point in type-checking. The isAssignable call will update the type
- // substitutions for the type param under the covers.
- c.isAssignable(decls.Dyn, targetType)
- // Also, set the result type to DYN.
- resultType = decls.Dyn
- default:
- // Dynamic / error values are treated as DYN type. Errors are handled this way as well
- // in order to allow forward progress on the check.
- if isDynOrError(targetType) {
- resultType = decls.Dyn
- } else {
- c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType)
- }
- }
- if sel.TestOnly {
- resultType = decls.Bool
- }
- c.setType(e, substitute(c.mappings, resultType, false))
-}
-
-func (c *checker) checkCall(e *exprpb.Expr) {
- // Note: similar logic exists within the `interpreter/planner.go`. If making changes here
- // please consider the impact on planner.go and consolidate implementations or mirror code
- // as appropriate.
- call := e.GetCallExpr()
- target := call.GetTarget()
- args := call.GetArgs()
- fnName := call.GetFunction()
-
- // Traverse arguments.
- for _, arg := range args {
- c.check(arg)
- }
-
- // Regular static call with simple name.
- if target == nil {
- // Check for the existence of the function.
- fn := c.env.LookupFunction(fnName)
- if fn == nil {
- c.errors.undeclaredReference(
- c.location(e), c.env.container.Name(), fnName)
- c.setType(e, decls.Error)
- return
- }
- // Overwrite the function name with its fully qualified resolved name.
- call.Function = fn.GetName()
- // Check to see whether the overload resolves.
- c.resolveOverloadOrError(c.location(e), e, fn, nil, args)
- return
- }
-
- // If a receiver 'target' is present, it may either be a receiver function, or a namespaced
- // function, but not both. Given a.b.c() either a.b.c is a function or c is a function with
- // target a.b.
- //
- // Check whether the target is a namespaced function name.
- qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
- if maybeQualified {
- maybeQualifiedName := qualifiedPrefix + "." + fnName
- fn := c.env.LookupFunction(maybeQualifiedName)
- if fn != nil {
- // The function name is namespaced and so preserving the target operand would
- // be an inaccurate representation of the desired evaluation behavior.
- // Overwrite with fully-qualified resolved function name sans receiver target.
- call.Target = nil
- call.Function = fn.GetName()
- c.resolveOverloadOrError(c.location(e), e, fn, nil, args)
- return
- }
- }
-
- // Regular instance call.
- c.check(call.Target)
- fn := c.env.LookupFunction(fnName)
- // Function found, attempt overload resolution.
- if fn != nil {
- c.resolveOverloadOrError(c.location(e), e, fn, target, args)
- return
- }
- // Function name not declared, record error.
- c.errors.undeclaredReference(c.location(e), c.env.container.Name(), fnName)
-}
-
-func (c *checker) resolveOverloadOrError(
- loc common.Location,
- e *exprpb.Expr,
- fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) {
- // Attempt to resolve the overload.
- resolution := c.resolveOverload(loc, fn, target, args)
- // No such overload, error noted in the resolveOverload call, type recorded here.
- if resolution == nil {
- c.setType(e, decls.Error)
- return
- }
- // Overload found.
- c.setType(e, resolution.Type)
- c.setReference(e, resolution.Reference)
-}
-
-func (c *checker) resolveOverload(
- loc common.Location,
- fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
-
- var argTypes []*exprpb.Type
- if target != nil {
- argTypes = append(argTypes, c.getType(target))
- }
- for _, arg := range args {
- argTypes = append(argTypes, c.getType(arg))
- }
-
- var resultType *exprpb.Type
- var checkedRef *exprpb.Reference
- for _, overload := range fn.GetFunction().GetOverloads() {
- // Determine whether the overload is currently considered.
- if c.env.isOverloadDisabled(overload.GetOverloadId()) {
- continue
- }
-
- // Ensure the call style for the overload matches.
- if (target == nil && overload.GetIsInstanceFunction()) ||
- (target != nil && !overload.GetIsInstanceFunction()) {
- // not a compatible call style.
- continue
- }
-
- overloadType := decls.NewFunctionType(overload.ResultType, overload.Params...)
- if len(overload.GetTypeParams()) > 0 {
- // Instantiate overload's type with fresh type variables.
- substitutions := newMapping()
- for _, typePar := range overload.GetTypeParams() {
- substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar())
- }
- overloadType = substitute(substitutions, overloadType, false)
- }
-
- candidateArgTypes := overloadType.GetFunction().GetArgTypes()
- if c.isAssignableList(argTypes, candidateArgTypes) {
- if checkedRef == nil {
- checkedRef = newFunctionReference(overload.GetOverloadId())
- } else {
- checkedRef.OverloadId = append(checkedRef.GetOverloadId(), overload.GetOverloadId())
- }
-
- // First matching overload, determines result type.
- fnResultType := substitute(c.mappings, overloadType.GetFunction().GetResultType(), false)
- if resultType == nil {
- resultType = fnResultType
- } else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) {
- resultType = decls.Dyn
- }
- }
- }
-
- if resultType == nil {
- c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil)
- resultType = decls.Error
- return nil
- }
-
- return newResolution(checkedRef, resultType)
-}
-
-func (c *checker) checkCreateList(e *exprpb.Expr) {
- create := e.GetListExpr()
- var elemType *exprpb.Type
- for _, e := range create.GetElements() {
- c.check(e)
- elemType = c.joinTypes(c.location(e), elemType, c.getType(e))
- }
- if elemType == nil {
- // If the list is empty, assign free type var to elem type.
- elemType = c.newTypeVar()
- }
- c.setType(e, decls.NewListType(elemType))
-}
-
-func (c *checker) checkCreateStruct(e *exprpb.Expr) {
- str := e.GetStructExpr()
- if str.GetMessageName() != "" {
- c.checkCreateMessage(e)
- } else {
- c.checkCreateMap(e)
- }
-}
-
-func (c *checker) checkCreateMap(e *exprpb.Expr) {
- mapVal := e.GetStructExpr()
- var keyType *exprpb.Type
- var valueType *exprpb.Type
- for _, ent := range mapVal.GetEntries() {
- key := ent.GetMapKey()
- c.check(key)
- keyType = c.joinTypes(c.location(key), keyType, c.getType(key))
-
- c.check(ent.Value)
- valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value))
- }
- if keyType == nil {
- // If the map is empty, assign free type variables to typeKey and value type.
- keyType = c.newTypeVar()
- valueType = c.newTypeVar()
- }
- c.setType(e, decls.NewMapType(keyType, valueType))
-}
-
-func (c *checker) checkCreateMessage(e *exprpb.Expr) {
- msgVal := e.GetStructExpr()
- // Determine the type of the message.
- messageType := decls.Error
- decl := c.env.LookupIdent(msgVal.GetMessageName())
- if decl == nil {
- c.errors.undeclaredReference(
- c.location(e), c.env.container.Name(), msgVal.GetMessageName())
- return
- }
- // Ensure the type name is fully qualified in the AST.
- msgVal.MessageName = decl.GetName()
- c.setReference(e, newIdentReference(decl.GetName(), nil))
- ident := decl.GetIdent()
- identKind := kindOf(ident.GetType())
- if identKind != kindError {
- if identKind != kindType {
- c.errors.notAType(c.location(e), ident.GetType())
- } else {
- messageType = ident.GetType().GetType()
- if kindOf(messageType) != kindObject {
- c.errors.notAMessageType(c.location(e), messageType)
- messageType = decls.Error
- }
- }
- }
- if isObjectWellKnownType(messageType) {
- c.setType(e, getObjectWellKnownType(messageType))
- } else {
- c.setType(e, messageType)
- }
-
- // Check the field initializers.
- for _, ent := range msgVal.GetEntries() {
- field := ent.GetFieldKey()
- value := ent.GetValue()
- c.check(value)
-
- fieldType := decls.Error
- if t, found := c.lookupFieldType(
- c.locationByID(ent.GetId()),
- messageType.GetMessageType(),
- field); found {
- fieldType = t.Type
- }
- if !c.isAssignable(fieldType, c.getType(value)) {
- c.errors.fieldTypeMismatch(
- c.locationByID(ent.Id), field, fieldType, c.getType(value))
- }
- }
-}
-
-func (c *checker) checkComprehension(e *exprpb.Expr) {
- comp := e.GetComprehensionExpr()
- c.check(comp.GetIterRange())
- c.check(comp.GetAccuInit())
- accuType := c.getType(comp.GetAccuInit())
- rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false)
- var varType *exprpb.Type
-
- switch kindOf(rangeType) {
- case kindList:
- varType = rangeType.GetListType().GetElemType()
- case kindMap:
- // Ranges over the keys.
- varType = rangeType.GetMapType().GetKeyType()
- case kindDyn, kindError, kindTypeParam:
- // Set the range type to DYN to prevent assignment to a potentially incorrect type
- // at a later point in type-checking. The isAssignable call will update the type
- // substitutions for the type param under the covers.
- c.isAssignable(decls.Dyn, rangeType)
- // Set the range iteration variable to type DYN as well.
- varType = decls.Dyn
- default:
- c.errors.notAComprehensionRange(c.location(comp.GetIterRange()), rangeType)
- varType = decls.Error
- }
-
- // Create a scope for the comprehension since it has a local accumulation variable.
- // This scope will contain the accumulation variable used to compute the result.
- c.env = c.env.enterScope()
- c.env.Add(decls.NewVar(comp.GetAccuVar(), accuType))
- // Create a block scope for the loop.
- c.env = c.env.enterScope()
- c.env.Add(decls.NewVar(comp.GetIterVar(), varType))
- // Check the variable references in the condition and step.
- c.check(comp.GetLoopCondition())
- c.assertType(comp.GetLoopCondition(), decls.Bool)
- c.check(comp.GetLoopStep())
- c.assertType(comp.GetLoopStep(), accuType)
- // Exit the loop's block scope before checking the result.
- c.env = c.env.exitScope()
- c.check(comp.GetResult())
- // Exit the comprehension scope.
- c.env = c.env.exitScope()
- c.setType(e, substitute(c.mappings, c.getType(comp.GetResult()), false))
-}
-
-// Checks compatibility of joined types, and returns the most general common type.
-func (c *checker) joinTypes(loc common.Location,
- previous *exprpb.Type,
- current *exprpb.Type) *exprpb.Type {
- if previous == nil {
- return current
- }
- if c.isAssignable(previous, current) {
- return mostGeneral(previous, current)
- }
- if c.dynAggregateLiteralElementTypesEnabled() {
- return decls.Dyn
- }
- c.errors.typeMismatch(loc, previous, current)
- return decls.Error
-}
-
-func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
- return c.env.aggLitElemType == dynElementType
-}
-
-func (c *checker) newTypeVar() *exprpb.Type {
- id := c.freeTypeVarCounter
- c.freeTypeVarCounter++
- return decls.NewTypeParamType(fmt.Sprintf("_var%d", id))
-}
-
-func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool {
- subs := isAssignable(c.mappings, t1, t2)
- if subs != nil {
- c.mappings = subs
- return true
- }
-
- return false
-}
-
-func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
- subs := isAssignableList(c.mappings, l1, l2)
- if subs != nil {
- c.mappings = subs
- return true
- }
-
- return false
-}
-
-func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) {
- if _, found := c.env.provider.FindType(messageType); !found {
- // This should not happen, anyway, report an error.
- c.errors.unexpectedFailedResolution(l, messageType)
- return nil, false
- }
-
- if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found {
- return ft, found
- }
-
- c.errors.undefinedField(l, fieldName)
- return nil, false
-}
-
-func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) {
- if old, found := c.types[e.GetId()]; found && !proto.Equal(old, t) {
- c.errors.ReportError(c.location(e),
- "(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, t)
- return
- }
- c.types[e.GetId()] = t
-}
-
-func (c *checker) getType(e *exprpb.Expr) *exprpb.Type {
- return c.types[e.GetId()]
-}
-
-func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) {
- if old, found := c.references[e.GetId()]; found && !proto.Equal(old, r) {
- c.errors.ReportError(c.location(e),
- "Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, r)
- return
- }
- c.references[e.GetId()] = r
-}
-
-func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) {
- if !c.isAssignable(t, c.getType(e)) {
- c.errors.typeMismatch(c.location(e), t, c.getType(e))
- }
-}
-
-type overloadResolution struct {
- Reference *exprpb.Reference
- Type *exprpb.Type
-}
-
-func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution {
- return &overloadResolution{
- Reference: checkedRef,
- Type: t,
- }
-}
-
-func (c *checker) location(e *exprpb.Expr) common.Location {
- return c.locationByID(e.GetId())
-}
-
-func (c *checker) locationByID(id int64) common.Location {
- positions := c.sourceInfo.GetPositions()
- var line = 1
- if offset, found := positions[id]; found {
- col := int(offset)
- for _, lineOffset := range c.sourceInfo.GetLineOffsets() {
- if lineOffset < offset {
- line++
- col = int(offset - lineOffset)
- } else {
- break
- }
- }
- return common.NewLocation(line, col)
- }
- return common.NoLocation
-}
-
-func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference {
- return &exprpb.Reference{Name: name, Value: value}
-}
-
-func newFunctionReference(overloads ...string) *exprpb.Reference {
- return &exprpb.Reference{OverloadId: overloads}
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/cost.go b/etcd/vendor/github.com/google/cel-go/checker/cost.go
deleted file mode 100644
index 7312d1fe2f..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/cost.go
+++ /dev/null
@@ -1,627 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "math"
-
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/parser"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go
-
-// CostEstimator estimates the sizes of variable length input data and the costs of functions.
-type CostEstimator interface {
- // EstimateSize returns a SizeEstimate for the given AstNode, or nil if
- // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function:
- // length of strings and bytes, number of map entries or number of list items.
- // EstimateSize is only called for AstNodes where
- // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size
- // is already obvious to CEL.
- EstimateSize(element AstNode) *SizeEstimate
- // EstimateCallCost returns the estimated cost of an invocation, or nil if
- // the estimator has no estimate to provide.
- EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
-}
-
-// CallEstimate includes a CostEstimate for the call, and an optional estimate of the result object size.
-// The ResultSize should only be provided if the call results in a map, list, string or bytes.
-type CallEstimate struct {
- CostEstimate
- ResultSize *SizeEstimate
-}
-
-// AstNode represents an AST node for the purpose of cost estimations.
-type AstNode interface {
- // Path returns a field path through the provided type declarations to the type of the AstNode, or nil if the AstNode does not
- // represent type directly reachable from the provided type declarations.
- // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
- Path() []string
- // Type returns the deduced type of the AstNode.
- Type() *exprpb.Type
- // Expr returns the expression of the AstNode.
- Expr() *exprpb.Expr
- // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
- // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings
- // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
- // computed size available.
- ComputedSize() *SizeEstimate
-}
-
-type astNode struct {
- path []string
- t *exprpb.Type
- expr *exprpb.Expr
- derivedSize *SizeEstimate
-}
-
-func (e astNode) Path() []string {
- return e.path
-}
-
-func (e astNode) Type() *exprpb.Type {
- return e.t
-}
-
-func (e astNode) Expr() *exprpb.Expr {
- return e.expr
-}
-
-func (e astNode) ComputedSize() *SizeEstimate {
- if e.derivedSize != nil {
- return e.derivedSize
- }
- var v uint64
- switch ek := e.expr.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- switch ck := ek.ConstExpr.GetConstantKind().(type) {
- case *exprpb.Constant_StringValue:
- v = uint64(len(ck.StringValue))
- case *exprpb.Constant_BytesValue:
- v = uint64(len(ck.BytesValue))
- case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
- *exprpb.Constant_Int64Value, *exprpb.Constant_TimestampValue, *exprpb.Constant_Uint64Value,
- *exprpb.Constant_NullValue:
- v = uint64(1)
- default:
- return nil
- }
- case *exprpb.Expr_ListExpr:
- v = uint64(len(ek.ListExpr.GetElements()))
- case *exprpb.Expr_StructExpr:
- if ek.StructExpr.GetMessageName() == "" {
- v = uint64(len(ek.StructExpr.GetEntries()))
- }
- default:
- return nil
- }
-
- return &SizeEstimate{Min: v, Max: v}
-}
-
-// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
-type SizeEstimate struct {
- Min, Max uint64
-}
-
-// Add adds to another SizeEstimate and returns the sum.
-// If add would result in an uint64 overflow, the result is math.MaxUint64.
-func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
- return SizeEstimate{
- addUint64NoOverflow(se.Min, sizeEstimate.Min),
- addUint64NoOverflow(se.Max, sizeEstimate.Max),
- }
-}
-
-// Multiply multiplies by another SizeEstimate and returns the product.
-// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
-func (se SizeEstimate) Multiply(sizeEstimate SizeEstimate) SizeEstimate {
- return SizeEstimate{
- multiplyUint64NoOverflow(se.Min, sizeEstimate.Min),
- multiplyUint64NoOverflow(se.Max, sizeEstimate.Max),
- }
-}
-
-// MultiplyByCostFactor multiplies a SizeEstimate by a cost factor and returns the CostEstimate with the
-// nearest integer of the result, rounded up.
-func (se SizeEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
- return CostEstimate{
- multiplyByCostFactor(se.Min, costPerUnit),
- multiplyByCostFactor(se.Max, costPerUnit),
- }
-}
-
-// MultiplyByCost multiplies by the cost and returns the product.
-// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
-func (se SizeEstimate) MultiplyByCost(cost CostEstimate) CostEstimate {
- return CostEstimate{
- multiplyUint64NoOverflow(se.Min, cost.Min),
- multiplyUint64NoOverflow(se.Max, cost.Max),
- }
-}
-
-// Union returns a SizeEstimate that encompasses both input the SizeEstimate.
-func (se SizeEstimate) Union(size SizeEstimate) SizeEstimate {
- result := se
- if size.Min < result.Min {
- result.Min = size.Min
- }
- if size.Max > result.Max {
- result.Max = size.Max
- }
- return result
-}
-
-// CostEstimate represents an estimated cost range and provides add and multiply operations
-// that do not overflow.
-type CostEstimate struct {
- Min, Max uint64
-}
-
-// Add adds the costs and returns the sum.
-// If add would result in an uint64 overflow for the min or max, the value is set to math.MaxUint64.
-func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
- return CostEstimate{
- addUint64NoOverflow(ce.Min, cost.Min),
- addUint64NoOverflow(ce.Max, cost.Max),
- }
-}
-
-// Multiply multiplies by the cost and returns the product.
-// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
-func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
- return CostEstimate{
- multiplyUint64NoOverflow(ce.Min, cost.Min),
- multiplyUint64NoOverflow(ce.Max, cost.Max),
- }
-}
-
-// MultiplyByCostFactor multiplies a CostEstimate by a cost factor and returns the CostEstimate with the
-// nearest integer of the result, rounded up.
-func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
- return CostEstimate{
- multiplyByCostFactor(ce.Min, costPerUnit),
- multiplyByCostFactor(ce.Max, costPerUnit),
- }
-}
-
-// Union returns a CostEstimate that encompasses both input the CostEstimates.
-func (ce CostEstimate) Union(size CostEstimate) CostEstimate {
- result := ce
- if size.Min < result.Min {
- result.Min = size.Min
- }
- if size.Max > result.Max {
- result.Max = size.Max
- }
- return result
-}
-
-// addUint64NoOverflow adds non-negative ints. If the result is exceeds math.MaxUint64, math.MaxUint64
-// is returned.
-func addUint64NoOverflow(x, y uint64) uint64 {
- if y > 0 && x > math.MaxUint64-y {
- return math.MaxUint64
- }
- return x + y
-}
-
-// multiplyUint64NoOverflow multiplies non-negative ints. If the result is exceeds math.MaxUint64, math.MaxUint64
-// is returned.
-func multiplyUint64NoOverflow(x, y uint64) uint64 {
- if x > 0 && y > 0 && x > math.MaxUint64/y {
- return math.MaxUint64
- }
- return x * y
-}
-
-// multiplyByFactor multiplies an integer by a cost factor float and returns the nearest integer value, rounded up.
-func multiplyByCostFactor(x uint64, y float64) uint64 {
- xFloat := float64(x)
- if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
- return math.MaxUint64
- }
- return uint64(math.Ceil(xFloat * y))
-}
-
-var (
- selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
- constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
-
- createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
- createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
- createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
-)
-
-type coster struct {
- // exprPath maps from Expr Id to field path.
- exprPath map[int64][]string
- // iterRanges tracks the iterRange of each iterVar.
- iterRanges iterRangeScopes
- // computedSizes tracks the computed sizes of call results.
- computedSizes map[int64]SizeEstimate
- checkedExpr *exprpb.CheckedExpr
- estimator CostEstimator
-}
-
-// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
-type iterRangeScopes map[string][]int64
-
-func (vs iterRangeScopes) push(varName string, expr *exprpb.Expr) {
- vs[varName] = append(vs[varName], expr.GetId())
-}
-
-func (vs iterRangeScopes) pop(varName string) {
- varStack := vs[varName]
- vs[varName] = varStack[:len(varStack)-1]
-}
-
-func (vs iterRangeScopes) peek(varName string) (int64, bool) {
- varStack := vs[varName]
- if len(varStack) > 0 {
- return varStack[len(varStack)-1], true
- }
- return 0, false
-}
-
-// Cost estimates the cost of the parsed and type checked CEL expression.
-func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator) CostEstimate {
- c := coster{
- checkedExpr: checker,
- estimator: estimator,
- exprPath: map[int64][]string{},
- iterRanges: map[string][]int64{},
- computedSizes: map[int64]SizeEstimate{},
- }
- return c.cost(checker.GetExpr())
-}
-
-func (c *coster) cost(e *exprpb.Expr) CostEstimate {
- if e == nil {
- return CostEstimate{}
- }
- var cost CostEstimate
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_ConstExpr:
- cost = constCost
- case *exprpb.Expr_IdentExpr:
- cost = c.costIdent(e)
- case *exprpb.Expr_SelectExpr:
- cost = c.costSelect(e)
- case *exprpb.Expr_CallExpr:
- cost = c.costCall(e)
- case *exprpb.Expr_ListExpr:
- cost = c.costCreateList(e)
- case *exprpb.Expr_StructExpr:
- cost = c.costCreateStruct(e)
- case *exprpb.Expr_ComprehensionExpr:
- cost = c.costComprehension(e)
- default:
- return CostEstimate{}
- }
- return cost
-}
-
-func (c *coster) costIdent(e *exprpb.Expr) CostEstimate {
- identExpr := e.GetIdentExpr()
-
- // build and track the field path
- if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok {
- switch c.checkedExpr.TypeMap[iterRange].GetTypeKind().(type) {
- case *exprpb.Type_ListType_:
- c.addPath(e, append(c.exprPath[iterRange], "@items"))
- case *exprpb.Type_MapType_:
- c.addPath(e, append(c.exprPath[iterRange], "@keys"))
- }
- } else {
- c.addPath(e, []string{identExpr.GetName()})
- }
-
- return selectAndIdentCost
-}
-
-func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
- sel := e.GetSelectExpr()
- var sum CostEstimate
- if sel.GetTestOnly() {
- return sum
- }
- sum = sum.Add(c.cost(sel.GetOperand()))
- targetType := c.getType(sel.GetOperand())
- switch kindOf(targetType) {
- case kindMap, kindObject, kindTypeParam:
- sum = sum.Add(selectAndIdentCost)
- }
-
- // build and track the field path
- c.addPath(e, append(c.getPath(sel.GetOperand()), sel.GetField()))
-
- return sum
-}
-
-func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
- call := e.GetCallExpr()
- target := call.GetTarget()
- args := call.GetArgs()
-
- var sum CostEstimate
-
- argTypes := make([]AstNode, len(args))
- argCosts := make([]CostEstimate, len(args))
- for i, arg := range args {
- argCosts[i] = c.cost(arg)
- argTypes[i] = c.newAstNode(arg)
- }
-
- ref := c.checkedExpr.ReferenceMap[e.GetId()]
- if ref == nil || len(ref.GetOverloadId()) == 0 {
- return CostEstimate{}
- }
- var targetType AstNode
- if target != nil {
- if call.Target != nil {
- sum = sum.Add(c.cost(call.GetTarget()))
- targetType = c.newAstNode(call.GetTarget())
- }
- }
- // Pick a cost estimate range that covers all the overload cost estimation ranges
- fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
- var resultSize *SizeEstimate
- for _, overload := range ref.GetOverloadId() {
- overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts)
- fnCost = fnCost.Union(overloadCost.CostEstimate)
- if overloadCost.ResultSize != nil {
- if resultSize == nil {
- resultSize = overloadCost.ResultSize
- } else {
- size := resultSize.Union(*overloadCost.ResultSize)
- resultSize = &size
- }
- }
- // build and track the field path for index operations
- switch overload {
- case overloads.IndexList:
- if len(args) > 0 {
- c.addPath(e, append(c.getPath(args[0]), "@items"))
- }
- case overloads.IndexMap:
- if len(args) > 0 {
- c.addPath(e, append(c.getPath(args[0]), "@values"))
- }
- }
- }
- if resultSize != nil {
- c.computedSizes[e.GetId()] = *resultSize
- }
- return sum.Add(fnCost)
-}
-
-func (c *coster) costCreateList(e *exprpb.Expr) CostEstimate {
- create := e.GetListExpr()
- var sum CostEstimate
- for _, e := range create.GetElements() {
- sum = sum.Add(c.cost(e))
- }
- return sum.Add(createListBaseCost)
-}
-
-func (c *coster) costCreateStruct(e *exprpb.Expr) CostEstimate {
- str := e.GetStructExpr()
- if str.MessageName != "" {
- return c.costCreateMessage(e)
- }
- return c.costCreateMap(e)
-}
-
-func (c *coster) costCreateMap(e *exprpb.Expr) CostEstimate {
- mapVal := e.GetStructExpr()
- var sum CostEstimate
- for _, ent := range mapVal.GetEntries() {
- key := ent.GetMapKey()
- sum = sum.Add(c.cost(key))
-
- sum = sum.Add(c.cost(ent.GetValue()))
- }
- return sum.Add(createMapBaseCost)
-}
-
-func (c *coster) costCreateMessage(e *exprpb.Expr) CostEstimate {
- msgVal := e.GetStructExpr()
- var sum CostEstimate
- for _, ent := range msgVal.GetEntries() {
- sum = sum.Add(c.cost(ent.GetValue()))
- }
- return sum.Add(createMessageBaseCost)
-}
-
-func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate {
- comp := e.GetComprehensionExpr()
- var sum CostEstimate
- sum = sum.Add(c.cost(comp.GetIterRange()))
- sum = sum.Add(c.cost(comp.GetAccuInit()))
-
- // Track the iterRange of each IterVar for field path construction
- c.iterRanges.push(comp.GetIterVar(), comp.GetIterRange())
- loopCost := c.cost(comp.GetLoopCondition())
- stepCost := c.cost(comp.GetLoopStep())
- c.iterRanges.pop(comp.GetIterVar())
- sum = sum.Add(c.cost(comp.Result))
- rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange()))
- rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
- sum = sum.Add(rangeCost)
-
- return sum
-}
-
-func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
- if l := t.ComputedSize(); l != nil {
- return *l
- }
- if l := c.estimator.EstimateSize(t); l != nil {
- return *l
- }
- // return an estimate of 1 for return types of set
- // lengths, since strings/bytes/more complex objects could be of
- // variable length
- if isScalar(t.Type()) {
- // TODO: since the logic for size estimation is split between
- // ComputedSize and isScalar, changing one will likely require changing
- // the other, so they should be merged in the future if possible
- return SizeEstimate{Min: 1, Max: 1}
- }
- return SizeEstimate{Min: 0, Max: math.MaxUint64}
-}
-
-func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
- argCostSum := func() CostEstimate {
- var sum CostEstimate
- for _, a := range argCosts {
- sum = sum.Add(a)
- }
- return sum
- }
-
- if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
- callEst := *est
- return CallEstimate{CostEstimate: callEst.Add(argCostSum())}
- }
- switch overloadID {
- // O(n) functions
- case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString:
- if len(args) == 1 {
- return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
- }
- case overloads.InList:
- // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
- // We just assume all list containment checks are O(n).
- if len(args) == 2 {
- return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
- }
- // O(nm) functions
- case overloads.MatchesString:
- // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
- if target != nil && len(args) == 1 {
- // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
- // in case where string is empty but regex is still expensive.
- strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
- // We don't know how many expressions are in the regex, just the string length (a huge
- // improvement here would be to somehow get a count the number of expressions in the regex or
- // how many states are in the regex state machine and use that to measure regex cost).
- // For now, we're making a guess that each expression in a regex is typically at least 4 chars
- // in length.
- regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
- return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())}
- }
- case overloads.ContainsString:
- if target != nil && len(args) == 1 {
- strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
- substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
- return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())}
- }
- case overloads.LogicalOr, overloads.LogicalAnd:
- lhs := argCosts[0]
- rhs := argCosts[1]
- // min cost is min of LHS for short circuited && or ||
- argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max}
- return CallEstimate{CostEstimate: argCost}
- case overloads.Conditional:
- size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2]))
- conditionalCost := argCosts[0]
- ifTrueCost := argCosts[1]
- ifFalseCost := argCosts[2]
- argCost := conditionalCost.Add(ifTrueCost.Union(ifFalseCost))
- return CallEstimate{CostEstimate: argCost, ResultSize: &size}
- case overloads.AddString, overloads.AddBytes, overloads.AddList:
- if len(args) == 2 {
- lhsSize := c.sizeEstimate(args[0])
- rhsSize := c.sizeEstimate(args[1])
- resultSize := lhsSize.Add(rhsSize)
- switch overloadID {
- case overloads.AddList:
- // list concatenation is O(1), but we handle it here to track size
- return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize}
- default:
- return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize}
- }
- }
- case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
- overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
- overloads.Equals, overloads.NotEquals:
- lhsCost := c.sizeEstimate(args[0])
- rhsCost := c.sizeEstimate(args[1])
- min := uint64(0)
- smallestMax := lhsCost.Max
- if rhsCost.Max < smallestMax {
- smallestMax = rhsCost.Max
- }
- if smallestMax > 0 {
- min = 1
- }
- // equality of 2 scalar values results in a cost of 1
- return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
- }
- // O(1) functions
- // See CostTracker.costCall for more details about O(1) cost calculations
-
- // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit
- // which on an Intel xeon 2.20GHz CPU is 50ns.
- return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
-}
-
-func (c *coster) getType(e *exprpb.Expr) *exprpb.Type {
- return c.checkedExpr.TypeMap[e.GetId()]
-}
-
-func (c *coster) getPath(e *exprpb.Expr) []string {
- return c.exprPath[e.GetId()]
-}
-
-func (c *coster) addPath(e *exprpb.Expr, path []string) {
- c.exprPath[e.GetId()] = path
-}
-
-func (c *coster) newAstNode(e *exprpb.Expr) *astNode {
- path := c.getPath(e)
- if len(path) > 0 && path[0] == parser.AccumulatorName {
- // only provide paths to root vars; omit accumulator vars
- path = nil
- }
- var derivedSize *SizeEstimate
- if size, ok := c.computedSizes[e.GetId()]; ok {
- derivedSize = &size
- }
- return &astNode{path: path, t: c.getType(e), expr: e, derivedSize: derivedSize}
-}
-
-// isScalar returns true if the given type is known to be of a constant size at
-// compile time. isScalar will return false for strings (they are variable-width)
-// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time).
-func isScalar(t *exprpb.Type) bool {
- switch kindOf(t) {
- case kindPrimitive:
- if t.GetPrimitive() != exprpb.Type_STRING && t.GetPrimitive() != exprpb.Type_BYTES {
- return true
- }
- case kindWellKnown:
- if t.GetWellKnown() == exprpb.Type_DURATION || t.GetWellKnown() == exprpb.Type_TIMESTAMP {
- return true
- }
- }
- return false
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
deleted file mode 100644
index 5a24f1da80..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
+++ /dev/null
@@ -1,20 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "decls.go",
- "scopes.go",
- ],
- importpath = "github.com/google/cel-go/checker/decls",
- deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/checker/decls/decls.go b/etcd/vendor/github.com/google/cel-go/checker/decls/decls.go
deleted file mode 100644
index 88a99282d9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/decls/decls.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package decls provides helpers for creating variable and function declarations.
-package decls
-
-import (
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-var (
- // Error type used to communicate issues during type-checking.
- Error = &exprpb.Type{
- TypeKind: &exprpb.Type_Error{
- Error: &emptypb.Empty{}}}
-
- // Dyn is a top-type used to represent any value.
- Dyn = &exprpb.Type{
- TypeKind: &exprpb.Type_Dyn{
- Dyn: &emptypb.Empty{}}}
-)
-
-// Commonly used types.
-var (
- Bool = NewPrimitiveType(exprpb.Type_BOOL)
- Bytes = NewPrimitiveType(exprpb.Type_BYTES)
- Double = NewPrimitiveType(exprpb.Type_DOUBLE)
- Int = NewPrimitiveType(exprpb.Type_INT64)
- Null = &exprpb.Type{
- TypeKind: &exprpb.Type_Null{
- Null: structpb.NullValue_NULL_VALUE}}
- String = NewPrimitiveType(exprpb.Type_STRING)
- Uint = NewPrimitiveType(exprpb.Type_UINT64)
-)
-
-// Well-known types.
-// TODO: Replace with an abstract type registry.
-var (
- Any = NewWellKnownType(exprpb.Type_ANY)
- Duration = NewWellKnownType(exprpb.Type_DURATION)
- Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP)
-)
-
-// NewAbstractType creates an abstract type declaration which references a proto
-// message name and may also include type parameters.
-func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_AbstractType_{
- AbstractType: &exprpb.Type_AbstractType{
- Name: name,
- ParameterTypes: paramTypes}}}
-}
-
-// NewFunctionType creates a function invocation contract, typically only used
-// by type-checking steps after overload resolution.
-func NewFunctionType(resultType *exprpb.Type,
- argTypes ...*exprpb.Type) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Function{
- Function: &exprpb.Type_FunctionType{
- ResultType: resultType,
- ArgTypes: argTypes}}}
-}
-
-// NewFunction creates a named function declaration with one or more overloads.
-func NewFunction(name string,
- overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
- return &exprpb.Decl{
- Name: name,
- DeclKind: &exprpb.Decl_Function{
- Function: &exprpb.Decl_FunctionDecl{
- Overloads: overloads}}}
-}
-
-// NewIdent creates a named identifier declaration with an optional literal
-// value.
-//
-// Literal values are typically only associated with enum identifiers.
-//
-// Deprecated: Use NewVar or NewConst instead.
-func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
- return &exprpb.Decl{
- Name: name,
- DeclKind: &exprpb.Decl_Ident{
- Ident: &exprpb.Decl_IdentDecl{
- Type: t,
- Value: v}}}
-}
-
-// NewConst creates a constant identifier with a CEL constant literal value.
-func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
- return NewIdent(name, t, v)
-}
-
-// NewVar creates a variable identifier.
-func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
- return NewIdent(name, t, nil)
-}
-
-// NewInstanceOverload creates a instance function overload contract.
-// First element of argTypes is instance.
-func NewInstanceOverload(id string, argTypes []*exprpb.Type,
- resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
- return &exprpb.Decl_FunctionDecl_Overload{
- OverloadId: id,
- ResultType: resultType,
- Params: argTypes,
- IsInstanceFunction: true}
-}
-
-// NewListType generates a new list with elements of a certain type.
-func NewListType(elem *exprpb.Type) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_ListType_{
- ListType: &exprpb.Type_ListType{
- ElemType: elem}}}
-}
-
-// NewMapType generates a new map with typed keys and values.
-func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_MapType_{
- MapType: &exprpb.Type_MapType{
- KeyType: key,
- ValueType: value}}}
-}
-
-// NewObjectType creates an object type for a qualified type name.
-func NewObjectType(typeName string) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_MessageType{
- MessageType: typeName}}
-}
-
-// NewOverload creates a function overload declaration which contains a unique
-// overload id as well as the expected argument and result types. Overloads
-// must be aggregated within a Function declaration.
-func NewOverload(id string, argTypes []*exprpb.Type,
- resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
- return &exprpb.Decl_FunctionDecl_Overload{
- OverloadId: id,
- ResultType: resultType,
- Params: argTypes,
- IsInstanceFunction: false}
-}
-
-// NewParameterizedInstanceOverload creates a parametric function instance overload type.
-func NewParameterizedInstanceOverload(id string,
- argTypes []*exprpb.Type,
- resultType *exprpb.Type,
- typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
- return &exprpb.Decl_FunctionDecl_Overload{
- OverloadId: id,
- ResultType: resultType,
- Params: argTypes,
- TypeParams: typeParams,
- IsInstanceFunction: true}
-}
-
-// NewParameterizedOverload creates a parametric function overload type.
-func NewParameterizedOverload(id string,
- argTypes []*exprpb.Type,
- resultType *exprpb.Type,
- typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
- return &exprpb.Decl_FunctionDecl_Overload{
- OverloadId: id,
- ResultType: resultType,
- Params: argTypes,
- TypeParams: typeParams,
- IsInstanceFunction: false}
-}
-
-// NewPrimitiveType creates a type for a primitive value. See the var declarations
-// for Int, Uint, etc.
-func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Primitive{
- Primitive: primitive}}
-}
-
-// NewTypeType creates a new type designating a type.
-func NewTypeType(nested *exprpb.Type) *exprpb.Type {
- if nested == nil {
- // must set the nested field for a valid oneof option
- nested = &exprpb.Type{}
- }
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Type{
- Type: nested}}
-}
-
-// NewTypeParamType creates a type corresponding to a named, contextual parameter.
-func NewTypeParamType(name string) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_TypeParam{
- TypeParam: name}}
-}
-
-// NewWellKnownType creates a type corresponding to a protobuf well-known type
-// value.
-func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_WellKnown{
- WellKnown: wellKnown}}
-}
-
-// NewWrapperType creates a wrapped primitive type instance. Wrapped types
-// are roughly equivalent to a nullable, or optionally valued type.
-func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
- primitive := wrapped.GetPrimitive()
- if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED {
- // TODO: return an error
- panic("Wrapped type must be a primitive")
- }
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Wrapper{
- Wrapper: primitive}}
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/decls/scopes.go b/etcd/vendor/github.com/google/cel-go/checker/decls/scopes.go
deleted file mode 100644
index 608bca3e53..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/decls/scopes.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package decls
-
-import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-
-// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all
-// identifiers in scope and an optional parent representing outer scopes.
-// Each Groups value is a mapping of names to Decls in the ident and function namespaces.
-// Lookups are performed such that bindings in inner scopes shadow those in outer scopes.
-type Scopes struct {
- parent *Scopes
- scopes *Group
-}
-
-// NewScopes creates a new, empty Scopes.
-// Some operations can't be safely performed until a Group is added with Push.
-func NewScopes() *Scopes {
- return &Scopes{
- scopes: newGroup(),
- }
-}
-
-// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil.
-func (s *Scopes) Copy() *Scopes {
- cpy := NewScopes()
- if s == nil {
- return cpy
- }
- if s.parent != nil {
- cpy.parent = s.parent.Copy()
- }
- cpy.scopes = s.scopes.copy()
- return cpy
-}
-
-// Push creates a new Scopes value which references the current Scope as its parent.
-func (s *Scopes) Push() *Scopes {
- return &Scopes{
- parent: s,
- scopes: newGroup(),
- }
-}
-
-// Pop returns the parent Scopes value for the current scope, or the current scope if the parent
-// is nil.
-func (s *Scopes) Pop() *Scopes {
- if s.parent != nil {
- return s.parent
- }
- // TODO: Consider whether this should be an error / panic.
- return s
-}
-
-// AddIdent adds the ident Decl in the current scope.
-// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
-func (s *Scopes) AddIdent(decl *exprpb.Decl) {
- s.scopes.idents[decl.Name] = decl
-}
-
-// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
-// found.
-// Note: The search is performed from innermost to outermost.
-func (s *Scopes) FindIdent(name string) *exprpb.Decl {
- if ident, found := s.scopes.idents[name]; found {
- return ident
- }
- if s.parent != nil {
- return s.parent.FindIdent(name)
- }
- return nil
-}
-
-// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
-// nil if one does not exist.
-// Note: The search is only performed on the current scope and does not search outer scopes.
-func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl {
- if ident, found := s.scopes.idents[name]; found {
- return ident
- }
- return nil
-}
-
-// SetFunction adds the function Decl to the current scope.
-// Note: Any previous entry for a function in the current scope with the same name is overwritten.
-func (s *Scopes) SetFunction(fn *exprpb.Decl) {
- s.scopes.functions[fn.Name] = fn
-}
-
-// FindFunction finds the first function Decl with a matching name in Scopes.
-// The search is performed from innermost to outermost.
-// Returns nil if no such function in Scopes.
-func (s *Scopes) FindFunction(name string) *exprpb.Decl {
- if fn, found := s.scopes.functions[name]; found {
- return fn
- }
- if s.parent != nil {
- return s.parent.FindFunction(name)
- }
- return nil
-}
-
-// Group is a set of Decls that is pushed on or popped off a Scopes as a unit.
-// Contains separate namespaces for identifier and function Decls.
-// (Should be named "Scope" perhaps?)
-type Group struct {
- idents map[string]*exprpb.Decl
- functions map[string]*exprpb.Decl
-}
-
-// copy creates a new Group instance with a shallow copy of the variables and functions.
-// If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write.
-func (g *Group) copy() *Group {
- cpy := &Group{
- idents: make(map[string]*exprpb.Decl, len(g.idents)),
- functions: make(map[string]*exprpb.Decl, len(g.functions)),
- }
- for n, id := range g.idents {
- cpy.idents[n] = id
- }
- for n, fn := range g.functions {
- cpy.functions[n] = fn
- }
- return cpy
-}
-
-// newGroup creates a new Group with empty maps for identifiers and functions.
-func newGroup() *Group {
- return &Group{
- idents: make(map[string]*exprpb.Decl),
- functions: make(map[string]*exprpb.Decl),
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/env.go b/etcd/vendor/github.com/google/cel-go/checker/env.go
deleted file mode 100644
index c7eeb04eee..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/env.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "fmt"
- "strings"
-
- "google.golang.org/protobuf/proto"
-
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/parser"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-type aggregateLiteralElementType int
-
-const (
- dynElementType aggregateLiteralElementType = iota
- homogenousElementType aggregateLiteralElementType = 1 << iota
-)
-
-var (
- crossTypeNumericComparisonOverloads = map[string]struct{}{
- // double <-> int | uint
- overloads.LessDoubleInt64: {},
- overloads.LessDoubleUint64: {},
- overloads.LessEqualsDoubleInt64: {},
- overloads.LessEqualsDoubleUint64: {},
- overloads.GreaterDoubleInt64: {},
- overloads.GreaterDoubleUint64: {},
- overloads.GreaterEqualsDoubleInt64: {},
- overloads.GreaterEqualsDoubleUint64: {},
- // int <-> double | uint
- overloads.LessInt64Double: {},
- overloads.LessInt64Uint64: {},
- overloads.LessEqualsInt64Double: {},
- overloads.LessEqualsInt64Uint64: {},
- overloads.GreaterInt64Double: {},
- overloads.GreaterInt64Uint64: {},
- overloads.GreaterEqualsInt64Double: {},
- overloads.GreaterEqualsInt64Uint64: {},
- // uint <-> double | int
- overloads.LessUint64Double: {},
- overloads.LessUint64Int64: {},
- overloads.LessEqualsUint64Double: {},
- overloads.LessEqualsUint64Int64: {},
- overloads.GreaterUint64Double: {},
- overloads.GreaterUint64Int64: {},
- overloads.GreaterEqualsUint64Double: {},
- overloads.GreaterEqualsUint64Int64: {},
- }
-)
-
-// Env is the environment for type checking.
-//
-// The Env is comprised of a container, type provider, declarations, and other related objects
-// which can be used to assist with type-checking.
-type Env struct {
- container *containers.Container
- provider ref.TypeProvider
- declarations *decls.Scopes
- aggLitElemType aggregateLiteralElementType
- filteredOverloadIDs map[string]struct{}
-}
-
-// NewEnv returns a new *Env with the given parameters.
-func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...Option) (*Env, error) {
- declarations := decls.NewScopes()
- declarations.Push()
-
- envOptions := &options{}
- for _, opt := range opts {
- if err := opt(envOptions); err != nil {
- return nil, err
- }
- }
- aggLitElemType := dynElementType
- if envOptions.homogeneousAggregateLiterals {
- aggLitElemType = homogenousElementType
- }
- filteredOverloadIDs := crossTypeNumericComparisonOverloads
- if envOptions.crossTypeNumericComparisons {
- filteredOverloadIDs = make(map[string]struct{})
- }
- if envOptions.validatedDeclarations != nil {
- declarations = envOptions.validatedDeclarations.Copy()
- }
- return &Env{
- container: container,
- provider: provider,
- declarations: declarations,
- aggLitElemType: aggLitElemType,
- filteredOverloadIDs: filteredOverloadIDs,
- }, nil
-}
-
-// Add adds new Decl protos to the Env.
-// Returns an error for identifier redeclarations.
-func (e *Env) Add(decls ...*exprpb.Decl) error {
- errMsgs := make([]errorMsg, 0)
- for _, decl := range decls {
- switch decl.DeclKind.(type) {
- case *exprpb.Decl_Ident:
- errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl)))
- case *exprpb.Decl_Function:
- errMsgs = append(errMsgs, e.setFunction(sanitizeFunction(decl))...)
- }
- }
- return formatError(errMsgs)
-}
-
-// LookupIdent returns a Decl proto for typeName as an identifier in the Env.
-// Returns nil if no such identifier is found in the Env.
-func (e *Env) LookupIdent(name string) *exprpb.Decl {
- for _, candidate := range e.container.ResolveCandidateNames(name) {
- if ident := e.declarations.FindIdent(candidate); ident != nil {
- return ident
- }
-
- // Next try to import the name as a reference to a message type. If found,
- // the declaration is added to the outest (global) scope of the
- // environment, so next time we can access it faster.
- if t, found := e.provider.FindType(candidate); found {
- decl := decls.NewVar(candidate, t)
- e.declarations.AddIdent(decl)
- return decl
- }
-
- // Next try to import this as an enum value by splitting the name in a type prefix and
- // the enum inside.
- if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
- decl := decls.NewIdent(candidate,
- decls.Int,
- &exprpb.Constant{
- ConstantKind: &exprpb.Constant_Int64Value{
- Int64Value: int64(enumValue.(types.Int))}})
- e.declarations.AddIdent(decl)
- return decl
- }
- }
- return nil
-}
-
-// LookupFunction returns a Decl proto for typeName as a function in env.
-// Returns nil if no such function is found in env.
-func (e *Env) LookupFunction(name string) *exprpb.Decl {
- for _, candidate := range e.container.ResolveCandidateNames(name) {
- if fn := e.declarations.FindFunction(candidate); fn != nil {
- return fn
- }
- }
- return nil
-}
-
-// addOverload adds overload to function declaration f.
-// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro.
-func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg {
- errMsgs := make([]errorMsg, 0)
- function := f.GetFunction()
- emptyMappings := newMapping()
- overloadFunction := decls.NewFunctionType(overload.GetResultType(),
- overload.GetParams()...)
- overloadErased := substitute(emptyMappings, overloadFunction, true)
- for _, existing := range function.GetOverloads() {
- existingFunction := decls.NewFunctionType(existing.GetResultType(), existing.GetParams()...)
- existingErased := substitute(emptyMappings, existingFunction, true)
- overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil ||
- isAssignable(emptyMappings, existingErased, overloadErased) != nil
- if overlap &&
- overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() {
- errMsgs = append(errMsgs,
- overlappingOverloadError(f.Name,
- overload.GetOverloadId(), overloadFunction,
- existing.GetOverloadId(), existingFunction))
- }
- }
-
- for _, macro := range parser.AllMacros {
- if macro.Function() == f.Name &&
- macro.IsReceiverStyle() == overload.GetIsInstanceFunction() &&
- macro.ArgCount() == len(overload.GetParams()) {
- errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount()))
- }
- }
- if len(errMsgs) > 0 {
- return errMsgs
- }
- function.Overloads = append(function.GetOverloads(), overload)
- return errMsgs
-}
-
-// setFunction adds the function Decl to the Env.
-// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
-// If overload overlaps with an existing overload, adds to the errors in the Env instead.
-func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg {
- errorMsgs := make([]errorMsg, 0)
- overloads := decl.GetFunction().GetOverloads()
- current := e.declarations.FindFunction(decl.Name)
- if current == nil {
- //Add the function declaration without overloads and check the overloads below.
- current = decls.NewFunction(decl.Name)
- } else {
- existingOverloads := map[string]*exprpb.Decl_FunctionDecl_Overload{}
- for _, overload := range current.GetFunction().GetOverloads() {
- existingOverloads[overload.GetOverloadId()] = overload
- }
- newOverloads := []*exprpb.Decl_FunctionDecl_Overload{}
- for _, overload := range overloads {
- existing, found := existingOverloads[overload.GetOverloadId()]
- if !found || !proto.Equal(existing, overload) {
- newOverloads = append(newOverloads, overload)
- }
- }
- overloads = newOverloads
- if len(newOverloads) == 0 {
- return errorMsgs
- }
- // Copy on write since we don't know where this original definition came from.
- current = proto.Clone(current).(*exprpb.Decl)
- }
- e.declarations.SetFunction(current)
- for _, overload := range overloads {
- errorMsgs = append(errorMsgs, e.addOverload(current, overload)...)
- }
- return errorMsgs
-}
-
-// addIdent adds the Decl to the declarations in the Env.
-// Returns a non-empty errorMsg if the identifier is already declared in the scope.
-func (e *Env) addIdent(decl *exprpb.Decl) errorMsg {
- current := e.declarations.FindIdentInScope(decl.Name)
- if current != nil {
- if proto.Equal(current, decl) {
- return ""
- }
- return overlappingIdentifierError(decl.Name)
- }
- e.declarations.AddIdent(decl)
- return ""
-}
-
-// isOverloadDisabled returns whether the overloadID is disabled in the current environment.
-func (e *Env) isOverloadDisabled(overloadID string) bool {
- _, found := e.filteredOverloadIDs[overloadID]
- return found
-}
-
-// sanitizeFunction replaces well-known types referenced by message name with their equivalent
-// CEL built-in type instances.
-func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
- fn := decl.GetFunction()
- // Determine whether the declaration requires replacements from proto-based message type
- // references to well-known CEL type references.
- var needsSanitizing bool
- for _, o := range fn.GetOverloads() {
- if isObjectWellKnownType(o.GetResultType()) {
- needsSanitizing = true
- break
- }
- for _, p := range o.GetParams() {
- if isObjectWellKnownType(p) {
- needsSanitizing = true
- break
- }
- }
- }
-
- // Early return if the declaration requires no modification.
- if !needsSanitizing {
- return decl
- }
-
- // Sanitize all of the overloads if any overload requires an update to its type references.
- overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads()))
- for i, o := range fn.GetOverloads() {
- rt := o.GetResultType()
- if isObjectWellKnownType(rt) {
- rt = getObjectWellKnownType(rt)
- }
- params := make([]*exprpb.Type, len(o.GetParams()))
- copy(params, o.GetParams())
- for j, p := range params {
- if isObjectWellKnownType(p) {
- params[j] = getObjectWellKnownType(p)
- }
- }
- // If sanitized, replace the overload definition.
- if o.IsInstanceFunction {
- overloads[i] =
- decls.NewInstanceOverload(o.GetOverloadId(), params, rt)
- } else {
- overloads[i] =
- decls.NewOverload(o.GetOverloadId(), params, rt)
- }
- }
- return decls.NewFunction(decl.GetName(), overloads...)
-}
-
-// sanitizeIdent replaces the identifier's well-known types referenced by message name with
-// references to CEL built-in type instances.
-func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl {
- id := decl.GetIdent()
- t := id.GetType()
- if !isObjectWellKnownType(t) {
- return decl
- }
- return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue())
-}
-
-// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name
-// that corresponds the message name of a built-in CEL type.
-func isObjectWellKnownType(t *exprpb.Type) bool {
- if kindOf(t) != kindObject {
- return false
- }
- _, found := pb.CheckedWellKnowns[t.GetMessageType()]
- return found
-}
-
-// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name.
-func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type {
- return pb.CheckedWellKnowns[t.GetMessageType()]
-}
-
-// validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
-// must be copied before use.
-func (e *Env) validatedDeclarations() *decls.Scopes {
- return e.declarations
-}
-
-// enterScope creates a new Env instance with a new innermost declaration scope.
-func (e *Env) enterScope() *Env {
- childDecls := e.declarations.Push()
- return &Env{
- declarations: childDecls,
- container: e.container,
- provider: e.provider,
- aggLitElemType: e.aggLitElemType,
- }
-}
-
-// exitScope creates a new Env instance with the nearest outer declaration scope.
-func (e *Env) exitScope() *Env {
- parentDecls := e.declarations.Pop()
- return &Env{
- declarations: parentDecls,
- container: e.container,
- provider: e.provider,
- aggLitElemType: e.aggLitElemType,
- }
-}
-
-// errorMsg is a type alias meant to represent error-based return values which
-// may be accumulated into an error at a later point in execution.
-type errorMsg string
-
-func overlappingIdentifierError(name string) errorMsg {
- return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
-}
-
-func overlappingOverloadError(name string,
- overloadID1 string, f1 *exprpb.Type,
- overloadID2 string, f2 *exprpb.Type) errorMsg {
- return errorMsg(fmt.Sprintf(
- "overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+
- "cannot be distinguished from '%s' with overloadId: '%s')",
- name,
- FormatCheckedType(f1),
- overloadID1,
- FormatCheckedType(f2),
- overloadID2))
-}
-
-func overlappingMacroError(name string, argCount int) errorMsg {
- return errorMsg(fmt.Sprintf(
- "overlapping macro for name '%s' with %d args", name, argCount))
-}
-
-func formatError(errMsgs []errorMsg) error {
- errStrs := make([]string, 0)
- if len(errMsgs) > 0 {
- for i := 0; i < len(errMsgs); i++ {
- if errMsgs[i] != "" {
- errStrs = append(errStrs, string(errMsgs[i]))
- }
- }
- }
- if len(errStrs) > 0 {
- return fmt.Errorf("%s", strings.Join(errStrs, "\n"))
- }
- return nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/errors.go b/etcd/vendor/github.com/google/cel-go/checker/errors.go
deleted file mode 100644
index 0014f9abe1..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/errors.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "github.com/google/cel-go/common"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// typeErrors is a specialization of Errors.
-type typeErrors struct {
- *common.Errors
-}
-
-func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) {
- e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container)
-}
-
-func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) {
- e.ReportError(l, "type '%s' does not support field selection", t)
-}
-
-func (e *typeErrors) undefinedField(l common.Location, field string) {
- e.ReportError(l, "undefined field '%s'", field)
-}
-
-func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) {
- signature := formatFunction(nil, args, isInstance)
- e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature)
-}
-
-func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) {
- e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t)
-}
-
-func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) {
- e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t))
-}
-
-func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) {
- e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'",
- name, FormatCheckedType(field), FormatCheckedType(value))
-}
-
-func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) {
- e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName)
-}
-
-func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) {
- e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
- FormatCheckedType(t))
-}
-
-func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) {
- e.ReportError(l, "expected type '%s' but found '%s'",
- FormatCheckedType(expected), FormatCheckedType(actual))
-}
-
-func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
- result := ""
- if isInstance {
- target := argTypes[0]
- argTypes = argTypes[1:]
-
- result += FormatCheckedType(target)
- result += "."
- }
-
- result += "("
- for i, arg := range argTypes {
- if i > 0 {
- result += ", "
- }
- result += FormatCheckedType(arg)
- }
- result += ")"
- if resultType != nil {
- result += " -> "
- result += FormatCheckedType(resultType)
- }
-
- return result
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/mapping.go b/etcd/vendor/github.com/google/cel-go/checker/mapping.go
deleted file mode 100644
index fbc55a28d9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/mapping.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-type mapping struct {
- mapping map[string]*exprpb.Type
-}
-
-func newMapping() *mapping {
- return &mapping{
- mapping: make(map[string]*exprpb.Type),
- }
-}
-
-func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) {
- m.mapping[typeKey(from)] = to
-}
-
-func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) {
- if r, found := m.mapping[typeKey(from)]; found {
- return r, found
- }
- return nil, false
-}
-
-func (m *mapping) copy() *mapping {
- c := newMapping()
-
- for k, v := range m.mapping {
- c.mapping[k] = v
- }
- return c
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/options.go b/etcd/vendor/github.com/google/cel-go/checker/options.go
deleted file mode 100644
index cded00a660..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/options.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import "github.com/google/cel-go/checker/decls"
-
-type options struct {
- crossTypeNumericComparisons bool
- homogeneousAggregateLiterals bool
- validatedDeclarations *decls.Scopes
-}
-
-// Option is a functional option for configuring the type-checker
-type Option func(*options) error
-
-// CrossTypeNumericComparisons toggles type-checker support for numeric comparisons across type
-// See https://github.com/google/cel-spec/wiki/proposal-210 for more details.
-func CrossTypeNumericComparisons(enabled bool) Option {
- return func(opts *options) error {
- opts.crossTypeNumericComparisons = enabled
- return nil
- }
-}
-
-// HomogeneousAggregateLiterals toggles support for constructing lists and maps whose elements all
-// have the same type.
-func HomogeneousAggregateLiterals(enabled bool) Option {
- return func(opts *options) error {
- opts.homogeneousAggregateLiterals = enabled
- return nil
- }
-}
-
-// ValidatedDeclarations provides a references to validated declarations which will be copied
-// into new checker instances.
-func ValidatedDeclarations(env *Env) Option {
- return func(opts *options) error {
- opts.validatedDeclarations = env.validatedDeclarations()
- return nil
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/printer.go b/etcd/vendor/github.com/google/cel-go/checker/printer.go
deleted file mode 100644
index e2ed35be83..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/printer.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "github.com/google/cel-go/common/debug"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-type semanticAdorner struct {
- checks *exprpb.CheckedExpr
-}
-
-var _ debug.Adorner = &semanticAdorner{}
-
-func (a *semanticAdorner) GetMetadata(elem interface{}) string {
- result := ""
- e, isExpr := elem.(*exprpb.Expr)
- if !isExpr {
- return result
- }
- t := a.checks.TypeMap[e.GetId()]
- if t != nil {
- result += "~"
- result += FormatCheckedType(t)
- }
-
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr,
- *exprpb.Expr_CallExpr,
- *exprpb.Expr_StructExpr,
- *exprpb.Expr_SelectExpr:
- if ref, found := a.checks.ReferenceMap[e.GetId()]; found {
- if len(ref.GetOverloadId()) == 0 {
- result += "^" + ref.Name
- } else {
- for i, overload := range ref.GetOverloadId() {
- if i == 0 {
- result += "^"
- } else {
- result += "|"
- }
- result += overload
- }
- }
- }
- }
-
- return result
-}
-
-// Print returns a string representation of the Expr message,
-// annotated with types from the CheckedExpr. The Expr must
-// be a sub-expression embedded in the CheckedExpr.
-func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string {
- a := &semanticAdorner{checks: checks}
- return debug.ToAdornedDebugString(e, a)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/standard.go b/etcd/vendor/github.com/google/cel-go/checker/standard.go
deleted file mode 100644
index 5b48a9046a..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/standard.go
+++ /dev/null
@@ -1,492 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "github.com/google/cel-go/checker/decls"
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/overloads"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-var (
- standardDeclarations []*exprpb.Decl
-)
-
-func init() {
- // Some shortcuts we use when building declarations.
- paramA := decls.NewTypeParamType("A")
- typeParamAList := []string{"A"}
- listOfA := decls.NewListType(paramA)
- paramB := decls.NewTypeParamType("B")
- typeParamABList := []string{"A", "B"}
- mapOfAB := decls.NewMapType(paramA, paramB)
-
- var idents []*exprpb.Decl
- for _, t := range []*exprpb.Type{
- decls.Int, decls.Uint, decls.Bool,
- decls.Double, decls.Bytes, decls.String} {
- idents = append(idents,
- decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t)))
- }
- idents = append(idents,
- decls.NewVar("list", decls.NewTypeType(listOfA)),
- decls.NewVar("map", decls.NewTypeType(mapOfAB)),
- decls.NewVar("null_type", decls.NewTypeType(decls.Null)),
- decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil))))
-
- standardDeclarations = append(standardDeclarations, idents...)
- standardDeclarations = append(standardDeclarations, []*exprpb.Decl{
- // Booleans
- decls.NewFunction(operators.Conditional,
- decls.NewParameterizedOverload(overloads.Conditional,
- []*exprpb.Type{decls.Bool, paramA, paramA}, paramA,
- typeParamAList)),
-
- decls.NewFunction(operators.LogicalAnd,
- decls.NewOverload(overloads.LogicalAnd,
- []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
-
- decls.NewFunction(operators.LogicalOr,
- decls.NewOverload(overloads.LogicalOr,
- []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
-
- decls.NewFunction(operators.LogicalNot,
- decls.NewOverload(overloads.LogicalNot,
- []*exprpb.Type{decls.Bool}, decls.Bool)),
-
- decls.NewFunction(operators.NotStrictlyFalse,
- decls.NewOverload(overloads.NotStrictlyFalse,
- []*exprpb.Type{decls.Bool}, decls.Bool)),
-
- decls.NewFunction(operators.Equals,
- decls.NewParameterizedOverload(overloads.Equals,
- []*exprpb.Type{paramA, paramA}, decls.Bool,
- typeParamAList)),
-
- decls.NewFunction(operators.NotEquals,
- decls.NewParameterizedOverload(overloads.NotEquals,
- []*exprpb.Type{paramA, paramA}, decls.Bool,
- typeParamAList)),
-
- // Algebra.
-
- decls.NewFunction(operators.Subtract,
- decls.NewOverload(overloads.SubtractInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Int),
- decls.NewOverload(overloads.SubtractUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
- decls.NewOverload(overloads.SubtractDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Double),
- decls.NewOverload(overloads.SubtractTimestampTimestamp,
- []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration),
- decls.NewOverload(overloads.SubtractTimestampDuration,
- []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
- decls.NewOverload(overloads.SubtractDurationDuration,
- []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
-
- decls.NewFunction(operators.Multiply,
- decls.NewOverload(overloads.MultiplyInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Int),
- decls.NewOverload(overloads.MultiplyUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
- decls.NewOverload(overloads.MultiplyDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
-
- decls.NewFunction(operators.Divide,
- decls.NewOverload(overloads.DivideInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Int),
- decls.NewOverload(overloads.DivideUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
- decls.NewOverload(overloads.DivideDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
-
- decls.NewFunction(operators.Modulo,
- decls.NewOverload(overloads.ModuloInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Int),
- decls.NewOverload(overloads.ModuloUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)),
-
- decls.NewFunction(operators.Add,
- decls.NewOverload(overloads.AddInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Int),
- decls.NewOverload(overloads.AddUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
- decls.NewOverload(overloads.AddDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Double),
- decls.NewOverload(overloads.AddString,
- []*exprpb.Type{decls.String, decls.String}, decls.String),
- decls.NewOverload(overloads.AddBytes,
- []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes),
- decls.NewParameterizedOverload(overloads.AddList,
- []*exprpb.Type{listOfA, listOfA}, listOfA,
- typeParamAList),
- decls.NewOverload(overloads.AddTimestampDuration,
- []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
- decls.NewOverload(overloads.AddDurationTimestamp,
- []*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp),
- decls.NewOverload(overloads.AddDurationDuration,
- []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
-
- decls.NewFunction(operators.Negate,
- decls.NewOverload(overloads.NegateInt64,
- []*exprpb.Type{decls.Int}, decls.Int),
- decls.NewOverload(overloads.NegateDouble,
- []*exprpb.Type{decls.Double}, decls.Double)),
-
- // Index.
-
- decls.NewFunction(operators.Index,
- decls.NewParameterizedOverload(overloads.IndexList,
- []*exprpb.Type{listOfA, decls.Int}, paramA,
- typeParamAList),
- decls.NewParameterizedOverload(overloads.IndexMap,
- []*exprpb.Type{mapOfAB, paramA}, paramB,
- typeParamABList)),
-
- // Collections.
-
- decls.NewFunction(overloads.Size,
- decls.NewInstanceOverload(overloads.SizeStringInst,
- []*exprpb.Type{decls.String}, decls.Int),
- decls.NewInstanceOverload(overloads.SizeBytesInst,
- []*exprpb.Type{decls.Bytes}, decls.Int),
- decls.NewParameterizedInstanceOverload(overloads.SizeListInst,
- []*exprpb.Type{listOfA}, decls.Int, typeParamAList),
- decls.NewParameterizedInstanceOverload(overloads.SizeMapInst,
- []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList),
- decls.NewOverload(overloads.SizeString,
- []*exprpb.Type{decls.String}, decls.Int),
- decls.NewOverload(overloads.SizeBytes,
- []*exprpb.Type{decls.Bytes}, decls.Int),
- decls.NewParameterizedOverload(overloads.SizeList,
- []*exprpb.Type{listOfA}, decls.Int, typeParamAList),
- decls.NewParameterizedOverload(overloads.SizeMap,
- []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)),
-
- decls.NewFunction(operators.In,
- decls.NewParameterizedOverload(overloads.InList,
- []*exprpb.Type{paramA, listOfA}, decls.Bool,
- typeParamAList),
- decls.NewParameterizedOverload(overloads.InMap,
- []*exprpb.Type{paramA, mapOfAB}, decls.Bool,
- typeParamABList)),
-
- // Deprecated 'in()' function.
-
- decls.NewFunction(overloads.DeprecatedIn,
- decls.NewParameterizedOverload(overloads.InList,
- []*exprpb.Type{paramA, listOfA}, decls.Bool,
- typeParamAList),
- decls.NewParameterizedOverload(overloads.InMap,
- []*exprpb.Type{paramA, mapOfAB}, decls.Bool,
- typeParamABList)),
-
- // Conversions to type.
-
- decls.NewFunction(overloads.TypeConvertType,
- decls.NewParameterizedOverload(overloads.TypeConvertType,
- []*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)),
-
- // Conversions to int.
-
- decls.NewFunction(overloads.TypeConvertInt,
- decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int),
- decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int),
- decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int),
- decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int),
- decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)),
-
- // Conversions to uint.
-
- decls.NewFunction(overloads.TypeConvertUint,
- decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint),
- decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint),
- decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint),
- decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)),
-
- // Conversions to double.
-
- decls.NewFunction(overloads.TypeConvertDouble,
- decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double),
- decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double),
- decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double),
- decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)),
-
- // Conversions to bool.
-
- decls.NewFunction(overloads.TypeConvertBool,
- decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool),
- decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)),
-
- // Conversions to string.
-
- decls.NewFunction(overloads.TypeConvertString,
- decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String),
- decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String),
- decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String),
- decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String),
- decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String),
- decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String),
- decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String),
- decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)),
-
- // Conversions to bytes.
-
- decls.NewFunction(overloads.TypeConvertBytes,
- decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes),
- decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)),
-
- // Conversions to timestamps.
-
- decls.NewFunction(overloads.TypeConvertTimestamp,
- decls.NewOverload(overloads.TimestampToTimestamp,
- []*exprpb.Type{decls.Timestamp}, decls.Timestamp),
- decls.NewOverload(overloads.StringToTimestamp,
- []*exprpb.Type{decls.String}, decls.Timestamp),
- decls.NewOverload(overloads.IntToTimestamp,
- []*exprpb.Type{decls.Int}, decls.Timestamp)),
-
- // Conversions to durations.
-
- decls.NewFunction(overloads.TypeConvertDuration,
- decls.NewOverload(overloads.DurationToDuration,
- []*exprpb.Type{decls.Duration}, decls.Duration),
- decls.NewOverload(overloads.StringToDuration,
- []*exprpb.Type{decls.String}, decls.Duration),
- decls.NewOverload(overloads.IntToDuration,
- []*exprpb.Type{decls.Int}, decls.Duration)),
-
- // Conversions to Dyn.
-
- decls.NewFunction(overloads.TypeConvertDyn,
- decls.NewParameterizedOverload(overloads.ToDyn,
- []*exprpb.Type{paramA}, decls.Dyn,
- typeParamAList)),
-
- // String functions.
-
- decls.NewFunction(overloads.Contains,
- decls.NewInstanceOverload(overloads.ContainsString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool)),
- decls.NewFunction(overloads.EndsWith,
- decls.NewInstanceOverload(overloads.EndsWithString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool)),
- decls.NewFunction(overloads.Matches,
- decls.NewInstanceOverload(overloads.MatchesString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool)),
- decls.NewFunction(overloads.StartsWith,
- decls.NewInstanceOverload(overloads.StartsWithString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool)),
-
- // Date/time functions.
-
- decls.NewFunction(overloads.TimeGetFullYear,
- decls.NewInstanceOverload(overloads.TimestampToYear,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToYearWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetMonth,
- decls.NewInstanceOverload(overloads.TimestampToMonth,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToMonthWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetDayOfYear,
- decls.NewInstanceOverload(overloads.TimestampToDayOfYear,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetDayOfMonth,
- decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetDate,
- decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetDayOfWeek,
- decls.NewInstanceOverload(overloads.TimestampToDayOfWeek,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetHours,
- decls.NewInstanceOverload(overloads.TimestampToHours,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToHoursWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
- decls.NewInstanceOverload(overloads.DurationToHours,
- []*exprpb.Type{decls.Duration}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetMinutes,
- decls.NewInstanceOverload(overloads.TimestampToMinutes,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
- decls.NewInstanceOverload(overloads.DurationToMinutes,
- []*exprpb.Type{decls.Duration}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetSeconds,
- decls.NewInstanceOverload(overloads.TimestampToSeconds,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
- decls.NewInstanceOverload(overloads.DurationToSeconds,
- []*exprpb.Type{decls.Duration}, decls.Int)),
-
- decls.NewFunction(overloads.TimeGetMilliseconds,
- decls.NewInstanceOverload(overloads.TimestampToMilliseconds,
- []*exprpb.Type{decls.Timestamp}, decls.Int),
- decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz,
- []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
- decls.NewInstanceOverload(overloads.DurationToMilliseconds,
- []*exprpb.Type{decls.Duration}, decls.Int)),
-
- // Relations.
- decls.NewFunction(operators.Less,
- decls.NewOverload(overloads.LessBool,
- []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
- decls.NewOverload(overloads.LessInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.LessInt64Double,
- []*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.LessInt64Uint64,
- []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.LessUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.LessUint64Double,
- []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.LessUint64Int64,
- []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.LessDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.LessDoubleInt64,
- []*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.LessDoubleUint64,
- []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.LessString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool),
- decls.NewOverload(overloads.LessBytes,
- []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
- decls.NewOverload(overloads.LessTimestamp,
- []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
- decls.NewOverload(overloads.LessDuration,
- []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
-
- decls.NewFunction(operators.LessEquals,
- decls.NewOverload(overloads.LessEqualsBool,
- []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsInt64Double,
- []*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsInt64Uint64,
- []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsUint64Double,
- []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsUint64Int64,
- []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsDoubleInt64,
- []*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsDoubleUint64,
- []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsBytes,
- []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsTimestamp,
- []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
- decls.NewOverload(overloads.LessEqualsDuration,
- []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
-
- decls.NewFunction(operators.Greater,
- decls.NewOverload(overloads.GreaterBool,
- []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
- decls.NewOverload(overloads.GreaterInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.GreaterInt64Double,
- []*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.GreaterInt64Uint64,
- []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.GreaterUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.GreaterUint64Double,
- []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.GreaterUint64Int64,
- []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.GreaterDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.GreaterDoubleInt64,
- []*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.GreaterDoubleUint64,
- []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.GreaterString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool),
- decls.NewOverload(overloads.GreaterBytes,
- []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
- decls.NewOverload(overloads.GreaterTimestamp,
- []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
- decls.NewOverload(overloads.GreaterDuration,
- []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
-
- decls.NewFunction(operators.GreaterEquals,
- decls.NewOverload(overloads.GreaterEqualsBool,
- []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsInt64,
- []*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsInt64Double,
- []*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsInt64Uint64,
- []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsUint64,
- []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsUint64Double,
- []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsUint64Int64,
- []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsDouble,
- []*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsDoubleInt64,
- []*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsDoubleUint64,
- []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsString,
- []*exprpb.Type{decls.String, decls.String}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsBytes,
- []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsTimestamp,
- []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
- decls.NewOverload(overloads.GreaterEqualsDuration,
- []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
- }...)
-}
-
-// StandardDeclarations returns the Decls for all functions and constants in the evaluator.
-func StandardDeclarations() []*exprpb.Decl {
- return standardDeclarations
-}
diff --git a/etcd/vendor/github.com/google/cel-go/checker/types.go b/etcd/vendor/github.com/google/cel-go/checker/types.go
deleted file mode 100644
index 8683797d5b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/checker/types.go
+++ /dev/null
@@ -1,494 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checker
-
-import (
- "fmt"
- "strings"
-
- "github.com/google/cel-go/checker/decls"
-
- "google.golang.org/protobuf/proto"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-const (
- kindUnknown = iota + 1
- kindError
- kindFunction
- kindDyn
- kindPrimitive
- kindWellKnown
- kindWrapper
- kindNull
- kindAbstract
- kindType
- kindList
- kindMap
- kindObject
- kindTypeParam
-)
-
-// FormatCheckedType converts a type message into a string representation.
-func FormatCheckedType(t *exprpb.Type) string {
- switch kindOf(t) {
- case kindDyn:
- return "dyn"
- case kindFunction:
- return formatFunction(t.GetFunction().GetResultType(),
- t.GetFunction().GetArgTypes(),
- false)
- case kindList:
- return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
- case kindObject:
- return t.GetMessageType()
- case kindMap:
- return fmt.Sprintf("map(%s, %s)",
- FormatCheckedType(t.GetMapType().GetKeyType()),
- FormatCheckedType(t.GetMapType().GetValueType()))
- case kindNull:
- return "null"
- case kindPrimitive:
- switch t.GetPrimitive() {
- case exprpb.Type_UINT64:
- return "uint"
- case exprpb.Type_INT64:
- return "int"
- }
- return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
- case kindType:
- if t.GetType() == nil {
- return "type"
- }
- return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
- case kindWellKnown:
- switch t.GetWellKnown() {
- case exprpb.Type_ANY:
- return "any"
- case exprpb.Type_DURATION:
- return "duration"
- case exprpb.Type_TIMESTAMP:
- return "timestamp"
- }
- case kindWrapper:
- return fmt.Sprintf("wrapper(%s)",
- FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper())))
- case kindError:
- return "!error!"
- case kindTypeParam:
- return t.GetTypeParam()
- }
- return t.String()
-}
-
-// isDyn returns true if the input t is either type DYN or a well-known ANY message.
-func isDyn(t *exprpb.Type) bool {
- // Note: object type values that are well-known and map to a DYN value in practice
- // are sanitized prior to being added to the environment.
- switch kindOf(t) {
- case kindDyn:
- return true
- case kindWellKnown:
- return t.GetWellKnown() == exprpb.Type_ANY
- default:
- return false
- }
-}
-
-// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
-func isDynOrError(t *exprpb.Type) bool {
- switch kindOf(t) {
- case kindError:
- return true
- default:
- return isDyn(t)
- }
-}
-
-// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
-// A type is less specific if it matches the other type using the DYN type.
-func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool {
- kind1, kind2 := kindOf(t1), kindOf(t2)
- // The first type is less specific.
- if isDyn(t1) || kind1 == kindTypeParam {
- return true
- }
- // The first type is not less specific.
- if isDyn(t2) || kind2 == kindTypeParam {
- return false
- }
- // Types must be of the same kind to be equal.
- if kind1 != kind2 {
- return false
- }
-
- // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
- // order to return true.
- switch kind1 {
- case kindAbstract:
- a1 := t1.GetAbstractType()
- a2 := t2.GetAbstractType()
- if a1.GetName() != a2.GetName() ||
- len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) {
- return false
- }
- for i, p1 := range a1.GetParameterTypes() {
- if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) {
- return false
- }
- }
- return true
- case kindList:
- return isEqualOrLessSpecific(t1.GetListType().GetElemType(), t2.GetListType().GetElemType())
- case kindMap:
- m1 := t1.GetMapType()
- m2 := t2.GetMapType()
- return isEqualOrLessSpecific(m1.GetKeyType(), m2.GetKeyType()) &&
- isEqualOrLessSpecific(m1.GetValueType(), m2.GetValueType())
- case kindType:
- return true
- default:
- return proto.Equal(t1, t2)
- }
-}
-
-// / internalIsAssignable returns true if t1 is assignable to t2.
-func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
- // Process type parameters.
- kind1, kind2 := kindOf(t1), kindOf(t2)
- if kind2 == kindTypeParam {
- // If t2 is a valid type substitution for t1, return true.
- valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
- if valid {
- return true
- }
- // If t2 is not a valid type sub for t1, and already has a known substitution return false
- // since it is not possible for t1 to be a substitution for t2.
- if !valid && t2HasSub {
- return false
- }
- // Otherwise, fall through to check whether t1 is a possible substitution for t2.
- }
- if kind1 == kindTypeParam {
- // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
- // possible type substitutions have been searched in both directions.
- valid, _ := isValidTypeSubstitution(m, t2, t1)
- return valid
- }
-
- // Next check for wildcard types.
- if isDynOrError(t1) || isDynOrError(t2) {
- return true
- }
-
- // Test for when the types do not need to agree, but are more specific than dyn.
- switch kind1 {
- case kindNull:
- return internalIsAssignableNull(t2)
- case kindPrimitive:
- return internalIsAssignablePrimitive(t1.GetPrimitive(), t2)
- case kindWrapper:
- return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2)
- default:
- if kind1 != kind2 {
- return false
- }
- }
-
- // Test for when the types must agree.
- switch kind1 {
- // ERROR, TYPE_PARAM, and DYN handled above.
- case kindAbstract:
- return internalIsAssignableAbstractType(m, t1.GetAbstractType(), t2.GetAbstractType())
- case kindFunction:
- return internalIsAssignableFunction(m, t1.GetFunction(), t2.GetFunction())
- case kindList:
- return internalIsAssignable(m, t1.GetListType().GetElemType(), t2.GetListType().GetElemType())
- case kindMap:
- return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType())
- case kindObject:
- return t1.GetMessageType() == t2.GetMessageType()
- case kindType:
- // A type is a type is a type, any additional parameterization of the
- // type cannot affect method resolution or assignability.
- return true
- case kindWellKnown:
- return t1.GetWellKnown() == t2.GetWellKnown()
- default:
- return false
- }
-}
-
-// isValidTypeSubstitution returns whether t2 (or its type substitution) is a valid type
-// substitution for t1, and whether t2 has a type substitution in mapping m.
-//
-// The type t2 is a valid substitution for t1 if any of the following statements is true
-// - t2 has a type substitition (t2sub) equal to t1
-// - t2 has a type substitution (t2sub) assignable to t1
-// - t2 does not occur within t1.
-func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) {
- // Early return if the t1 and t2 are the same instance.
- kind1, kind2 := kindOf(t1), kindOf(t2)
- if kind1 == kind2 && (t1 == t2 || proto.Equal(t1, t2)) {
- return true, true
- }
- if t2Sub, found := m.find(t2); found {
- // Early return if t1 and t2Sub are the same instance as otherwise the mapping
- // might mark a type as being a subtitution for itself.
- if kind1 == kindOf(t2Sub) && (t1 == t2Sub || proto.Equal(t1, t2Sub)) {
- return true, true
- }
- // If the types are compatible, pick the more general type and return true
- if internalIsAssignable(m, t1, t2Sub) {
- t2New := mostGeneral(t1, t2Sub)
- // only update the type reference map if the target type does not occur within it.
- if notReferencedIn(m, t2, t2New) {
- m.add(t2, t2New)
- }
- // acknowledge the type agreement, and that the substitution is already tracked.
- return true, true
- }
- return false, true
- }
- if notReferencedIn(m, t2, t1) {
- m.add(t2, t1)
- return true, false
- }
- return false, false
-}
-
-// internalIsAssignableAbstractType returns true if the abstract type names agree and all type
-// parameters are assignable.
-func internalIsAssignableAbstractType(m *mapping, a1 *exprpb.Type_AbstractType, a2 *exprpb.Type_AbstractType) bool {
- return a1.GetName() == a2.GetName() &&
- internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes())
-}
-
-// internalIsAssignableFunction returns true if the function return type and arg types are
-// assignable.
-func internalIsAssignableFunction(m *mapping, f1 *exprpb.Type_FunctionType, f2 *exprpb.Type_FunctionType) bool {
- f1ArgTypes := flattenFunctionTypes(f1)
- f2ArgTypes := flattenFunctionTypes(f2)
- if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) {
- return true
- }
- return false
-}
-
-// internalIsAssignableList returns true if the element types at each index in the list are
-// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
-// assignable.
-func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
- if len(l1) != len(l2) {
- return false
- }
- for i, t1 := range l1 {
- if !internalIsAssignable(m, t1, l2[i]) {
- return false
- }
- }
- return true
-}
-
-// internalIsAssignableMap returns true if map m1 may be assigned to map m2.
-func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool {
- if internalIsAssignableList(m,
- []*exprpb.Type{m1.GetKeyType(), m1.GetValueType()},
- []*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) {
- return true
- }
- return false
-}
-
-// internalIsAssignableNull returns true if the type is nullable.
-func internalIsAssignableNull(t *exprpb.Type) bool {
- switch kindOf(t) {
- case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper:
- return true
- default:
- return false
- }
-}
-
-// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper
-// for the primitive type.
-func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool {
- switch kindOf(target) {
- case kindPrimitive:
- return p == target.GetPrimitive()
- case kindWrapper:
- return p == target.GetWrapper()
- default:
- return false
- }
-}
-
-// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
-func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping {
- mCopy := m.copy()
- if internalIsAssignable(mCopy, t1, t2) {
- return mCopy
- }
- return nil
-}
-
-// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
-func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping {
- mCopy := m.copy()
- if internalIsAssignableList(mCopy, l1, l2) {
- return mCopy
- }
- return nil
-}
-
-// kindOf returns the kind of the type as defined in the checked.proto.
-func kindOf(t *exprpb.Type) int {
- if t == nil || t.TypeKind == nil {
- return kindUnknown
- }
- switch t.GetTypeKind().(type) {
- case *exprpb.Type_Error:
- return kindError
- case *exprpb.Type_Function:
- return kindFunction
- case *exprpb.Type_Dyn:
- return kindDyn
- case *exprpb.Type_Primitive:
- return kindPrimitive
- case *exprpb.Type_WellKnown:
- return kindWellKnown
- case *exprpb.Type_Wrapper:
- return kindWrapper
- case *exprpb.Type_Null:
- return kindNull
- case *exprpb.Type_Type:
- return kindType
- case *exprpb.Type_ListType_:
- return kindList
- case *exprpb.Type_MapType_:
- return kindMap
- case *exprpb.Type_MessageType:
- return kindObject
- case *exprpb.Type_TypeParam:
- return kindTypeParam
- case *exprpb.Type_AbstractType_:
- return kindAbstract
- }
- return kindUnknown
-}
-
-// mostGeneral returns the more general of two types which are known to unify.
-func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type {
- if isEqualOrLessSpecific(t1, t2) {
- return t1
- }
- return t2
-}
-
-// notReferencedIn checks whether the type doesn't appear directly or transitively within the other
-// type. This is a standard requirement for type unification, commonly referred to as the "occurs
-// check".
-func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool {
- if proto.Equal(t, withinType) {
- return false
- }
- withinKind := kindOf(withinType)
- switch withinKind {
- case kindTypeParam:
- wtSub, found := m.find(withinType)
- if !found {
- return true
- }
- return notReferencedIn(m, t, wtSub)
- case kindAbstract:
- for _, pt := range withinType.GetAbstractType().GetParameterTypes() {
- if !notReferencedIn(m, t, pt) {
- return false
- }
- }
- return true
- case kindList:
- return notReferencedIn(m, t, withinType.GetListType().GetElemType())
- case kindMap:
- mt := withinType.GetMapType()
- return notReferencedIn(m, t, mt.GetKeyType()) && notReferencedIn(m, t, mt.GetValueType())
- case kindWrapper:
- return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper()))
- default:
- return true
- }
-}
-
-// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
-// parameters are replaced by DYN if typeParamToDyn is true.
-func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type {
- if tSub, found := m.find(t); found {
- return substitute(m, tSub, typeParamToDyn)
- }
- kind := kindOf(t)
- if typeParamToDyn && kind == kindTypeParam {
- return decls.Dyn
- }
- switch kind {
- case kindAbstract:
- at := t.GetAbstractType()
- params := make([]*exprpb.Type, len(at.GetParameterTypes()))
- for i, p := range at.GetParameterTypes() {
- params[i] = substitute(m, p, typeParamToDyn)
- }
- return decls.NewAbstractType(at.GetName(), params...)
- case kindFunction:
- fn := t.GetFunction()
- rt := substitute(m, fn.ResultType, typeParamToDyn)
- args := make([]*exprpb.Type, len(fn.GetArgTypes()))
- for i, a := range fn.ArgTypes {
- args[i] = substitute(m, a, typeParamToDyn)
- }
- return decls.NewFunctionType(rt, args...)
- case kindList:
- return decls.NewListType(substitute(m, t.GetListType().GetElemType(), typeParamToDyn))
- case kindMap:
- mt := t.GetMapType()
- return decls.NewMapType(substitute(m, mt.GetKeyType(), typeParamToDyn),
- substitute(m, mt.GetValueType(), typeParamToDyn))
- case kindType:
- if t.GetType() != nil {
- return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn))
- }
- return t
- default:
- return t
- }
-}
-
-func typeKey(t *exprpb.Type) string {
- return FormatCheckedType(t)
-}
-
-// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR
-// and returns a slice containing {T1, T2, ..., TN, TR}.
-func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type {
- argTypes := f.GetArgTypes()
- if len(argTypes) == 0 {
- return []*exprpb.Type{f.GetResultType()}
- }
- flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1)
- for i, at := range argTypes {
- flattend[i] = at
- }
- flattend[len(argTypes)] = f.GetResultType()
- return flattend
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/BUILD.bazel
deleted file mode 100644
index a0058aebe0..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/BUILD.bazel
+++ /dev/null
@@ -1,35 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "cost.go",
- "error.go",
- "errors.go",
- "location.go",
- "source.go",
- ],
- importpath = "github.com/google/cel-go/common",
- deps = [
- "//common/runes:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_x_text//width:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "errors_test.go",
- "source_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
deleted file mode 100644
index 18142d94ef..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
+++ /dev/null
@@ -1,31 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "container.go",
- ],
- importpath = "github.com/google/cel-go/common/containers",
- deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "container_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
- deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/containers/container.go b/etcd/vendor/github.com/google/cel-go/common/containers/container.go
deleted file mode 100644
index d46698d3cd..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/containers/container.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package containers defines types and functions for resolving qualified names within a namespace
-// or type provided to CEL.
-package containers
-
-import (
- "fmt"
- "strings"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-var (
- // DefaultContainer has an empty container name.
- DefaultContainer *Container = nil
-
- // Empty map to search for aliases when needed.
- noAliases = make(map[string]string)
-)
-
-// NewContainer creates a new Container with the fully-qualified name.
-func NewContainer(opts ...ContainerOption) (*Container, error) {
- var c *Container
- var err error
- for _, opt := range opts {
- c, err = opt(c)
- if err != nil {
- return nil, err
- }
- }
- return c, nil
-}
-
-// Container holds a reference to an optional qualified container name and set of aliases.
-//
-// The program container can be used to simplify variable, function, and type specification within
-// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more
-// details.
-type Container struct {
- name string
- aliases map[string]string
-}
-
-// Extend creates a new Container with the existing settings and applies a series of
-// ContainerOptions to further configure the new container.
-func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
- if c == nil {
- return NewContainer(opts...)
- }
- // Copy the name and aliases of the existing container.
- ext := &Container{name: c.Name()}
- if len(c.aliasSet()) > 0 {
- aliasSet := make(map[string]string, len(c.aliasSet()))
- for k, v := range c.aliasSet() {
- aliasSet[k] = v
- }
- ext.aliases = aliasSet
- }
- // Apply the new options to the container.
- var err error
- for _, opt := range opts {
- ext, err = opt(ext)
- if err != nil {
- return nil, err
- }
- }
- return ext, nil
-}
-
-// Name returns the fully-qualified name of the container.
-//
-// The name may conceptually be a namespace, package, or type.
-func (c *Container) Name() string {
- if c == nil {
- return ""
- }
- return c.name
-}
-
-// ResolveCandidateNames returns the candidates name of namespaced identifiers in C++ resolution
-// order.
-//
-// Names which shadow other names are returned first. If a name includes a leading dot ('.'),
-// the name is treated as an absolute identifier which cannot be shadowed.
-//
-// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order:
-//
-// a.b.c.M.N.R.s
-// a.b.c.M.R.s
-// a.b.c.R.s
-// a.b.R.s
-// a.R.s
-// R.s
-//
-// If aliases or abbreviations are configured for the container, then alias names will take
-// precedence over containerized names.
-func (c *Container) ResolveCandidateNames(name string) []string {
- if strings.HasPrefix(name, ".") {
- qn := name[1:]
- alias, isAlias := c.findAlias(qn)
- if isAlias {
- return []string{alias}
- }
- return []string{qn}
- }
- alias, isAlias := c.findAlias(name)
- if isAlias {
- return []string{alias}
- }
- if c.Name() == "" {
- return []string{name}
- }
- nextCont := c.Name()
- candidates := []string{nextCont + "." + name}
- for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") {
- nextCont = nextCont[:i]
- candidates = append(candidates, nextCont+"."+name)
- }
- return append(candidates, name)
-}
-
-// aliasSet returns the alias to fully-qualified name mapping stored in the container.
-func (c *Container) aliasSet() map[string]string {
- if c == nil || c.aliases == nil {
- return noAliases
- }
- return c.aliases
-}
-
-// findAlias takes a name as input and returns an alias expansion if one exists.
-//
-// If the name is qualified, the first component of the qualified name is checked against known
-// aliases. Any alias that is found in a qualified name is expanded in the result:
-//
-// alias: R -> my.alias.R
-// name: R.S.T
-// output: my.alias.R.S.T
-//
-// Note, the name must not have a leading dot.
-func (c *Container) findAlias(name string) (string, bool) {
- // If an alias exists for the name, ensure it is searched last.
- simple := name
- qualifier := ""
- dot := strings.Index(name, ".")
- if dot >= 0 {
- simple = name[0:dot]
- qualifier = name[dot:]
- }
- alias, found := c.aliasSet()[simple]
- if !found {
- return "", false
- }
- return alias + qualifier, true
-}
-
-// ContainerOption specifies a functional configuration option for a Container.
-//
-// Note, ContainerOption implementations must be able to handle nil container inputs.
-type ContainerOption func(*Container) (*Container, error)
-
-// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
-//
-// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
-// Abbreviations can be useful when working with variables, functions, and especially types from
-// multiple namespaces:
-//
-// // CEL object construction
-// qual.pkg.version.ObjTypeName{
-// field: alt.container.ver.FieldTypeName{value: ...}
-// }
-//
-// Only one the qualified names above may be used as the CEL container, so at least one of these
-// references must be a long qualified name within an otherwise short CEL program. Using the
-// following abbreviations, the program becomes much simpler:
-//
-// // CEL Go option
-// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
-// // Simplified Object construction
-// ObjTypeName{field: FieldTypeName{value: ...}}
-//
-// There are a few rules for the qualified names and the simple abbreviations generated from them:
-// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
-// - The last element in the qualified name is the abbreviation.
-// - Abbreviations must not collide with each other.
-// - The abbreviation must not collide with unqualified names in use.
-//
-// Abbreviations are distinct from container-based references in the following important ways:
-// - Abbreviations must expand to a fully-qualified name.
-// - Expanded abbreviations do not participate in namespace resolution.
-// - Abbreviation expansion is done instead of the container search for a matching identifier.
-// - Containers follow C++ namespace resolution rules with searches from the most qualified name
-// to the least qualified name.
-// - Container references within the CEL program may be relative, and are resolved to fully
-// qualified names at either type-check time or program plan time, whichever comes first.
-//
-// If there is ever a case where an identifier could be in both the container and as an
-// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
-// preserved between compilations even as the container evolves.
-func Abbrevs(qualifiedNames ...string) ContainerOption {
- return func(c *Container) (*Container, error) {
- for _, qn := range qualifiedNames {
- ind := strings.LastIndex(qn, ".")
- if ind <= 0 || ind >= len(qn)-1 {
- return nil, fmt.Errorf(
- "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
- }
- alias := qn[ind+1:]
- var err error
- c, err = aliasAs("abbreviation", qn, alias)(c)
- if err != nil {
- return nil, err
- }
- }
- return c, nil
- }
-}
-
-// Alias associates a fully-qualified name with a user-defined alias.
-//
-// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option
-// are more easily traced back to source code. The Alias option is useful for propagating alias
-// configuration from one Container instance to another, and may also be useful for remapping
-// poorly chosen protobuf message / package names.
-//
-// Note: all of the rules that apply to Abbrevs also apply to Alias.
-func Alias(qualifiedName, alias string) ContainerOption {
- return aliasAs("alias", qualifiedName, alias)
-}
-
-func aliasAs(kind, qualifiedName, alias string) ContainerOption {
- return func(c *Container) (*Container, error) {
- if len(alias) == 0 || strings.Contains(alias, ".") {
- return nil, fmt.Errorf(
- "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias)
- }
-
- if qualifiedName[0:1] == "." {
- return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s",
- qualifiedName)
- }
- ind := strings.LastIndex(qualifiedName, ".")
- if ind <= 0 || ind == len(qualifiedName)-1 {
- return nil, fmt.Errorf("%s must refer to a valid qualified name: %s",
- kind, qualifiedName)
- }
- aliasRef, found := c.aliasSet()[alias]
- if found {
- return nil, fmt.Errorf(
- "%s collides with existing reference: name=%s, %s=%s, existing=%s",
- kind, qualifiedName, kind, alias, aliasRef)
- }
- if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias {
- return nil, fmt.Errorf(
- "%s collides with container name: name=%s, %s=%s, container=%s",
- kind, qualifiedName, kind, alias, c.Name())
- }
- if c == nil {
- c = &Container{}
- }
- if c.aliases == nil {
- c.aliases = make(map[string]string)
- }
- c.aliases[alias] = qualifiedName
- return c, nil
- }
-}
-
-// Name sets the fully-qualified name of the Container.
-func Name(name string) ContainerOption {
- return func(c *Container) (*Container, error) {
- if len(name) > 0 && name[0:1] == "." {
- return nil, fmt.Errorf("container name must not contain a leading '.': %s", name)
- }
- if c.Name() == name {
- return c, nil
- }
- if c == nil {
- return &Container{name: name}, nil
- }
- c.name = name
- return c, nil
- }
-}
-
-// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
-// 'found' value that indicates if the conversion is successful.
-func ToQualifiedName(e *exprpb.Expr) (string, bool) {
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- id := e.GetIdentExpr()
- return id.GetName(), true
- case *exprpb.Expr_SelectExpr:
- sel := e.GetSelectExpr()
- // Test only expressions are not valid as qualified names.
- if sel.GetTestOnly() {
- return "", false
- }
- if qual, found := ToQualifiedName(sel.GetOperand()); found {
- return qual + "." + sel.GetField(), true
- }
- }
- return "", false
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/cost.go b/etcd/vendor/github.com/google/cel-go/common/cost.go
deleted file mode 100644
index 5e24bd0f47..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/cost.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-const (
- // SelectAndIdentCost is the cost of an operation that accesses an identifier or performs a select.
- SelectAndIdentCost = 1
-
- // ConstCost is the cost of an operation that accesses a constant.
- ConstCost = 0
-
- // ListCreateBaseCost is the base cost of any operation that creates a new list.
- ListCreateBaseCost = 10
-
- // MapCreateBaseCost is the base cost of any operation that creates a new map.
- MapCreateBaseCost = 30
-
- // StructCreateBaseCost is the base cost of any operation that creates a new struct.
- StructCreateBaseCost = 40
-
- // StringTraversalCostFactor is multiplied to a length of a string when computing the cost of traversing the entire
- // string once.
- StringTraversalCostFactor = 0.1
-
- // RegexStringLengthCostFactor is multiplied ot the length of a regex string pattern when computing the cost of
- // applying the regex to a string of unit cost.
- RegexStringLengthCostFactor = 0.25
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
deleted file mode 100644
index cf5c5d2467..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
+++ /dev/null
@@ -1,18 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "debug.go",
- ],
- importpath = "github.com/google/cel-go/common/debug",
- deps = [
- "//common:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/debug/debug.go b/etcd/vendor/github.com/google/cel-go/common/debug/debug.go
deleted file mode 100644
index bec885424b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/debug/debug.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package debug provides tools to print a parsed expression graph and
-// adorn each expression element with additional metadata.
-package debug
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Adorner returns debug metadata that will be tacked on to the string
-// representation of an expression.
-type Adorner interface {
- // GetMetadata for the input context.
- GetMetadata(ctx interface{}) string
-}
-
-// Writer manages writing expressions to an internal string.
-type Writer interface {
- fmt.Stringer
-
- // Buffer pushes an expression into an internal queue of expressions to
- // write to a string.
- Buffer(e *exprpb.Expr)
-}
-
-type emptyDebugAdorner struct {
-}
-
-var emptyAdorner Adorner = &emptyDebugAdorner{}
-
-func (a *emptyDebugAdorner) GetMetadata(e interface{}) string {
- return ""
-}
-
-// ToDebugString gives the unadorned string representation of the Expr.
-func ToDebugString(e *exprpb.Expr) string {
- return ToAdornedDebugString(e, emptyAdorner)
-}
-
-// ToAdornedDebugString gives the adorned string representation of the Expr.
-func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string {
- w := newDebugWriter(adorner)
- w.Buffer(e)
- return w.String()
-}
-
-// debugWriter is used to print out pretty-printed debug strings.
-type debugWriter struct {
- adorner Adorner
- buffer bytes.Buffer
- indent int
- lineStart bool
-}
-
-func newDebugWriter(a Adorner) *debugWriter {
- return &debugWriter{
- adorner: a,
- indent: 0,
- lineStart: true,
- }
-}
-
-func (w *debugWriter) Buffer(e *exprpb.Expr) {
- if e == nil {
- return
- }
- switch e.ExprKind.(type) {
- case *exprpb.Expr_ConstExpr:
- w.append(formatLiteral(e.GetConstExpr()))
- case *exprpb.Expr_IdentExpr:
- w.append(e.GetIdentExpr().Name)
- case *exprpb.Expr_SelectExpr:
- w.appendSelect(e.GetSelectExpr())
- case *exprpb.Expr_CallExpr:
- w.appendCall(e.GetCallExpr())
- case *exprpb.Expr_ListExpr:
- w.appendList(e.GetListExpr())
- case *exprpb.Expr_StructExpr:
- w.appendStruct(e.GetStructExpr())
- case *exprpb.Expr_ComprehensionExpr:
- w.appendComprehension(e.GetComprehensionExpr())
- }
- w.adorn(e)
-}
-
-func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) {
- w.Buffer(sel.GetOperand())
- w.append(".")
- w.append(sel.GetField())
- if sel.TestOnly {
- w.append("~test-only~")
- }
-}
-
-func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
- if call.Target != nil {
- w.Buffer(call.GetTarget())
- w.append(".")
- }
- w.append(call.GetFunction())
- w.append("(")
- if len(call.GetArgs()) > 0 {
- w.addIndent()
- w.appendLine()
- for i, arg := range call.GetArgs() {
- if i > 0 {
- w.append(",")
- w.appendLine()
- }
- w.Buffer(arg)
- }
- w.removeIndent()
- w.appendLine()
- }
- w.append(")")
-}
-
-func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
- w.append("[")
- if len(list.GetElements()) > 0 {
- w.appendLine()
- w.addIndent()
- for i, elem := range list.GetElements() {
- if i > 0 {
- w.append(",")
- w.appendLine()
- }
- w.Buffer(elem)
- }
- w.removeIndent()
- w.appendLine()
- }
- w.append("]")
-}
-
-func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) {
- if obj.MessageName != "" {
- w.appendObject(obj)
- } else {
- w.appendMap(obj)
- }
-}
-
-func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
- w.append(obj.GetMessageName())
- w.append("{")
- if len(obj.GetEntries()) > 0 {
- w.appendLine()
- w.addIndent()
- for i, entry := range obj.GetEntries() {
- if i > 0 {
- w.append(",")
- w.appendLine()
- }
- w.append(entry.GetFieldKey())
- w.append(":")
- w.Buffer(entry.GetValue())
- w.adorn(entry)
- }
- w.removeIndent()
- w.appendLine()
- }
- w.append("}")
-}
-
-func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
- w.append("{")
- if len(obj.GetEntries()) > 0 {
- w.appendLine()
- w.addIndent()
- for i, entry := range obj.GetEntries() {
- if i > 0 {
- w.append(",")
- w.appendLine()
- }
- w.Buffer(entry.GetMapKey())
- w.append(":")
- w.Buffer(entry.GetValue())
- w.adorn(entry)
- }
- w.removeIndent()
- w.appendLine()
- }
- w.append("}")
-}
-
-func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) {
- w.append("__comprehension__(")
- w.addIndent()
- w.appendLine()
- w.append("// Variable")
- w.appendLine()
- w.append(comprehension.GetIterVar())
- w.append(",")
- w.appendLine()
- w.append("// Target")
- w.appendLine()
- w.Buffer(comprehension.GetIterRange())
- w.append(",")
- w.appendLine()
- w.append("// Accumulator")
- w.appendLine()
- w.append(comprehension.GetAccuVar())
- w.append(",")
- w.appendLine()
- w.append("// Init")
- w.appendLine()
- w.Buffer(comprehension.GetAccuInit())
- w.append(",")
- w.appendLine()
- w.append("// LoopCondition")
- w.appendLine()
- w.Buffer(comprehension.GetLoopCondition())
- w.append(",")
- w.appendLine()
- w.append("// LoopStep")
- w.appendLine()
- w.Buffer(comprehension.GetLoopStep())
- w.append(",")
- w.appendLine()
- w.append("// Result")
- w.appendLine()
- w.Buffer(comprehension.GetResult())
- w.append(")")
- w.removeIndent()
-}
-
-func formatLiteral(c *exprpb.Constant) string {
- switch c.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- return fmt.Sprintf("%t", c.GetBoolValue())
- case *exprpb.Constant_BytesValue:
- return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue()))
- case *exprpb.Constant_DoubleValue:
- return fmt.Sprintf("%v", c.GetDoubleValue())
- case *exprpb.Constant_Int64Value:
- return fmt.Sprintf("%d", c.GetInt64Value())
- case *exprpb.Constant_StringValue:
- return strconv.Quote(c.GetStringValue())
- case *exprpb.Constant_Uint64Value:
- return fmt.Sprintf("%du", c.GetUint64Value())
- case *exprpb.Constant_NullValue:
- return "null"
- default:
- panic("Unknown constant type")
- }
-}
-
-func (w *debugWriter) append(s string) {
- w.doIndent()
- w.buffer.WriteString(s)
-}
-
-func (w *debugWriter) appendFormat(f string, args ...interface{}) {
- w.append(fmt.Sprintf(f, args...))
-}
-
-func (w *debugWriter) doIndent() {
- if w.lineStart {
- w.lineStart = false
- w.buffer.WriteString(strings.Repeat(" ", w.indent))
- }
-}
-
-func (w *debugWriter) adorn(e interface{}) {
- w.append(w.adorner.GetMetadata(e))
-}
-
-func (w *debugWriter) appendLine() {
- w.buffer.WriteString("\n")
- w.lineStart = true
-}
-
-func (w *debugWriter) addIndent() {
- w.indent++
-}
-
-func (w *debugWriter) removeIndent() {
- w.indent--
- if w.indent < 0 {
- panic("negative indent")
- }
-}
-
-func (w *debugWriter) String() string {
- return w.buffer.String()
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/doc.go b/etcd/vendor/github.com/google/cel-go/common/doc.go
deleted file mode 100644
index 5362fdfe4b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package common defines types and utilities common to expression parsing,
-// checking, and interpretation
-package common
diff --git a/etcd/vendor/github.com/google/cel-go/common/error.go b/etcd/vendor/github.com/google/cel-go/common/error.go
deleted file mode 100644
index f91f7f8d10..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/error.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-
- "golang.org/x/text/width"
-)
-
-// Error type which references a location within source and a message.
-type Error struct {
- Location Location
- Message string
-}
-
-const (
- dot = "."
- ind = "^"
-
- // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet.
- maxSnippetLength = 16384
-)
-
-var (
- wideDot = width.Widen.String(dot)
- wideInd = width.Widen.String(ind)
-)
-
-// ToDisplayString decorates the error message with the source location.
-func (e *Error) ToDisplayString(source Source) string {
- var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
- source.Description(),
- e.Location.Line(),
- e.Location.Column()+1, // add one to the 0-based column for display
- e.Message)
- if snippet, found := source.Snippet(e.Location.Line()); found && len(snippet) <= maxSnippetLength {
- snippet := strings.Replace(snippet, "\t", " ", -1)
- srcLine := "\n | " + snippet
- var bytes = []byte(snippet)
- var indLine = "\n | "
- for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ {
- _, sz := utf8.DecodeRune(bytes)
- bytes = bytes[sz:]
- if sz > 1 {
- indLine += wideDot
- } else {
- indLine += dot
- }
- }
- if _, sz := utf8.DecodeRune(bytes); sz > 1 {
- indLine += wideInd
- } else {
- indLine += ind
- }
- result += srcLine + indLine
- }
- return result
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/errors.go b/etcd/vendor/github.com/google/cel-go/common/errors.go
deleted file mode 100644
index daebba8609..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/errors.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// Errors type which contains a list of errors observed during parsing.
-type Errors struct {
- errors []Error
- source Source
- numErrors int
- maxErrorsToReport int
-}
-
-// NewErrors creates a new instance of the Errors type.
-func NewErrors(source Source) *Errors {
- return &Errors{
- errors: []Error{},
- source: source,
- maxErrorsToReport: 100,
- }
-}
-
-// ReportError records an error at a source location.
-func (e *Errors) ReportError(l Location, format string, args ...interface{}) {
- e.numErrors++
- if e.numErrors > e.maxErrorsToReport {
- return
- }
- err := Error{
- Location: l,
- Message: fmt.Sprintf(format, args...),
- }
- e.errors = append(e.errors, err)
-}
-
-// GetErrors returns the list of observed errors.
-func (e *Errors) GetErrors() []Error {
- return e.errors[:]
-}
-
-// Append creates a new Errors object with the current and input errors.
-func (e *Errors) Append(errs []Error) *Errors {
- return &Errors{
- errors: append(e.errors, errs...),
- source: e.source,
- numErrors: e.numErrors + len(errs),
- maxErrorsToReport: e.maxErrorsToReport,
- }
-}
-
-// ToDisplayString returns the error set to a newline delimited string.
-func (e *Errors) ToDisplayString() string {
- errorsInString := e.maxErrorsToReport
- if e.numErrors > e.maxErrorsToReport {
- // add one more error to indicate the number of errors truncated.
- errorsInString++
- } else {
- // otherwise the error set will just contain the number of errors.
- errorsInString = e.numErrors
- }
-
- result := make([]string, errorsInString)
- sort.SliceStable(e.errors, func(i, j int) bool {
- ei := e.errors[i].Location
- ej := e.errors[j].Location
- return ei.Line() < ej.Line() ||
- (ei.Line() == ej.Line() && ei.Column() < ej.Column())
- })
- for i, err := range e.errors {
- // This can happen during the append of two errors objects
- if i >= e.maxErrorsToReport {
- break
- }
- result[i] = err.ToDisplayString(e.source)
- }
- if e.numErrors > e.maxErrorsToReport {
- result[e.maxErrorsToReport] = fmt.Sprintf("%d more errors were truncated", e.numErrors-e.maxErrorsToReport)
- }
- return strings.Join(result, "\n")
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/location.go b/etcd/vendor/github.com/google/cel-go/common/location.go
deleted file mode 100644
index ec3fa7cb50..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/location.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-// Location interface to represent a location within Source.
-type Location interface {
- Line() int // 1-based line number within source.
- Column() int // 0-based column number within source.
-}
-
-// SourceLocation helper type to manually construct a location.
-type SourceLocation struct {
- line int
- column int
-}
-
-var (
- // Location implements the SourceLocation interface.
- _ Location = &SourceLocation{}
- // NoLocation is a particular illegal location.
- NoLocation = &SourceLocation{-1, -1}
-)
-
-// NewLocation creates a new location.
-func NewLocation(line, column int) Location {
- return &SourceLocation{
- line: line,
- column: column}
-}
-
-// Line returns the 1-based line of the location.
-func (l *SourceLocation) Line() int {
- return l.line
-}
-
-// Column returns the 0-based column number of the location.
-func (l *SourceLocation) Column() int {
- return l.column
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
deleted file mode 100644
index b5b67f0623..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
+++ /dev/null
@@ -1,14 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "operators.go",
- ],
- importpath = "github.com/google/cel-go/common/operators",
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/operators/operators.go b/etcd/vendor/github.com/google/cel-go/common/operators/operators.go
deleted file mode 100644
index fa25dfb7f0..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/operators/operators.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package operators defines the internal function names of operators.
-//
-// All operators in the expression language are modelled as function calls.
-package operators
-
-// String "names" for CEL operators.
-const (
- // Symbolic operators.
- Conditional = "_?_:_"
- LogicalAnd = "_&&_"
- LogicalOr = "_||_"
- LogicalNot = "!_"
- Equals = "_==_"
- NotEquals = "_!=_"
- Less = "_<_"
- LessEquals = "_<=_"
- Greater = "_>_"
- GreaterEquals = "_>=_"
- Add = "_+_"
- Subtract = "_-_"
- Multiply = "_*_"
- Divide = "_/_"
- Modulo = "_%_"
- Negate = "-_"
- Index = "_[_]"
-
- // Macros, must have a valid identifier.
- Has = "has"
- All = "all"
- Exists = "exists"
- ExistsOne = "exists_one"
- Map = "map"
- Filter = "filter"
-
- // Named operators, must not have be valid identifiers.
- NotStrictlyFalse = "@not_strictly_false"
- In = "@in"
-
- // Deprecated: named operators with valid identifiers.
- OldNotStrictlyFalse = "__not_strictly_false__"
- OldIn = "_in_"
-)
-
-var (
- operators = map[string]string{
- "+": Add,
- "/": Divide,
- "==": Equals,
- ">": Greater,
- ">=": GreaterEquals,
- "in": In,
- "<": Less,
- "<=": LessEquals,
- "%": Modulo,
- "*": Multiply,
- "!=": NotEquals,
- "-": Subtract,
- }
- // operatorMap of the operator symbol which refers to a struct containing the display name,
- // if applicable, the operator precedence, and the arity.
- //
- // If the symbol does not have a display name listed in the map, it is only because it requires
- // special casing to render properly as text.
- operatorMap = map[string]struct {
- displayName string
- precedence int
- arity int
- }{
- Conditional: {displayName: "", precedence: 8, arity: 3},
- LogicalOr: {displayName: "||", precedence: 7, arity: 2},
- LogicalAnd: {displayName: "&&", precedence: 6, arity: 2},
- Equals: {displayName: "==", precedence: 5, arity: 2},
- Greater: {displayName: ">", precedence: 5, arity: 2},
- GreaterEquals: {displayName: ">=", precedence: 5, arity: 2},
- In: {displayName: "in", precedence: 5, arity: 2},
- Less: {displayName: "<", precedence: 5, arity: 2},
- LessEquals: {displayName: "<=", precedence: 5, arity: 2},
- NotEquals: {displayName: "!=", precedence: 5, arity: 2},
- OldIn: {displayName: "in", precedence: 5, arity: 2},
- Add: {displayName: "+", precedence: 4, arity: 2},
- Subtract: {displayName: "-", precedence: 4, arity: 2},
- Divide: {displayName: "/", precedence: 3, arity: 2},
- Modulo: {displayName: "%", precedence: 3, arity: 2},
- Multiply: {displayName: "*", precedence: 3, arity: 2},
- LogicalNot: {displayName: "!", precedence: 2, arity: 1},
- Negate: {displayName: "-", precedence: 2, arity: 1},
- Index: {displayName: "", precedence: 1, arity: 2},
- }
-)
-
-// Find the internal function name for an operator, if the input text is one.
-func Find(text string) (string, bool) {
- op, found := operators[text]
- return op, found
-}
-
-// FindReverse returns the unmangled, text representation of the operator.
-func FindReverse(symbol string) (string, bool) {
- op, found := operatorMap[symbol]
- if !found {
- return "", false
- }
- return op.displayName, true
-}
-
-// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator.
-//
-// If the symbol does refer to an operator, but the operator does not have a display name the
-// result is false.
-func FindReverseBinaryOperator(symbol string) (string, bool) {
- op, found := operatorMap[symbol]
- if !found || op.arity != 2 {
- return "", false
- }
- if op.displayName == "" {
- return "", false
- }
- return op.displayName, true
-}
-
-// Precedence returns the operator precedence, where the higher the number indicates
-// higher precedence operations.
-func Precedence(symbol string) int {
- op, found := operatorMap[symbol]
- if !found {
- return 0
- }
- return op.precedence
-}
-
-// Arity returns the number of argument the operator takes
-// -1 is returned if an undefined symbol is provided
-func Arity(symbol string) int {
- op, found := operatorMap[symbol]
- if !found {
- return -1
- }
- return op.arity
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
deleted file mode 100644
index e46e2f4830..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
+++ /dev/null
@@ -1,14 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "overloads.go",
- ],
- importpath = "github.com/google/cel-go/common/overloads",
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/overloads/overloads.go b/etcd/vendor/github.com/google/cel-go/common/overloads/overloads.go
deleted file mode 100644
index 9ebaf6fabf..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/overloads/overloads.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package overloads defines the internal overload identifiers for function and
-// operator overloads.
-package overloads
-
-// Boolean logic overloads
-const (
- Conditional = "conditional"
- LogicalAnd = "logical_and"
- LogicalOr = "logical_or"
- LogicalNot = "logical_not"
- NotStrictlyFalse = "not_strictly_false"
- Equals = "equals"
- NotEquals = "not_equals"
- LessBool = "less_bool"
- LessInt64 = "less_int64"
- LessInt64Double = "less_int64_double"
- LessInt64Uint64 = "less_int64_uint64"
- LessUint64 = "less_uint64"
- LessUint64Double = "less_uint64_double"
- LessUint64Int64 = "less_uint64_int64"
- LessDouble = "less_double"
- LessDoubleInt64 = "less_double_int64"
- LessDoubleUint64 = "less_double_uint64"
- LessString = "less_string"
- LessBytes = "less_bytes"
- LessTimestamp = "less_timestamp"
- LessDuration = "less_duration"
- LessEqualsBool = "less_equals_bool"
- LessEqualsInt64 = "less_equals_int64"
- LessEqualsInt64Double = "less_equals_int64_double"
- LessEqualsInt64Uint64 = "less_equals_int64_uint64"
- LessEqualsUint64 = "less_equals_uint64"
- LessEqualsUint64Double = "less_equals_uint64_double"
- LessEqualsUint64Int64 = "less_equals_uint64_int64"
- LessEqualsDouble = "less_equals_double"
- LessEqualsDoubleInt64 = "less_equals_double_int64"
- LessEqualsDoubleUint64 = "less_equals_double_uint64"
- LessEqualsString = "less_equals_string"
- LessEqualsBytes = "less_equals_bytes"
- LessEqualsTimestamp = "less_equals_timestamp"
- LessEqualsDuration = "less_equals_duration"
- GreaterBool = "greater_bool"
- GreaterInt64 = "greater_int64"
- GreaterInt64Double = "greater_int64_double"
- GreaterInt64Uint64 = "greater_int64_uint64"
- GreaterUint64 = "greater_uint64"
- GreaterUint64Double = "greater_uint64_double"
- GreaterUint64Int64 = "greater_uint64_int64"
- GreaterDouble = "greater_double"
- GreaterDoubleInt64 = "greater_double_int64"
- GreaterDoubleUint64 = "greater_double_uint64"
- GreaterString = "greater_string"
- GreaterBytes = "greater_bytes"
- GreaterTimestamp = "greater_timestamp"
- GreaterDuration = "greater_duration"
- GreaterEqualsBool = "greater_equals_bool"
- GreaterEqualsInt64 = "greater_equals_int64"
- GreaterEqualsInt64Double = "greater_equals_int64_double"
- GreaterEqualsInt64Uint64 = "greater_equals_int64_uint64"
- GreaterEqualsUint64 = "greater_equals_uint64"
- GreaterEqualsUint64Double = "greater_equals_uint64_double"
- GreaterEqualsUint64Int64 = "greater_equals_uint64_int64"
- GreaterEqualsDouble = "greater_equals_double"
- GreaterEqualsDoubleInt64 = "greater_equals_double_int64"
- GreaterEqualsDoubleUint64 = "greater_equals_double_uint64"
- GreaterEqualsString = "greater_equals_string"
- GreaterEqualsBytes = "greater_equals_bytes"
- GreaterEqualsTimestamp = "greater_equals_timestamp"
- GreaterEqualsDuration = "greater_equals_duration"
-)
-
-// Math overloads
-const (
- AddInt64 = "add_int64"
- AddUint64 = "add_uint64"
- AddDouble = "add_double"
- AddString = "add_string"
- AddBytes = "add_bytes"
- AddList = "add_list"
- AddTimestampDuration = "add_timestamp_duration"
- AddDurationTimestamp = "add_duration_timestamp"
- AddDurationDuration = "add_duration_duration"
- SubtractInt64 = "subtract_int64"
- SubtractUint64 = "subtract_uint64"
- SubtractDouble = "subtract_double"
- SubtractTimestampTimestamp = "subtract_timestamp_timestamp"
- SubtractTimestampDuration = "subtract_timestamp_duration"
- SubtractDurationDuration = "subtract_duration_duration"
- MultiplyInt64 = "multiply_int64"
- MultiplyUint64 = "multiply_uint64"
- MultiplyDouble = "multiply_double"
- DivideInt64 = "divide_int64"
- DivideUint64 = "divide_uint64"
- DivideDouble = "divide_double"
- ModuloInt64 = "modulo_int64"
- ModuloUint64 = "modulo_uint64"
- NegateInt64 = "negate_int64"
- NegateDouble = "negate_double"
-)
-
-// Index overloads
-const (
- IndexList = "index_list"
- IndexMap = "index_map"
- IndexMessage = "index_message" // TODO: introduce concept of types.Message
-)
-
-// In operators
-const (
- DeprecatedIn = "in"
- InList = "in_list"
- InMap = "in_map"
- InMessage = "in_message" // TODO: introduce concept of types.Message
-)
-
-// Size overloads
-const (
- Size = "size"
- SizeString = "size_string"
- SizeBytes = "size_bytes"
- SizeList = "size_list"
- SizeMap = "size_map"
- SizeStringInst = "string_size"
- SizeBytesInst = "bytes_size"
- SizeListInst = "list_size"
- SizeMapInst = "map_size"
-)
-
-// String function names.
-const (
- Contains = "contains"
- EndsWith = "endsWith"
- Matches = "matches"
- StartsWith = "startsWith"
-)
-
-// String function overload names.
-const (
- ContainsString = "contains_string"
- EndsWithString = "ends_with_string"
- MatchesString = "matches_string"
- StartsWithString = "starts_with_string"
-)
-
-// Time-based functions.
-const (
- TimeGetFullYear = "getFullYear"
- TimeGetMonth = "getMonth"
- TimeGetDayOfYear = "getDayOfYear"
- TimeGetDate = "getDate"
- TimeGetDayOfMonth = "getDayOfMonth"
- TimeGetDayOfWeek = "getDayOfWeek"
- TimeGetHours = "getHours"
- TimeGetMinutes = "getMinutes"
- TimeGetSeconds = "getSeconds"
- TimeGetMilliseconds = "getMilliseconds"
-)
-
-// Timestamp overloads for time functions without timezones.
-const (
- TimestampToYear = "timestamp_to_year"
- TimestampToMonth = "timestamp_to_month"
- TimestampToDayOfYear = "timestamp_to_day_of_year"
- TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month"
- TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based"
- TimestampToDayOfWeek = "timestamp_to_day_of_week"
- TimestampToHours = "timestamp_to_hours"
- TimestampToMinutes = "timestamp_to_minutes"
- TimestampToSeconds = "timestamp_to_seconds"
- TimestampToMilliseconds = "timestamp_to_milliseconds"
-)
-
-// Timestamp overloads for time functions with timezones.
-const (
- TimestampToYearWithTz = "timestamp_to_year_with_tz"
- TimestampToMonthWithTz = "timestamp_to_month_with_tz"
- TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz"
- TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz"
- TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz"
- TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz"
- TimestampToHoursWithTz = "timestamp_to_hours_with_tz"
- TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz"
- TimestampToSecondsWithTz = "timestamp_to_seconds_tz"
- TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz"
-)
-
-// Duration overloads for time functions.
-const (
- DurationToHours = "duration_to_hours"
- DurationToMinutes = "duration_to_minutes"
- DurationToSeconds = "duration_to_seconds"
- DurationToMilliseconds = "duration_to_milliseconds"
-)
-
-// Type conversion methods and overloads
-const (
- TypeConvertInt = "int"
- TypeConvertUint = "uint"
- TypeConvertDouble = "double"
- TypeConvertBool = "bool"
- TypeConvertString = "string"
- TypeConvertBytes = "bytes"
- TypeConvertTimestamp = "timestamp"
- TypeConvertDuration = "duration"
- TypeConvertType = "type"
- TypeConvertDyn = "dyn"
-)
-
-// Int conversion functions.
-const (
- IntToInt = "int64_to_int64"
- UintToInt = "uint64_to_int64"
- DoubleToInt = "double_to_int64"
- StringToInt = "string_to_int64"
- TimestampToInt = "timestamp_to_int64"
- DurationToInt = "duration_to_int64"
-)
-
-// Uint conversion functions.
-const (
- UintToUint = "uint64_to_uint64"
- IntToUint = "int64_to_uint64"
- DoubleToUint = "double_to_uint64"
- StringToUint = "string_to_uint64"
-)
-
-// Double conversion functions.
-const (
- DoubleToDouble = "double_to_double"
- IntToDouble = "int64_to_double"
- UintToDouble = "uint64_to_double"
- StringToDouble = "string_to_double"
-)
-
-// Bool conversion functions.
-const (
- BoolToBool = "bool_to_bool"
- StringToBool = "string_to_bool"
-)
-
-// Bytes conversion functions.
-const (
- BytesToBytes = "bytes_to_bytes"
- StringToBytes = "string_to_bytes"
-)
-
-// String conversion functions.
-const (
- StringToString = "string_to_string"
- BoolToString = "bool_to_string"
- IntToString = "int64_to_string"
- UintToString = "uint64_to_string"
- DoubleToString = "double_to_string"
- BytesToString = "bytes_to_string"
- TimestampToString = "timestamp_to_string"
- DurationToString = "duration_to_string"
-)
-
-// Timestamp conversion functions
-const (
- TimestampToTimestamp = "timestamp_to_timestamp"
- StringToTimestamp = "string_to_timestamp"
- IntToTimestamp = "int64_to_timestamp"
-)
-
-// Convert duration from string
-const (
- DurationToDuration = "duration_to_duration"
- StringToDuration = "string_to_duration"
- IntToDuration = "int64_to_duration"
-)
-
-// Convert to dyn
-const (
- ToDyn = "to_dyn"
-)
-
-// Comprehensions helper methods, not directly accessible via a developer.
-const (
- Iterator = "@iterator"
- HasNext = "@hasNext"
- Next = "@next"
-)
-
-// IsTypeConversionFunction returns whether the input function is a standard library type
-// conversion function.
-func IsTypeConversionFunction(function string) bool {
- switch function {
- case TypeConvertBool,
- TypeConvertBytes,
- TypeConvertDouble,
- TypeConvertDuration,
- TypeConvertDyn,
- TypeConvertInt,
- TypeConvertString,
- TypeConvertTimestamp,
- TypeConvertType,
- TypeConvertUint:
- return true
- default:
- return false
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/runes/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
deleted file mode 100644
index bb30242cfa..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
+++ /dev/null
@@ -1,25 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "buffer.go",
- ],
- importpath = "github.com/google/cel-go/common/runes",
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "buffer_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/runes/buffer.go b/etcd/vendor/github.com/google/cel-go/common/runes/buffer.go
deleted file mode 100644
index 50aac0b273..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/runes/buffer.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package runes provides interfaces and utilities for working with runes.
-package runes
-
-import (
- "strings"
- "unicode/utf8"
-)
-
-// Buffer is an interface for accessing a contiguous array of code points.
-type Buffer interface {
- Get(i int) rune
- Slice(i, j int) string
- Len() int
-}
-
-type emptyBuffer struct{}
-
-func (e *emptyBuffer) Get(i int) rune {
- panic("slice index out of bounds")
-}
-
-func (e *emptyBuffer) Slice(i, j int) string {
- if i != 0 || i != j {
- panic("slice index out of bounds")
- }
- return ""
-}
-
-func (e *emptyBuffer) Len() int {
- return 0
-}
-
-var _ Buffer = &emptyBuffer{}
-
-// asciiBuffer is an implementation for an array of code points that contain code points only from
-// the ASCII character set.
-type asciiBuffer struct {
- arr []byte
-}
-
-func (a *asciiBuffer) Get(i int) rune {
- return rune(uint32(a.arr[i]))
-}
-
-func (a *asciiBuffer) Slice(i, j int) string {
- return string(a.arr[i:j])
-}
-
-func (a *asciiBuffer) Len() int {
- return len(a.arr)
-}
-
-var _ Buffer = &asciiBuffer{}
-
-// basicBuffer is an implementation for an array of code points that contain code points from both
-// the Latin-1 character set and Basic Multilingual Plane.
-type basicBuffer struct {
- arr []uint16
-}
-
-func (b *basicBuffer) Get(i int) rune {
- return rune(uint32(b.arr[i]))
-}
-
-func (b *basicBuffer) Slice(i, j int) string {
- var str strings.Builder
- str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3.
- for ; i < j; i++ {
- str.WriteRune(rune(uint32(b.arr[i])))
- }
- return str.String()
-}
-
-func (b *basicBuffer) Len() int {
- return len(b.arr)
-}
-
-var _ Buffer = &basicBuffer{}
-
-// supplementalBuffer is an implementation for an array of code points that contain code points from
-// the Latin-1 character set, Basic Multilingual Plane, or the Supplemental Multilingual Plane.
-type supplementalBuffer struct {
- arr []rune
-}
-
-func (s *supplementalBuffer) Get(i int) rune {
- return rune(uint32(s.arr[i]))
-}
-
-func (s *supplementalBuffer) Slice(i, j int) string {
- return string(s.arr[i:j])
-}
-
-func (s *supplementalBuffer) Len() int {
- return len(s.arr)
-}
-
-var _ Buffer = &supplementalBuffer{}
-
-var nilBuffer = &emptyBuffer{}
-
-// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
-// the encoded code points contained within.
-//
-// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
-// each index represents a code point by itself without needing to use an array of rune. At first
-// we assume all code points are less than or equal to '\u007f'. If this holds true, the
-// underlying storage is a byte array containing only ASCII characters. If we encountered a code
-// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the
-// elements of previous byte array to the uint16 array, and continue. If this holds true, the
-// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual
-// Plane. If we encounter a code point above '\uffff' we allocate an rune array, copy the previous
-// elements of the byte or uint16 array, and continue. The underlying storage is an rune array
-// containing any Unicode character.
-func NewBuffer(data string) Buffer {
- if len(data) == 0 {
- return nilBuffer
- }
- var (
- idx = 0
- buf8 = make([]byte, 0, len(data))
- buf16 []uint16
- buf32 []rune
- )
- for idx < len(data) {
- r, s := utf8.DecodeRuneInString(data[idx:])
- idx += s
- if r < utf8.RuneSelf {
- buf8 = append(buf8, byte(r))
- continue
- }
- if r <= 0xffff {
- buf16 = make([]uint16, len(buf8), len(data))
- for i, v := range buf8 {
- buf16[i] = uint16(v)
- }
- buf8 = nil
- buf16 = append(buf16, uint16(r))
- goto copy16
- }
- buf32 = make([]rune, len(buf8), len(data))
- for i, v := range buf8 {
- buf32[i] = rune(uint32(v))
- }
- buf8 = nil
- buf32 = append(buf32, r)
- goto copy32
- }
- return &asciiBuffer{
- arr: buf8,
- }
-copy16:
- for idx < len(data) {
- r, s := utf8.DecodeRuneInString(data[idx:])
- idx += s
- if r <= 0xffff {
- buf16 = append(buf16, uint16(r))
- continue
- }
- buf32 = make([]rune, len(buf16), len(data))
- for i, v := range buf16 {
- buf32[i] = rune(uint32(v))
- }
- buf16 = nil
- buf32 = append(buf32, r)
- goto copy32
- }
- return &basicBuffer{
- arr: buf16,
- }
-copy32:
- for idx < len(data) {
- r, s := utf8.DecodeRuneInString(data[idx:])
- idx += s
- buf32 = append(buf32, r)
- }
- return &supplementalBuffer{
- arr: buf32,
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/source.go b/etcd/vendor/github.com/google/cel-go/common/source.go
deleted file mode 100644
index 52377d9308..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/source.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-import (
- "strings"
- "unicode/utf8"
-
- "github.com/google/cel-go/common/runes"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Source interface for filter source contents.
-type Source interface {
- // Content returns the source content represented as a string.
- // Examples contents are the single file contents, textbox field,
- // or url parameter.
- Content() string
-
- // Description gives a brief description of the source.
- // Example descriptions are a file name or ui element.
- Description() string
-
- // LineOffsets gives the character offsets at which lines occur.
- // The zero-th entry should refer to the break between the first
- // and second line, or EOF if there is only one line of source.
- LineOffsets() []int32
-
- // LocationOffset translates a Location to an offset.
- // Given the line and column of the Location returns the
- // Location's character offset in the Source, and a bool
- // indicating whether the Location was found.
- LocationOffset(location Location) (int32, bool)
-
- // OffsetLocation translates a character offset to a Location, or
- // false if the conversion was not feasible.
- OffsetLocation(offset int32) (Location, bool)
-
- // NewLocation takes an input line and column and produces a Location.
- // The default behavior is to treat the line and column as absolute,
- // but concrete derivations may use this method to convert a relative
- // line and column position into an absolute location.
- NewLocation(line, col int) Location
-
- // Snippet returns a line of content and whether the line was found.
- Snippet(line int) (string, bool)
-}
-
-// The sourceImpl type implementation of the Source interface.
-type sourceImpl struct {
- runes.Buffer
- description string
- lineOffsets []int32
- idOffsets map[int64]int32
-}
-
-var _ runes.Buffer = &sourceImpl{}
-
-// TODO(jimlarson) "Character offsets" should index the code points
-// within the UTF-8 encoded string. It currently indexes bytes.
-// Can be accomplished by using rune[] instead of string for contents.
-
-// NewTextSource creates a new Source from the input text string.
-func NewTextSource(text string) Source {
- return NewStringSource(text, " ")
-}
-
-// NewStringSource creates a new Source from the given contents and description.
-func NewStringSource(contents string, description string) Source {
- // Compute line offsets up front as they are referred to frequently.
- lines := strings.Split(contents, "\n")
- offsets := make([]int32, len(lines))
- var offset int32
- for i, line := range lines {
- offset = offset + int32(utf8.RuneCountInString(line)) + 1
- offsets[int32(i)] = offset
- }
- return &sourceImpl{
- Buffer: runes.NewBuffer(contents),
- description: description,
- lineOffsets: offsets,
- idOffsets: map[int64]int32{},
- }
-}
-
-// NewInfoSource creates a new Source from a SourceInfo.
-func NewInfoSource(info *exprpb.SourceInfo) Source {
- return &sourceImpl{
- Buffer: runes.NewBuffer(""),
- description: info.GetLocation(),
- lineOffsets: info.GetLineOffsets(),
- idOffsets: info.GetPositions(),
- }
-}
-
-// Content implements the Source interface method.
-func (s *sourceImpl) Content() string {
- return s.Slice(0, s.Len())
-}
-
-// Description implements the Source interface method.
-func (s *sourceImpl) Description() string {
- return s.description
-}
-
-// LineOffsets implements the Source interface method.
-func (s *sourceImpl) LineOffsets() []int32 {
- return s.lineOffsets
-}
-
-// LocationOffset implements the Source interface method.
-func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
- if lineOffset, found := s.findLineOffset(location.Line()); found {
- return lineOffset + int32(location.Column()), true
- }
- return -1, false
-}
-
-// NewLocation implements the Source interface method.
-func (s *sourceImpl) NewLocation(line, col int) Location {
- return NewLocation(line, col)
-}
-
-// OffsetLocation implements the Source interface method.
-func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
- line, lineOffset := s.findLine(offset)
- return NewLocation(int(line), int(offset-lineOffset)), true
-}
-
-// Snippet implements the Source interface method.
-func (s *sourceImpl) Snippet(line int) (string, bool) {
- charStart, found := s.findLineOffset(line)
- if !found || s.Len() == 0 {
- return "", false
- }
- charEnd, found := s.findLineOffset(line + 1)
- if found {
- return s.Slice(int(charStart), int(charEnd-1)), true
- }
- return s.Slice(int(charStart), s.Len()), true
-}
-
-// findLineOffset returns the offset where the (1-indexed) line begins,
-// or false if line doesn't exist.
-func (s *sourceImpl) findLineOffset(line int) (int32, bool) {
- if line == 1 {
- return 0, true
- }
- if line > 1 && line <= int(len(s.lineOffsets)) {
- offset := s.lineOffsets[line-2]
- return offset, true
- }
- return -1, false
-}
-
-// findLine finds the line that contains the given character offset and
-// returns the line number and offset of the beginning of that line.
-// Note that the last line is treated as if it contains all offsets
-// beyond the end of the actual source.
-func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
- var line int32 = 1
- for _, lineOffset := range s.lineOffsets {
- if lineOffset > characterOffset {
- break
- } else {
- line++
- }
- }
- if line == 1 {
- return line, 0
- }
- return line, s.lineOffsets[line-2]
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/types/BUILD.bazel
deleted file mode 100644
index 5f1b1cd1fd..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/BUILD.bazel
+++ /dev/null
@@ -1,89 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "any_value.go",
- "bool.go",
- "bytes.go",
- "compare.go",
- "double.go",
- "duration.go",
- "err.go",
- "int.go",
- "iterator.go",
- "json_value.go",
- "list.go",
- "map.go",
- "null.go",
- "object.go",
- "overflow.go",
- "provider.go",
- "string.go",
- "timestamp.go",
- "type.go",
- "uint.go",
- "unknown.go",
- "util.go",
- ],
- importpath = "github.com/google/cel-go/common/types",
- deps = [
- "//common/overloads:go_default_library",
- "//common/types/pb:go_default_library",
- "//common/types/ref:go_default_library",
- "//common/types/traits:go_default_library",
- "@com_github_stoewer_go_strcase//:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_genproto//googleapis/rpc/status:go_default_library",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- "@org_golang_google_protobuf//encoding/protojson:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
- "@org_golang_google_protobuf//types/known/anypb:go_default_library",
- "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
- "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "bool_test.go",
- "bytes_test.go",
- "double_test.go",
- "duration_test.go",
- "int_test.go",
- "json_list_test.go",
- "json_struct_test.go",
- "list_test.go",
- "map_test.go",
- "null_test.go",
- "object_test.go",
- "provider_test.go",
- "string_test.go",
- "timestamp_test.go",
- "type_test.go",
- "uint_test.go",
- "util_test.go",
- ],
- embed = [":go_default_library"],
- deps = [
- "//common/types/ref:go_default_library",
- "//test:go_default_library",
- "//test/proto3pb:test_all_types_go_proto",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//encoding/protojson:go_default_library",
- "@org_golang_google_protobuf//types/known/anypb:go_default_library",
- "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
- "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/any_value.go b/etcd/vendor/github.com/google/cel-go/common/types/any_value.go
deleted file mode 100644
index cda0f13acf..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/any_value.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
-)
-
-// anyValueType constant representing the reflected type of google.protobuf.Any.
-var anyValueType = reflect.TypeOf(&anypb.Any{})
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/bool.go b/etcd/vendor/github.com/google/cel-go/common/types/bool.go
deleted file mode 100644
index 1b55ba9529..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/bool.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
- "strconv"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// Bool type that implements ref.Val and supports comparison and negation.
-type Bool bool
-
-var (
- // BoolType singleton.
- BoolType = NewTypeValue("bool",
- traits.ComparerType,
- traits.NegatorType)
-
- // boolWrapperType golang reflected type for protobuf bool wrapper type.
- boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{})
-)
-
-// Boolean constants
-const (
- False = Bool(false)
- True = Bool(true)
-)
-
-// Compare implements the traits.Comparer interface method.
-func (b Bool) Compare(other ref.Val) ref.Val {
- otherBool, ok := other.(Bool)
- if !ok {
- return ValOrErr(other, "no such overload")
- }
- if b == otherBool {
- return IntZero
- }
- if !b && otherBool {
- return IntNegOne
- }
- return IntOne
-}
-
-// ConvertToNative implements the ref.Val interface method.
-func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.Bool:
- return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any.
- return anypb.New(wrapperspb.Bool(bool(b)))
- case boolWrapperType:
- // Convert the bool to a wrapperspb.BoolValue.
- return wrapperspb.Bool(bool(b)), nil
- case jsonValueType:
- // Return the bool as a new structpb.Value.
- return structpb.NewBoolValue(bool(b)), nil
- default:
- if typeDesc.Elem().Kind() == reflect.Bool {
- p := bool(b)
- return &p, nil
- }
- }
- case reflect.Interface:
- bv := b.Value()
- if reflect.TypeOf(bv).Implements(typeDesc) {
- return bv, nil
- }
- if reflect.TypeOf(b).Implements(typeDesc) {
- return b, nil
- }
- }
- return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc)
-}
-
-// ConvertToType implements the ref.Val interface method.
-func (b Bool) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case StringType:
- return String(strconv.FormatBool(bool(b)))
- case BoolType:
- return b
- case TypeType:
- return BoolType
- }
- return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal)
-}
-
-// Equal implements the ref.Val interface method.
-func (b Bool) Equal(other ref.Val) ref.Val {
- otherBool, ok := other.(Bool)
- return Bool(ok && b == otherBool)
-}
-
-// Negate implements the traits.Negater interface method.
-func (b Bool) Negate() ref.Val {
- return !b
-}
-
-// Type implements the ref.Val interface method.
-func (b Bool) Type() ref.Type {
- return BoolType
-}
-
-// Value implements the ref.Val interface method.
-func (b Bool) Value() interface{} {
- return bool(b)
-}
-
-// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType.
-func IsBool(elem ref.Val) bool {
- switch v := elem.(type) {
- case Bool:
- return true
- case ref.Val:
- return v.Type() == BoolType
- default:
- return false
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/bytes.go b/etcd/vendor/github.com/google/cel-go/common/types/bytes.go
deleted file mode 100644
index 3575717ec7..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/bytes.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "reflect"
- "unicode/utf8"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// Bytes type that implements ref.Val and supports add, compare, and size
-// operations.
-type Bytes []byte
-
-var (
- // BytesType singleton.
- BytesType = NewTypeValue("bytes",
- traits.AdderType,
- traits.ComparerType,
- traits.SizerType)
-
- // byteWrapperType golang reflected type for protobuf bytes wrapper type.
- byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{})
-)
-
-// Add implements traits.Adder interface method by concatenating byte sequences.
-func (b Bytes) Add(other ref.Val) ref.Val {
- otherBytes, ok := other.(Bytes)
- if !ok {
- return ValOrErr(other, "no such overload")
- }
- return append(b, otherBytes...)
-}
-
-// Compare implements traits.Comparer interface method by lexicographic ordering.
-func (b Bytes) Compare(other ref.Val) ref.Val {
- otherBytes, ok := other.(Bytes)
- if !ok {
- return ValOrErr(other, "no such overload")
- }
- return Int(bytes.Compare(b, otherBytes))
-}
-
-// ConvertToNative implements the ref.Val interface method.
-func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.Array, reflect.Slice:
- return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Primitives must be wrapped before being set on an Any field.
- return anypb.New(wrapperspb.Bytes([]byte(b)))
- case byteWrapperType:
- // Convert the bytes to a wrapperspb.BytesValue.
- return wrapperspb.Bytes([]byte(b)), nil
- case jsonValueType:
- // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64.
- // The encoding below matches the golang 'encoding/json' behavior during marshaling,
- // which uses base64.StdEncoding.
- str := base64.StdEncoding.EncodeToString([]byte(b))
- return structpb.NewStringValue(str), nil
- }
- case reflect.Interface:
- bv := b.Value()
- if reflect.TypeOf(bv).Implements(typeDesc) {
- return bv, nil
- }
- if reflect.TypeOf(b).Implements(typeDesc) {
- return b, nil
- }
- }
- return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc)
-}
-
-// ConvertToType implements the ref.Val interface method.
-func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case StringType:
- if !utf8.Valid(b) {
- return NewErr("invalid UTF-8 in bytes, cannot convert to string")
- }
- return String(b)
- case BytesType:
- return b
- case TypeType:
- return BytesType
- }
- return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal)
-}
-
-// Equal implements the ref.Val interface method.
-func (b Bytes) Equal(other ref.Val) ref.Val {
- otherBytes, ok := other.(Bytes)
- return Bool(ok && bytes.Equal(b, otherBytes))
-}
-
-// Size implements the traits.Sizer interface method.
-func (b Bytes) Size() ref.Val {
- return Int(len(b))
-}
-
-// Type implements the ref.Val interface method.
-func (b Bytes) Type() ref.Type {
- return BytesType
-}
-
-// Value implements the ref.Val interface method.
-func (b Bytes) Value() interface{} {
- return []byte(b)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/compare.go b/etcd/vendor/github.com/google/cel-go/common/types/compare.go
deleted file mode 100644
index e196826180..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/compare.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "math"
-
- "github.com/google/cel-go/common/types/ref"
-)
-
-func compareDoubleInt(d Double, i Int) Int {
- if d < math.MinInt64 {
- return IntNegOne
- }
- if d > math.MaxInt64 {
- return IntOne
- }
- return compareDouble(d, Double(i))
-}
-
-func compareIntDouble(i Int, d Double) Int {
- return -compareDoubleInt(d, i)
-}
-
-func compareDoubleUint(d Double, u Uint) Int {
- if d < 0 {
- return IntNegOne
- }
- if d > math.MaxUint64 {
- return IntOne
- }
- return compareDouble(d, Double(u))
-}
-
-func compareUintDouble(u Uint, d Double) Int {
- return -compareDoubleUint(d, u)
-}
-
-func compareIntUint(i Int, u Uint) Int {
- if i < 0 || u > math.MaxInt64 {
- return IntNegOne
- }
- cmp := i - Int(u)
- if cmp < 0 {
- return IntNegOne
- }
- if cmp > 0 {
- return IntOne
- }
- return IntZero
-}
-
-func compareUintInt(u Uint, i Int) Int {
- return -compareIntUint(i, u)
-}
-
-func compareDouble(a, b Double) Int {
- if a < b {
- return IntNegOne
- }
- if a > b {
- return IntOne
- }
- return IntZero
-}
-
-func compareInt(a, b Int) ref.Val {
- if a < b {
- return IntNegOne
- }
- if a > b {
- return IntOne
- }
- return IntZero
-}
-
-func compareUint(a, b Uint) ref.Val {
- if a < b {
- return IntNegOne
- }
- if a > b {
- return IntOne
- }
- return IntZero
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/doc.go b/etcd/vendor/github.com/google/cel-go/common/types/doc.go
deleted file mode 100644
index 5f641d7043..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package types contains the types, traits, and utilities common to all
-// components of expression handling.
-package types
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/double.go b/etcd/vendor/github.com/google/cel-go/common/types/double.go
deleted file mode 100644
index a6ec52a0f9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/double.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "math"
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// Double type that implements ref.Val, comparison, and mathematical
-// operations.
-type Double float64
-
-var (
- // DoubleType singleton.
- DoubleType = NewTypeValue("double",
- traits.AdderType,
- traits.ComparerType,
- traits.DividerType,
- traits.MultiplierType,
- traits.NegatorType,
- traits.SubtractorType)
-
- // doubleWrapperType reflected type for protobuf double wrapper type.
- doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{})
-
- // floatWrapperType reflected type for protobuf float wrapper type.
- floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{})
-)
-
-// Add implements traits.Adder.Add.
-func (d Double) Add(other ref.Val) ref.Val {
- otherDouble, ok := other.(Double)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- return d + otherDouble
-}
-
-// Compare implements traits.Comparer.Compare.
-func (d Double) Compare(other ref.Val) ref.Val {
- if math.IsNaN(float64(d)) {
- return NewErr("NaN values cannot be ordered")
- }
- switch ov := other.(type) {
- case Double:
- if math.IsNaN(float64(ov)) {
- return NewErr("NaN values cannot be ordered")
- }
- return compareDouble(d, ov)
- case Int:
- return compareDoubleInt(d, ov)
- case Uint:
- return compareDoubleUint(d, ov)
- default:
- return MaybeNoSuchOverloadErr(other)
- }
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.Float32:
- v := float32(d)
- return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
- case reflect.Float64:
- v := float64(d)
- return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Primitives must be wrapped before being set on an Any field.
- return anypb.New(wrapperspb.Double(float64(d)))
- case doubleWrapperType:
- // Convert to a wrapperspb.DoubleValue
- return wrapperspb.Double(float64(d)), nil
- case floatWrapperType:
- // Convert to a wrapperspb.FloatValue (with truncation).
- return wrapperspb.Float(float32(d)), nil
- case jsonValueType:
- // Note, there are special cases for proto3 to json conversion that
- // expect the floating point value to be converted to a NaN,
- // Infinity, or -Infinity string values, but the jsonpb string
- // marshaling of the protobuf.Value will handle this conversion.
- return structpb.NewNumberValue(float64(d)), nil
- }
- switch typeDesc.Elem().Kind() {
- case reflect.Float32:
- v := float32(d)
- p := reflect.New(typeDesc.Elem())
- p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
- return p.Interface(), nil
- case reflect.Float64:
- v := float64(d)
- p := reflect.New(typeDesc.Elem())
- p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
- return p.Interface(), nil
- }
- case reflect.Interface:
- dv := d.Value()
- if reflect.TypeOf(dv).Implements(typeDesc) {
- return dv, nil
- }
- if reflect.TypeOf(d).Implements(typeDesc) {
- return d, nil
- }
- }
- return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case IntType:
- i, err := doubleToInt64Checked(float64(d))
- if err != nil {
- return wrapErr(err)
- }
- return Int(i)
- case UintType:
- i, err := doubleToUint64Checked(float64(d))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(i)
- case DoubleType:
- return d
- case StringType:
- return String(fmt.Sprintf("%g", float64(d)))
- case TypeType:
- return DoubleType
- }
- return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal)
-}
-
-// Divide implements traits.Divider.Divide.
-func (d Double) Divide(other ref.Val) ref.Val {
- otherDouble, ok := other.(Double)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- return d / otherDouble
-}
-
-// Equal implements ref.Val.Equal.
-func (d Double) Equal(other ref.Val) ref.Val {
- if math.IsNaN(float64(d)) {
- return False
- }
- switch ov := other.(type) {
- case Double:
- if math.IsNaN(float64(ov)) {
- return False
- }
- return Bool(d == ov)
- case Int:
- return Bool(compareDoubleInt(d, ov) == 0)
- case Uint:
- return Bool(compareDoubleUint(d, ov) == 0)
- default:
- return False
- }
-}
-
-// Multiply implements traits.Multiplier.Multiply.
-func (d Double) Multiply(other ref.Val) ref.Val {
- otherDouble, ok := other.(Double)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- return d * otherDouble
-}
-
-// Negate implements traits.Negater.Negate.
-func (d Double) Negate() ref.Val {
- return -d
-}
-
-// Subtract implements traits.Subtractor.Subtract.
-func (d Double) Subtract(subtrahend ref.Val) ref.Val {
- subtraDouble, ok := subtrahend.(Double)
- if !ok {
- return MaybeNoSuchOverloadErr(subtrahend)
- }
- return d - subtraDouble
-}
-
-// Type implements ref.Val.Type.
-func (d Double) Type() ref.Type {
- return DoubleType
-}
-
-// Value implements ref.Val.Value.
-func (d Double) Value() interface{} {
- return float64(d)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/duration.go b/etcd/vendor/github.com/google/cel-go/common/types/duration.go
deleted file mode 100644
index 418349fa6c..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/duration.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "time"
-
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- dpb "google.golang.org/protobuf/types/known/durationpb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-// Duration type that implements ref.Val and supports add, compare, negate,
-// and subtract operators. This type is also a receiver which means it can
-// participate in dispatch to receiver functions.
-type Duration struct {
- time.Duration
-}
-
-func durationOf(d time.Duration) Duration {
- return Duration{Duration: d}
-}
-
-var (
- // DurationType singleton.
- DurationType = NewTypeValue("google.protobuf.Duration",
- traits.AdderType,
- traits.ComparerType,
- traits.NegatorType,
- traits.ReceiverType,
- traits.SubtractorType)
-)
-
-// Add implements traits.Adder.Add.
-func (d Duration) Add(other ref.Val) ref.Val {
- switch other.Type() {
- case DurationType:
- dur2 := other.(Duration)
- val, err := addDurationChecked(d.Duration, dur2.Duration)
- if err != nil {
- return wrapErr(err)
- }
- return durationOf(val)
- case TimestampType:
- ts := other.(Timestamp).Time
- val, err := addTimeDurationChecked(ts, d.Duration)
- if err != nil {
- return wrapErr(err)
- }
- return timestampOf(val)
- }
- return MaybeNoSuchOverloadErr(other)
-}
-
-// Compare implements traits.Comparer.Compare.
-func (d Duration) Compare(other ref.Val) ref.Val {
- otherDur, ok := other.(Duration)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- d1 := d.Duration
- d2 := otherDur.Duration
- switch {
- case d1 < d2:
- return IntNegOne
- case d1 > d2:
- return IntOne
- default:
- return IntZero
- }
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- // If the duration is already assignable to the desired type return it.
- if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
- return d.Duration, nil
- }
- if reflect.TypeOf(d).AssignableTo(typeDesc) {
- return d, nil
- }
- switch typeDesc {
- case anyValueType:
- // Pack the duration as a dpb.Duration into an Any value.
- return anypb.New(dpb.New(d.Duration))
- case durationValueType:
- // Unwrap the CEL value to its underlying proto value.
- return dpb.New(d.Duration), nil
- case jsonValueType:
- // CEL follows the proto3 to JSON conversion.
- // Note, using jsonpb would wrap the result in extra double quotes.
- v := d.ConvertToType(StringType)
- if IsError(v) {
- return nil, v.(*Err)
- }
- return structpb.NewStringValue(string(v.(String))), nil
- }
- return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (d Duration) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case StringType:
- return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s")
- case IntType:
- return Int(d.Duration)
- case DurationType:
- return d
- case TypeType:
- return DurationType
- }
- return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal)
-}
-
-// Equal implements ref.Val.Equal.
-func (d Duration) Equal(other ref.Val) ref.Val {
- otherDur, ok := other.(Duration)
- return Bool(ok && d.Duration == otherDur.Duration)
-}
-
-// Negate implements traits.Negater.Negate.
-func (d Duration) Negate() ref.Val {
- val, err := negateDurationChecked(d.Duration)
- if err != nil {
- return wrapErr(err)
- }
- return durationOf(val)
-}
-
-// Receive implements traits.Receiver.Receive.
-func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
- if len(args) == 0 {
- if f, found := durationZeroArgOverloads[function]; found {
- return f(d.Duration)
- }
- }
- return NoSuchOverloadErr()
-}
-
-// Subtract implements traits.Subtractor.Subtract.
-func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
- subtraDur, ok := subtrahend.(Duration)
- if !ok {
- return MaybeNoSuchOverloadErr(subtrahend)
- }
- val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
- if err != nil {
- return wrapErr(err)
- }
- return durationOf(val)
-}
-
-// Type implements ref.Val.Type.
-func (d Duration) Type() ref.Type {
- return DurationType
-}
-
-// Value implements ref.Val.Value.
-func (d Duration) Value() interface{} {
- return d.Duration
-}
-
-var (
- durationValueType = reflect.TypeOf(&dpb.Duration{})
-
- durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{
- overloads.TimeGetHours: func(dur time.Duration) ref.Val {
- return Int(dur.Hours())
- },
- overloads.TimeGetMinutes: func(dur time.Duration) ref.Val {
- return Int(dur.Minutes())
- },
- overloads.TimeGetSeconds: func(dur time.Duration) ref.Val {
- return Int(dur.Seconds())
- },
- overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val {
- return Int(dur.Milliseconds())
- }}
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/err.go b/etcd/vendor/github.com/google/cel-go/common/types/err.go
deleted file mode 100644
index 93d79cdcbc..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/err.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "errors"
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Err type which extends the built-in go error and implements ref.Val.
-type Err struct {
- error
-}
-
-var (
- // ErrType singleton.
- ErrType = NewTypeValue("error")
-
- // errDivideByZero is an error indicating a division by zero of an integer value.
- errDivideByZero = errors.New("division by zero")
- // errModulusByZero is an error indicating a modulus by zero of an integer value.
- errModulusByZero = errors.New("modulus by zero")
- // errIntOverflow is an error representing integer overflow.
- errIntOverflow = errors.New("integer overflow")
- // errUintOverflow is an error representing unsigned integer overflow.
- errUintOverflow = errors.New("unsigned integer overflow")
- // errDurationOverflow is an error representing duration overflow.
- errDurationOverflow = errors.New("duration overflow")
- // errTimestampOverflow is an error representing timestamp overflow.
- errTimestampOverflow = errors.New("timestamp overflow")
- celErrTimestampOverflow = &Err{error: errTimestampOverflow}
-
- // celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
- celErrNoSuchOverload = NewErr("no such overload")
-)
-
-// NewErr creates a new Err described by the format string and args.
-// TODO: Audit the use of this function and standardize the error messages and codes.
-func NewErr(format string, args ...interface{}) ref.Val {
- return &Err{fmt.Errorf(format, args...)}
-}
-
-// NoSuchOverloadErr returns a new types.Err instance with a no such overload message.
-func NoSuchOverloadErr() ref.Val {
- return celErrNoSuchOverload
-}
-
-// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
-// message that indicates that the native value could not be converted to a CEL ref.Val.
-func UnsupportedRefValConversionErr(val interface{}) ref.Val {
- return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
-}
-
-// MaybeNoSuchOverloadErr returns the error or unknown if the input ref.Val is one of these types,
-// else a new no such overload error.
-func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
- return ValOrErr(val, "no such overload")
-}
-
-// ValOrErr either returns the existing error or creates a new one.
-// TODO: Audit the use of this function and standardize the error messages and codes.
-func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val {
- if val == nil || !IsUnknownOrError(val) {
- return NewErr(format, args...)
- }
- return val
-}
-
-// wrapErr wraps an existing Go error value into a CEL Err value.
-func wrapErr(err error) ref.Val {
- return &Err{error: err}
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- return nil, e.error
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
- // Errors are not convertible to other representations.
- return e
-}
-
-// Equal implements ref.Val.Equal.
-func (e *Err) Equal(other ref.Val) ref.Val {
- // An error cannot be equal to any other value, so it returns itself.
- return e
-}
-
-// String implements fmt.Stringer.
-func (e *Err) String() string {
- return e.error.Error()
-}
-
-// Type implements ref.Val.Type.
-func (e *Err) Type() ref.Type {
- return ErrType
-}
-
-// Value implements ref.Val.Value.
-func (e *Err) Value() interface{} {
- return e.error
-}
-
-// IsError returns whether the input element ref.Type or ref.Val is equal to
-// the ErrType singleton.
-func IsError(val ref.Val) bool {
- switch val.(type) {
- case *Err:
- return true
- default:
- return false
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/int.go b/etcd/vendor/github.com/google/cel-go/common/types/int.go
deleted file mode 100644
index 95f25dcd80..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/int.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "math"
- "reflect"
- "strconv"
- "time"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// Int type that implements ref.Val as well as comparison and math operators.
-type Int int64
-
-// Int constants used for comparison results.
-const (
- // IntZero is the zero-value for Int
- IntZero = Int(0)
- IntOne = Int(1)
- IntNegOne = Int(-1)
-)
-
-var (
- // IntType singleton.
- IntType = NewTypeValue("int",
- traits.AdderType,
- traits.ComparerType,
- traits.DividerType,
- traits.ModderType,
- traits.MultiplierType,
- traits.NegatorType,
- traits.SubtractorType)
-
- // int32WrapperType reflected type for protobuf int32 wrapper type.
- int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{})
-
- // int64WrapperType reflected type for protobuf int64 wrapper type.
- int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{})
-)
-
-// Add implements traits.Adder.Add.
-func (i Int) Add(other ref.Val) ref.Val {
- otherInt, ok := other.(Int)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- val, err := addInt64Checked(int64(i), int64(otherInt))
- if err != nil {
- return wrapErr(err)
- }
- return Int(val)
-}
-
-// Compare implements traits.Comparer.Compare.
-func (i Int) Compare(other ref.Val) ref.Val {
- switch ov := other.(type) {
- case Double:
- if math.IsNaN(float64(ov)) {
- return NewErr("NaN values cannot be ordered")
- }
- return compareIntDouble(i, ov)
- case Int:
- return compareInt(i, ov)
- case Uint:
- return compareIntUint(i, ov)
- default:
- return MaybeNoSuchOverloadErr(other)
- }
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.Int, reflect.Int32:
- // Enums are also mapped as int32 derivations.
- // Note, the code doesn't convert to the enum value directly since this is not known, but
- // the net effect with respect to proto-assignment is handled correctly by the reflection
- // Convert method.
- v, err := int64ToInt32Checked(int64(i))
- if err != nil {
- return nil, err
- }
- return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
- case reflect.Int64:
- return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Primitives must be wrapped before being set on an Any field.
- return anypb.New(wrapperspb.Int64(int64(i)))
- case int32WrapperType:
- // Convert the value to a wrapperspb.Int32Value, error on overflow.
- v, err := int64ToInt32Checked(int64(i))
- if err != nil {
- return nil, err
- }
- return wrapperspb.Int32(v), nil
- case int64WrapperType:
- // Convert the value to a wrapperspb.Int64Value.
- return wrapperspb.Int64(int64(i)), nil
- case jsonValueType:
- // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON
- // decimal strings. Because CEL ints might come from the automatic widening of 32-bit
- // values in protos, the JSON type is chosen dynamically based on the value.
- //
- // - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers.
- // - Integers outside this range are encoded as JSON strings.
- //
- // The integer to float range represents the largest interval where such a conversion
- // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON
- // number as with protobuf. Those consuming JSON from a 64-bit source must be able to
- // handle either a JSON number or a JSON decimal string. To handle these cases safely
- // the string values must be explicitly converted to int() within a CEL expression;
- // however, it is best to simply stay within the JSON number range when building JSON
- // objects in CEL.
- if i.isJSONSafe() {
- return structpb.NewNumberValue(float64(i)), nil
- }
- // Proto3 to JSON conversion requires string-formatted int64 values
- // since the conversion to floating point would result in truncation.
- return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil
- }
- switch typeDesc.Elem().Kind() {
- case reflect.Int32:
- // Convert the value to a wrapperspb.Int32Value, error on overflow.
- v, err := int64ToInt32Checked(int64(i))
- if err != nil {
- return nil, err
- }
- p := reflect.New(typeDesc.Elem())
- p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
- return p.Interface(), nil
- case reflect.Int64:
- v := int64(i)
- p := reflect.New(typeDesc.Elem())
- p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
- return p.Interface(), nil
- }
- case reflect.Interface:
- iv := i.Value()
- if reflect.TypeOf(iv).Implements(typeDesc) {
- return iv, nil
- }
- if reflect.TypeOf(i).Implements(typeDesc) {
- return i, nil
- }
- }
- return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case IntType:
- return i
- case UintType:
- u, err := int64ToUint64Checked(int64(i))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(u)
- case DoubleType:
- return Double(i)
- case StringType:
- return String(fmt.Sprintf("%d", int64(i)))
- case TimestampType:
- // The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number
- // of seconds between year 1 and year 1970. See comments on unixToInternal.
- if int64(i) < minUnixTime || int64(i) > maxUnixTime {
- return celErrTimestampOverflow
- }
- return timestampOf(time.Unix(int64(i), 0).UTC())
- case TypeType:
- return IntType
- }
- return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal)
-}
-
-// Divide implements traits.Divider.Divide.
-func (i Int) Divide(other ref.Val) ref.Val {
- otherInt, ok := other.(Int)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- val, err := divideInt64Checked(int64(i), int64(otherInt))
- if err != nil {
- return wrapErr(err)
- }
- return Int(val)
-}
-
-// Equal implements ref.Val.Equal.
-func (i Int) Equal(other ref.Val) ref.Val {
- switch ov := other.(type) {
- case Double:
- if math.IsNaN(float64(ov)) {
- return False
- }
- return Bool(compareIntDouble(i, ov) == 0)
- case Int:
- return Bool(i == ov)
- case Uint:
- return Bool(compareIntUint(i, ov) == 0)
- default:
- return False
- }
-}
-
-// Modulo implements traits.Modder.Modulo.
-func (i Int) Modulo(other ref.Val) ref.Val {
- otherInt, ok := other.(Int)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- val, err := moduloInt64Checked(int64(i), int64(otherInt))
- if err != nil {
- return wrapErr(err)
- }
- return Int(val)
-}
-
-// Multiply implements traits.Multiplier.Multiply.
-func (i Int) Multiply(other ref.Val) ref.Val {
- otherInt, ok := other.(Int)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- val, err := multiplyInt64Checked(int64(i), int64(otherInt))
- if err != nil {
- return wrapErr(err)
- }
- return Int(val)
-}
-
-// Negate implements traits.Negater.Negate.
-func (i Int) Negate() ref.Val {
- val, err := negateInt64Checked(int64(i))
- if err != nil {
- return wrapErr(err)
- }
- return Int(val)
-}
-
-// Subtract implements traits.Subtractor.Subtract.
-func (i Int) Subtract(subtrahend ref.Val) ref.Val {
- subtraInt, ok := subtrahend.(Int)
- if !ok {
- return MaybeNoSuchOverloadErr(subtrahend)
- }
- val, err := subtractInt64Checked(int64(i), int64(subtraInt))
- if err != nil {
- return wrapErr(err)
- }
- return Int(val)
-}
-
-// Type implements ref.Val.Type.
-func (i Int) Type() ref.Type {
- return IntType
-}
-
-// Value implements ref.Val.Value.
-func (i Int) Value() interface{} {
- return int64(i)
-}
-
-// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON.
-func (i Int) isJSONSafe() bool {
- return i >= minIntJSON && i <= maxIntJSON
-}
-
-const (
- // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6.
- maxIntJSON = 1<<53 - 1
- // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6.
- minIntJSON = -maxIntJSON
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/iterator.go b/etcd/vendor/github.com/google/cel-go/common/types/iterator.go
deleted file mode 100644
index 4906627783..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/iterator.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-var (
- // IteratorType singleton.
- IteratorType = NewTypeValue("iterator", traits.IteratorType)
-)
-
-// baseIterator is the basis for list, map, and object iterators.
-//
-// An iterator in and of itself should not be a valid value for comparison, but must implement the
-// `ref.Val` methods in order to be well-supported within instruction arguments processed by the
-// interpreter.
-type baseIterator struct{}
-
-func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- return nil, fmt.Errorf("type conversion on iterators not supported")
-}
-
-func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val {
- return NewErr("no such overload")
-}
-
-func (*baseIterator) Equal(other ref.Val) ref.Val {
- return NewErr("no such overload")
-}
-
-func (*baseIterator) Type() ref.Type {
- return IteratorType
-}
-
-func (*baseIterator) Value() interface{} {
- return nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/json_value.go b/etcd/vendor/github.com/google/cel-go/common/types/json_value.go
deleted file mode 100644
index cd63b51944..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/json_value.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
-
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-// JSON type constants representing the reflected types of protobuf JSON values.
-var (
- jsonValueType = reflect.TypeOf(&structpb.Value{})
- jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
- jsonStructType = reflect.TypeOf(&structpb.Struct{})
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/list.go b/etcd/vendor/github.com/google/cel-go/common/types/list.go
deleted file mode 100644
index 7230f7ea12..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/list.go
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-var (
- // ListType singleton.
- ListType = NewTypeValue("list",
- traits.AdderType,
- traits.ContainerType,
- traits.IndexerType,
- traits.IterableType,
- traits.SizerType)
-)
-
-// NewDynamicList returns a traits.Lister with heterogenous elements.
-// value should be an array of "native" types, i.e. any type that
-// NativeToValue() can convert to a ref.Val.
-func NewDynamicList(adapter ref.TypeAdapter, value interface{}) traits.Lister {
- refValue := reflect.ValueOf(value)
- return &baseList{
- TypeAdapter: adapter,
- value: value,
- size: refValue.Len(),
- get: func(i int) interface{} {
- return refValue.Index(i).Interface()
- },
- }
-}
-
-// NewStringList returns a traits.Lister containing only strings.
-func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister {
- return &baseList{
- TypeAdapter: adapter,
- value: elems,
- size: len(elems),
- get: func(i int) interface{} { return elems[i] },
- }
-}
-
-// NewRefValList returns a traits.Lister with ref.Val elements.
-//
-// This type specialization is used with list literals within CEL expressions.
-func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister {
- return &baseList{
- TypeAdapter: adapter,
- value: elems,
- size: len(elems),
- get: func(i int) interface{} { return elems[i] },
- }
-}
-
-// NewProtoList returns a traits.Lister based on a pb.List instance.
-func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister {
- return &baseList{
- TypeAdapter: adapter,
- value: list,
- size: list.Len(),
- get: func(i int) interface{} { return list.Get(i).Interface() },
- }
-}
-
-// NewJSONList returns a traits.Lister based on structpb.ListValue instance.
-func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister {
- vals := l.GetValues()
- return &baseList{
- TypeAdapter: adapter,
- value: l,
- size: len(vals),
- get: func(i int) interface{} { return vals[i] },
- }
-}
-
-// NewMutableList creates a new mutable list whose internal state can be modified.
-func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister {
- var mutableValues []ref.Val
- return &mutableList{
- baseList: &baseList{
- TypeAdapter: adapter,
- value: mutableValues,
- size: 0,
- get: func(i int) interface{} { return mutableValues[i] },
- },
- mutableValues: mutableValues,
- }
-}
-
-// baseList points to a list containing elements of any type.
-// The `value` is an array of native values, and refValue is its reflection object.
-// The `ref.TypeAdapter` enables native type to CEL type conversions.
-type baseList struct {
- ref.TypeAdapter
- value interface{}
-
- // size indicates the number of elements within the list.
- // Since objects are immutable the size of a list is static.
- size int
-
- // get returns a value at the specified integer index.
- // The index is guaranteed to be checked against the list index range.
- get func(int) interface{}
-}
-
-// Add implements the traits.Adder interface method.
-func (l *baseList) Add(other ref.Val) ref.Val {
- otherList, ok := other.(traits.Lister)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- if l.Size() == IntZero {
- return other
- }
- if otherList.Size() == IntZero {
- return l
- }
- return &concatList{
- TypeAdapter: l.TypeAdapter,
- prevList: l,
- nextList: otherList}
-}
-
-// Contains implements the traits.Container interface method.
-func (l *baseList) Contains(elem ref.Val) ref.Val {
- for i := 0; i < l.size; i++ {
- val := l.NativeToValue(l.get(i))
- cmp := elem.Equal(val)
- b, ok := cmp.(Bool)
- if ok && b == True {
- return True
- }
- }
- return False
-}
-
-// ConvertToNative implements the ref.Val interface method.
-func (l *baseList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- // If the underlying list value is assignable to the reflected type return it.
- if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
- return l.value, nil
- }
- // If the list wrapper is assignable to the desired type return it.
- if reflect.TypeOf(l).AssignableTo(typeDesc) {
- return l, nil
- }
- // Attempt to convert the list to a set of well known protobuf types.
- switch typeDesc {
- case anyValueType:
- json, err := l.ConvertToNative(jsonListValueType)
- if err != nil {
- return nil, err
- }
- return anypb.New(json.(proto.Message))
- case jsonValueType, jsonListValueType:
- jsonValues, err :=
- l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
- if err != nil {
- return nil, err
- }
- jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
- if typeDesc == jsonListValueType {
- return jsonList, nil
- }
- return structpb.NewListValue(jsonList), nil
- }
- // Non-list conversion.
- if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
- return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
- }
-
- // List conversion.
- // Allow the element ConvertToNative() function to determine whether conversion is possible.
- otherElemType := typeDesc.Elem()
- elemCount := l.size
- nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount)
- for i := 0; i < elemCount; i++ {
- elem := l.NativeToValue(l.get(i))
- nativeElemVal, err := elem.ConvertToNative(otherElemType)
- if err != nil {
- return nil, err
- }
- nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
- }
- return nativeList.Interface(), nil
-}
-
-// ConvertToType implements the ref.Val interface method.
-func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case ListType:
- return l
- case TypeType:
- return ListType
- }
- return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
-}
-
-// Equal implements the ref.Val interface method.
-func (l *baseList) Equal(other ref.Val) ref.Val {
- otherList, ok := other.(traits.Lister)
- if !ok {
- return False
- }
- if l.Size() != otherList.Size() {
- return False
- }
- for i := IntZero; i < l.Size().(Int); i++ {
- thisElem := l.Get(i)
- otherElem := otherList.Get(i)
- elemEq := Equal(thisElem, otherElem)
- if elemEq == False {
- return False
- }
- }
- return True
-}
-
-// Get implements the traits.Indexer interface method.
-func (l *baseList) Get(index ref.Val) ref.Val {
- ind, err := indexOrError(index)
- if err != nil {
- return ValOrErr(index, err.Error())
- }
- if ind < 0 || ind >= l.size {
- return NewErr("index '%d' out of range in list size '%d'", ind, l.Size())
- }
- return l.NativeToValue(l.get(ind))
-}
-
-// Iterator implements the traits.Iterable interface method.
-func (l *baseList) Iterator() traits.Iterator {
- return newListIterator(l)
-}
-
-// Size implements the traits.Sizer interface method.
-func (l *baseList) Size() ref.Val {
- return Int(l.size)
-}
-
-// Type implements the ref.Val interface method.
-func (l *baseList) Type() ref.Type {
- return ListType
-}
-
-// Value implements the ref.Val interface method.
-func (l *baseList) Value() interface{} {
- return l.value
-}
-
-// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
-type mutableList struct {
- *baseList
- mutableValues []ref.Val
-}
-
-// Add copies elements from the other list into the internal storage of the mutable list.
-// The ref.Val returned by Add is the receiver.
-func (l *mutableList) Add(other ref.Val) ref.Val {
- switch otherList := other.(type) {
- case *mutableList:
- l.mutableValues = append(l.mutableValues, otherList.mutableValues...)
- l.size += len(otherList.mutableValues)
- case traits.Lister:
- for i := IntZero; i < otherList.Size().(Int); i++ {
- l.size++
- l.mutableValues = append(l.mutableValues, otherList.Get(i))
- }
- default:
- return MaybeNoSuchOverloadErr(otherList)
- }
- return l
-}
-
-// ToImmutableList returns an immutable list based on the internal storage of the mutable list.
-func (l *mutableList) ToImmutableList() traits.Lister {
- // The reference to internal state is guaranteed to be safe as this call is only performed
- // when mutations have been completed.
- return NewRefValList(l.TypeAdapter, l.mutableValues)
-}
-
-// concatList combines two list implementations together into a view.
-// The `ref.TypeAdapter` enables native type to CEL type conversions.
-type concatList struct {
- ref.TypeAdapter
- value interface{}
- prevList traits.Lister
- nextList traits.Lister
-}
-
-// Add implements the traits.Adder interface method.
-func (l *concatList) Add(other ref.Val) ref.Val {
- otherList, ok := other.(traits.Lister)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- if l.Size() == IntZero {
- return other
- }
- if otherList.Size() == IntZero {
- return l
- }
- return &concatList{
- TypeAdapter: l.TypeAdapter,
- prevList: l,
- nextList: otherList}
-}
-
-// Contains implements the traits.Container interface method.
-func (l *concatList) Contains(elem ref.Val) ref.Val {
- // The concat list relies on the IsErrorOrUnknown checks against the input element to be
- // performed by the `prevList` and/or `nextList`.
- prev := l.prevList.Contains(elem)
- // Short-circuit the return if the elem was found in the prev list.
- if prev == True {
- return prev
- }
- // Return if the elem was found in the next list.
- next := l.nextList.Contains(elem)
- if next == True {
- return next
- }
- // Handle the case where an error or unknown was encountered before checking next.
- if IsUnknownOrError(prev) {
- return prev
- }
- // Otherwise, rely on the next value as the representative result.
- return next
-}
-
-// ConvertToNative implements the ref.Val interface method.
-func (l *concatList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- combined := NewDynamicList(l.TypeAdapter, l.Value().([]interface{}))
- return combined.ConvertToNative(typeDesc)
-}
-
-// ConvertToType implements the ref.Val interface method.
-func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case ListType:
- return l
- case TypeType:
- return ListType
- }
- return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
-}
-
-// Equal implements the ref.Val interface method.
-func (l *concatList) Equal(other ref.Val) ref.Val {
- otherList, ok := other.(traits.Lister)
- if !ok {
- return False
- }
- if l.Size() != otherList.Size() {
- return False
- }
- var maybeErr ref.Val
- for i := IntZero; i < l.Size().(Int); i++ {
- thisElem := l.Get(i)
- otherElem := otherList.Get(i)
- elemEq := Equal(thisElem, otherElem)
- if elemEq == False {
- return False
- }
- if maybeErr == nil && IsUnknownOrError(elemEq) {
- maybeErr = elemEq
- }
- }
- if maybeErr != nil {
- return maybeErr
- }
- return True
-}
-
-// Get implements the traits.Indexer interface method.
-func (l *concatList) Get(index ref.Val) ref.Val {
- ind, err := indexOrError(index)
- if err != nil {
- return ValOrErr(index, err.Error())
- }
- i := Int(ind)
- if i < l.prevList.Size().(Int) {
- return l.prevList.Get(i)
- }
- offset := i - l.prevList.Size().(Int)
- return l.nextList.Get(offset)
-}
-
-// Iterator implements the traits.Iterable interface method.
-func (l *concatList) Iterator() traits.Iterator {
- return newListIterator(l)
-}
-
-// Size implements the traits.Sizer interface method.
-func (l *concatList) Size() ref.Val {
- return l.prevList.Size().(Int).Add(l.nextList.Size())
-}
-
-// Type implements the ref.Val interface method.
-func (l *concatList) Type() ref.Type {
- return ListType
-}
-
-// Value implements the ref.Val interface method.
-func (l *concatList) Value() interface{} {
- if l.value == nil {
- merged := make([]interface{}, l.Size().(Int))
- prevLen := l.prevList.Size().(Int)
- for i := Int(0); i < prevLen; i++ {
- merged[i] = l.prevList.Get(i).Value()
- }
- nextLen := l.nextList.Size().(Int)
- for j := Int(0); j < nextLen; j++ {
- merged[prevLen+j] = l.nextList.Get(j).Value()
- }
- l.value = merged
- }
- return l.value
-}
-
-func newListIterator(listValue traits.Lister) traits.Iterator {
- return &listIterator{
- listValue: listValue,
- len: listValue.Size().(Int),
- }
-}
-
-type listIterator struct {
- *baseIterator
- listValue traits.Lister
- cursor Int
- len Int
-}
-
-// HasNext implements the traits.Iterator interface method.
-func (it *listIterator) HasNext() ref.Val {
- return Bool(it.cursor < it.len)
-}
-
-// Next implements the traits.Iterator interface method.
-func (it *listIterator) Next() ref.Val {
- if it.HasNext() == True {
- index := it.cursor
- it.cursor++
- return it.listValue.Get(index)
- }
- return nil
-}
-
-func indexOrError(index ref.Val) (int, error) {
- switch iv := index.(type) {
- case Int:
- return int(iv), nil
- case Double:
- if ik, ok := doubleToInt64Lossless(float64(iv)); ok {
- return int(ik), nil
- }
- return -1, fmt.Errorf("unsupported index value %v in list", index)
- case Uint:
- if ik, ok := uint64ToInt64Lossless(uint64(iv)); ok {
- return int(ik), nil
- }
- return -1, fmt.Errorf("unsupported index value %v in list", index)
- default:
- return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type())
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/map.go b/etcd/vendor/github.com/google/cel-go/common/types/map.go
deleted file mode 100644
index 5865594024..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/map.go
+++ /dev/null
@@ -1,832 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "github.com/stoewer/go-strcase"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
-func NewDynamicMap(adapter ref.TypeAdapter, value interface{}) traits.Mapper {
- refValue := reflect.ValueOf(value)
- return &baseMap{
- TypeAdapter: adapter,
- mapAccessor: newReflectMapAccessor(adapter, refValue),
- value: value,
- size: refValue.Len(),
- }
-}
-
-// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been
-// encoded in protocol buffer form.
-//
-// The `adapter` argument provides type adaptation capabilities from proto to CEL.
-func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mapper {
- fields := value.GetFields()
- return &baseMap{
- TypeAdapter: adapter,
- mapAccessor: newJSONStructAccessor(adapter, fields),
- value: value,
- size: len(fields),
- }
-}
-
-// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
-func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Mapper {
- return &baseMap{
- TypeAdapter: adapter,
- mapAccessor: newRefValMapAccessor(value),
- value: value,
- size: len(value),
- }
-}
-
-// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
-func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]interface{}) traits.Mapper {
- return &baseMap{
- TypeAdapter: adapter,
- mapAccessor: newStringIfaceMapAccessor(adapter, value),
- value: value,
- size: len(value),
- }
-}
-
-// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
-func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits.Mapper {
- return &baseMap{
- TypeAdapter: adapter,
- mapAccessor: newStringMapAccessor(value),
- value: value,
- size: len(value),
- }
-}
-
-// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
-func NewProtoMap(adapter ref.TypeAdapter, value *pb.Map) traits.Mapper {
- return &protoMap{
- TypeAdapter: adapter,
- value: value,
- }
-}
-
-var (
- // MapType singleton.
- MapType = NewTypeValue("map",
- traits.ContainerType,
- traits.IndexerType,
- traits.IterableType,
- traits.SizerType)
-)
-
-// mapAccessor is a private interface for finding values within a map and iterating over the keys.
-// This interface implements portions of the API surface area required by the traits.Mapper
-// interface.
-type mapAccessor interface {
- // Find returns a value, if one exists, for the input key.
- //
- // If the key is not found the function returns (nil, false).
- Find(ref.Val) (ref.Val, bool)
-
- // Iterator returns an Iterator over the map key set.
- Iterator() traits.Iterator
-}
-
-// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
-//
-// Since CEL is side-effect free, the base map represents an immutable object.
-type baseMap struct {
- // TypeAdapter used to convert keys and values accessed within the map.
- ref.TypeAdapter
-
- // mapAccessor interface implementation used to find and iterate over map keys.
- mapAccessor
-
- // value is the native Go value upon which the map type operators.
- value interface{}
-
- // size is the number of entries in the map.
- size int
-}
-
-// Contains implements the traits.Container interface method.
-func (m *baseMap) Contains(index ref.Val) ref.Val {
- _, found := m.Find(index)
- return Bool(found)
-}
-
-// ConvertToNative implements the ref.Val interface method.
-func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- // If the map is already assignable to the desired type return it, e.g. interfaces and
- // maps with the same key value types.
- if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
- return m.value, nil
- }
- if reflect.TypeOf(m).AssignableTo(typeDesc) {
- return m, nil
- }
- switch typeDesc {
- case anyValueType:
- json, err := m.ConvertToNative(jsonStructType)
- if err != nil {
- return nil, err
- }
- return anypb.New(json.(proto.Message))
- case jsonValueType, jsonStructType:
- jsonEntries, err :=
- m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
- if err != nil {
- return nil, err
- }
- jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)}
- if typeDesc == jsonStructType {
- return jsonMap, nil
- }
- return structpb.NewStructValue(jsonMap), nil
- }
-
- // Unwrap pointers, but track their use.
- isPtr := false
- if typeDesc.Kind() == reflect.Ptr {
- tk := typeDesc
- typeDesc = typeDesc.Elem()
- if typeDesc.Kind() == reflect.Ptr {
- return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
- }
- isPtr = true
- }
- switch typeDesc.Kind() {
- // Map conversion.
- case reflect.Map:
- otherKey := typeDesc.Key()
- otherElem := typeDesc.Elem()
- nativeMap := reflect.MakeMapWithSize(typeDesc, m.size)
- it := m.Iterator()
- for it.HasNext() == True {
- key := it.Next()
- refKeyValue, err := key.ConvertToNative(otherKey)
- if err != nil {
- return nil, err
- }
- refElemValue, err := m.Get(key).ConvertToNative(otherElem)
- if err != nil {
- return nil, err
- }
- nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue))
- }
- return nativeMap.Interface(), nil
- case reflect.Struct:
- nativeStructPtr := reflect.New(typeDesc)
- nativeStruct := nativeStructPtr.Elem()
- it := m.Iterator()
- for it.HasNext() == True {
- key := it.Next()
- // Ensure the field name being referenced is exported.
- // Only exported (public) field names can be set by reflection, where the name
- // must be at least one character in length and start with an upper-case letter.
- fieldName := key.ConvertToType(StringType)
- if IsError(fieldName) {
- return nil, fieldName.(*Err)
- }
- name := string(fieldName.(String))
- name = strcase.UpperCamelCase(name)
- fieldRef := nativeStruct.FieldByName(name)
- if !fieldRef.IsValid() {
- return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc)
- }
- fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type())
- if err != nil {
- return nil, err
- }
- fieldRef.Set(reflect.ValueOf(fieldValue))
- }
- if isPtr {
- return nativeStructPtr.Interface(), nil
- }
- return nativeStruct.Interface(), nil
- }
- return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc)
-}
-
-// ConvertToType implements the ref.Val interface method.
-func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case MapType:
- return m
- case TypeType:
- return MapType
- }
- return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
-}
-
-// Equal implements the ref.Val interface method.
-func (m *baseMap) Equal(other ref.Val) ref.Val {
- otherMap, ok := other.(traits.Mapper)
- if !ok {
- return False
- }
- if m.Size() != otherMap.Size() {
- return False
- }
- it := m.Iterator()
- for it.HasNext() == True {
- key := it.Next()
- thisVal, _ := m.Find(key)
- otherVal, found := otherMap.Find(key)
- if !found {
- return False
- }
- valEq := Equal(thisVal, otherVal)
- if valEq == False {
- return False
- }
- }
- return True
-}
-
-// Get implements the traits.Indexer interface method.
-func (m *baseMap) Get(key ref.Val) ref.Val {
- v, found := m.Find(key)
- if !found {
- return ValOrErr(v, "no such key: %v", key)
- }
- return v
-}
-
-// Size implements the traits.Sizer interface method.
-func (m *baseMap) Size() ref.Val {
- return Int(m.size)
-}
-
-// Type implements the ref.Val interface method.
-func (m *baseMap) Type() ref.Type {
- return MapType
-}
-
-// Value implements the ref.Val interface method.
-func (m *baseMap) Value() interface{} {
- return m.value
-}
-
-func newJSONStructAccessor(adapter ref.TypeAdapter, st map[string]*structpb.Value) mapAccessor {
- return &jsonStructAccessor{
- TypeAdapter: adapter,
- st: st,
- }
-}
-
-type jsonStructAccessor struct {
- ref.TypeAdapter
- st map[string]*structpb.Value
-}
-
-// Find searches the json struct field map for the input key value and returns (value, true) if
-// found.
-//
-// If the key is not found the function returns (nil, false).
-func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) {
- strKey, ok := key.(String)
- if !ok {
- return nil, false
- }
- keyVal, found := a.st[string(strKey)]
- if !found {
- return nil, false
- }
- return a.NativeToValue(keyVal), true
-}
-
-// Iterator creates a new traits.Iterator from the set of JSON struct field names.
-func (a *jsonStructAccessor) Iterator() traits.Iterator {
- // Copy the keys to make their order stable.
- mapKeys := make([]string, len(a.st))
- i := 0
- for k := range a.st {
- mapKeys[i] = k
- i++
- }
- return &stringKeyIterator{
- mapKeys: mapKeys,
- len: len(mapKeys),
- }
-}
-
-func newReflectMapAccessor(adapter ref.TypeAdapter, value reflect.Value) mapAccessor {
- keyType := value.Type().Key()
- return &reflectMapAccessor{
- TypeAdapter: adapter,
- refValue: value,
- keyType: keyType,
- }
-}
-
-type reflectMapAccessor struct {
- ref.TypeAdapter
- refValue reflect.Value
- keyType reflect.Type
-}
-
-// Find converts the input key to a native Golang type and then uses reflection to find the key,
-// returning (value, true) if present.
-//
-// If the key is not found the function returns (nil, false).
-func (m *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) {
- if m.refValue.Len() == 0 {
- return nil, false
- }
- if keyVal, found := m.findInternal(key); found {
- return keyVal, true
- }
- switch k := key.(type) {
- // Double is not a valid proto map key type, so check for the key as an int or uint.
- case Double:
- if ik, ok := doubleToInt64Lossless(float64(k)); ok {
- if keyVal, found := m.findInternal(Int(ik)); found {
- return keyVal, true
- }
- }
- if uk, ok := doubleToUint64Lossless(float64(k)); ok {
- return m.findInternal(Uint(uk))
- }
- // map keys of type double are not supported.
- case Int:
- if uk, ok := int64ToUint64Lossless(int64(k)); ok {
- return m.findInternal(Uint(uk))
- }
- case Uint:
- if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
- return m.findInternal(Int(ik))
- }
- }
- return nil, false
-}
-
-// findInternal attempts to convert the incoming key to the map's internal native type
-// and then returns the value, if found.
-func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) {
- k, err := key.ConvertToNative(m.keyType)
- if err != nil {
- return nil, false
- }
- refKey := reflect.ValueOf(k)
- val := m.refValue.MapIndex(refKey)
- if val.IsValid() {
- return m.NativeToValue(val.Interface()), true
- }
- return nil, false
-}
-
-// Iterator creates a Golang reflection based traits.Iterator.
-func (m *reflectMapAccessor) Iterator() traits.Iterator {
- return &mapIterator{
- TypeAdapter: m.TypeAdapter,
- mapKeys: m.refValue.MapRange(),
- len: m.refValue.Len(),
- }
-}
-
-func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor {
- return &refValMapAccessor{mapVal: mapVal}
-}
-
-type refValMapAccessor struct {
- mapVal map[ref.Val]ref.Val
-}
-
-// Find uses native map accesses to find the key, returning (value, true) if present.
-//
-// If the key is not found the function returns (nil, false).
-func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) {
- if len(a.mapVal) == 0 {
- return nil, false
- }
- if keyVal, found := a.mapVal[key]; found {
- return keyVal, true
- }
- switch k := key.(type) {
- case Double:
- if ik, ok := doubleToInt64Lossless(float64(k)); ok {
- if keyVal, found := a.mapVal[Int(ik)]; found {
- return keyVal, found
- }
- }
- if uk, ok := doubleToUint64Lossless(float64(k)); ok {
- keyVal, found := a.mapVal[Uint(uk)]
- return keyVal, found
- }
- // map keys of type double are not supported.
- case Int:
- if uk, ok := int64ToUint64Lossless(int64(k)); ok {
- keyVal, found := a.mapVal[Uint(uk)]
- return keyVal, found
- }
- case Uint:
- if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
- keyVal, found := a.mapVal[Int(ik)]
- return keyVal, found
- }
- }
- return nil, false
-}
-
-// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection.
-func (a *refValMapAccessor) Iterator() traits.Iterator {
- return &mapIterator{
- TypeAdapter: DefaultTypeAdapter,
- mapKeys: reflect.ValueOf(a.mapVal).MapRange(),
- len: len(a.mapVal),
- }
-}
-
-func newStringMapAccessor(strMap map[string]string) mapAccessor {
- return &stringMapAccessor{mapVal: strMap}
-}
-
-type stringMapAccessor struct {
- mapVal map[string]string
-}
-
-// Find uses native map accesses to find the key, returning (value, true) if present.
-//
-// If the key is not found the function returns (nil, false).
-func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) {
- strKey, ok := key.(String)
- if !ok {
- return nil, false
- }
- keyVal, found := a.mapVal[string(strKey)]
- if !found {
- return nil, false
- }
- return String(keyVal), true
-}
-
-// Iterator creates a new traits.Iterator from the string key set of the map.
-func (a *stringMapAccessor) Iterator() traits.Iterator {
- // Copy the keys to make their order stable.
- mapKeys := make([]string, len(a.mapVal))
- i := 0
- for k := range a.mapVal {
- mapKeys[i] = k
- i++
- }
- return &stringKeyIterator{
- mapKeys: mapKeys,
- len: len(mapKeys),
- }
-}
-
-func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interface{}) mapAccessor {
- return &stringIfaceMapAccessor{
- TypeAdapter: adapter,
- mapVal: mapVal,
- }
-}
-
-type stringIfaceMapAccessor struct {
- ref.TypeAdapter
- mapVal map[string]interface{}
-}
-
-// Find uses native map accesses to find the key, returning (value, true) if present.
-//
-// If the key is not found the function returns (nil, false).
-func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) {
- strKey, ok := key.(String)
- if !ok {
- return nil, false
- }
- keyVal, found := a.mapVal[string(strKey)]
- if !found {
- return nil, false
- }
- return a.NativeToValue(keyVal), true
-}
-
-// Iterator creates a new traits.Iterator from the string key set of the map.
-func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
- // Copy the keys to make their order stable.
- mapKeys := make([]string, len(a.mapVal))
- i := 0
- for k := range a.mapVal {
- mapKeys[i] = k
- i++
- }
- return &stringKeyIterator{
- mapKeys: mapKeys,
- len: len(mapKeys),
- }
-}
-
-// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to
-// accessing protoreflect.Map values.
-type protoMap struct {
- ref.TypeAdapter
- value *pb.Map
-}
-
-// Contains returns whether the map contains the given key.
-func (m *protoMap) Contains(key ref.Val) ref.Val {
- _, found := m.Find(key)
- return Bool(found)
-}
-
-// ConvertToNative implements the ref.Val interface method.
-//
-// Note, assignment to Golang struct types is not yet supported.
-func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- // If the map is already assignable to the desired type return it, e.g. interfaces and
- // maps with the same key value types.
- switch typeDesc {
- case anyValueType:
- json, err := m.ConvertToNative(jsonStructType)
- if err != nil {
- return nil, err
- }
- return anypb.New(json.(proto.Message))
- case jsonValueType, jsonStructType:
- jsonEntries, err :=
- m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
- if err != nil {
- return nil, err
- }
- jsonMap := &structpb.Struct{
- Fields: jsonEntries.(map[string]*structpb.Value)}
- if typeDesc == jsonStructType {
- return jsonMap, nil
- }
- return structpb.NewStructValue(jsonMap), nil
- }
- switch typeDesc.Kind() {
- case reflect.Struct, reflect.Ptr:
- if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
- return m.value, nil
- }
- if reflect.TypeOf(m).AssignableTo(typeDesc) {
- return m, nil
- }
- }
- if typeDesc.Kind() != reflect.Map {
- return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc)
- }
-
- keyType := m.value.KeyType.ReflectType()
- valType := m.value.ValueType.ReflectType()
- otherKeyType := typeDesc.Key()
- otherValType := typeDesc.Elem()
- mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len())
- var err error
- m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
- ntvKey := key.Interface()
- ntvVal := val.Interface()
- switch ntvVal.(type) {
- case protoreflect.Message:
- ntvVal = ntvVal.(protoreflect.Message).Interface()
- }
- if keyType == otherKeyType && valType == otherValType {
- mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
- return true
- }
- celKey := m.NativeToValue(ntvKey)
- celVal := m.NativeToValue(ntvVal)
- ntvKey, err = celKey.ConvertToNative(otherKeyType)
- if err != nil {
- // early terminate the range loop.
- return false
- }
- ntvVal, err = celVal.ConvertToNative(otherValType)
- if err != nil {
- // early terminate the range loop.
- return false
- }
- mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
- return true
- })
- if err != nil {
- return nil, err
- }
- return mapVal.Interface(), nil
-}
-
-// ConvertToType implements the ref.Val interface method.
-func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case MapType:
- return m
- case TypeType:
- return MapType
- }
- return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
-}
-
-// Equal implements the ref.Val interface method.
-func (m *protoMap) Equal(other ref.Val) ref.Val {
- otherMap, ok := other.(traits.Mapper)
- if !ok {
- return False
- }
- if m.value.Map.Len() != int(otherMap.Size().(Int)) {
- return False
- }
- var retVal ref.Val = True
- m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
- keyVal := m.NativeToValue(key.Interface())
- valVal := m.NativeToValue(val)
- otherVal, found := otherMap.Find(keyVal)
- if !found {
- retVal = False
- return false
- }
- valEq := Equal(valVal, otherVal)
- if valEq != True {
- retVal = valEq
- return false
- }
- return true
- })
- return retVal
-}
-
-// Find returns whether the protoreflect.Map contains the input key.
-//
-// If the key is not found the function returns (nil, false).
-func (m *protoMap) Find(key ref.Val) (ref.Val, bool) {
- if keyVal, found := m.findInternal(key); found {
- return keyVal, true
- }
- switch k := key.(type) {
- // Double is not a valid proto map key type, so check for the key as an int or uint.
- case Double:
- if ik, ok := doubleToInt64Lossless(float64(k)); ok {
- if keyVal, found := m.findInternal(Int(ik)); found {
- return keyVal, true
- }
- }
- if uk, ok := doubleToUint64Lossless(float64(k)); ok {
- return m.findInternal(Uint(uk))
- }
- // map keys of type double are not supported.
- case Int:
- if uk, ok := int64ToUint64Lossless(int64(k)); ok {
- return m.findInternal(Uint(uk))
- }
- case Uint:
- if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
- return m.findInternal(Int(ik))
- }
- }
- return nil, false
-}
-
-// findInternal attempts to convert the incoming key to the map's internal native type
-// and then returns the value, if found.
-func (m *protoMap) findInternal(key ref.Val) (ref.Val, bool) {
- // Convert the input key to the expected protobuf key type.
- ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType())
- if err != nil {
- return nil, false
- }
- // Use protoreflection to get the key value.
- val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey())
- if !val.IsValid() {
- return nil, false
- }
- // Perform nominal type unwrapping from the input value.
- switch v := val.Interface().(type) {
- case protoreflect.List, protoreflect.Map:
- // Maps do not support list or map values
- return nil, false
- default:
- return m.NativeToValue(v), true
- }
-}
-
-// Get implements the traits.Indexer interface method.
-func (m *protoMap) Get(key ref.Val) ref.Val {
- v, found := m.Find(key)
- if !found {
- return ValOrErr(v, "no such key: %v", key)
- }
- return v
-}
-
-// Iterator implements the traits.Iterable interface method.
-func (m *protoMap) Iterator() traits.Iterator {
- // Copy the keys to make their order stable.
- mapKeys := make([]protoreflect.MapKey, 0, m.value.Len())
- m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
- mapKeys = append(mapKeys, k)
- return true
- })
- return &protoMapIterator{
- TypeAdapter: m.TypeAdapter,
- mapKeys: mapKeys,
- len: m.value.Len(),
- }
-}
-
-// Size returns the number of entries in the protoreflect.Map.
-func (m *protoMap) Size() ref.Val {
- return Int(m.value.Len())
-}
-
-// Type implements the ref.Val interface method.
-func (m *protoMap) Type() ref.Type {
- return MapType
-}
-
-// Value implements the ref.Val interface method.
-func (m *protoMap) Value() interface{} {
- return m.value
-}
-
-type mapIterator struct {
- *baseIterator
- ref.TypeAdapter
- mapKeys *reflect.MapIter
- cursor int
- len int
-}
-
-// HasNext implements the traits.Iterator interface method.
-func (it *mapIterator) HasNext() ref.Val {
- return Bool(it.cursor < it.len)
-}
-
-// Next implements the traits.Iterator interface method.
-func (it *mapIterator) Next() ref.Val {
- if it.HasNext() == True && it.mapKeys.Next() {
- it.cursor++
- refKey := it.mapKeys.Key()
- return it.NativeToValue(refKey.Interface())
- }
- return nil
-}
-
-type protoMapIterator struct {
- *baseIterator
- ref.TypeAdapter
- mapKeys []protoreflect.MapKey
- cursor int
- len int
-}
-
-// HasNext implements the traits.Iterator interface method.
-func (it *protoMapIterator) HasNext() ref.Val {
- return Bool(it.cursor < it.len)
-}
-
-// Next implements the traits.Iterator interface method.
-func (it *protoMapIterator) Next() ref.Val {
- if it.HasNext() == True {
- index := it.cursor
- it.cursor++
- refKey := it.mapKeys[index]
- return it.NativeToValue(refKey.Interface())
- }
- return nil
-}
-
-type stringKeyIterator struct {
- *baseIterator
- mapKeys []string
- cursor int
- len int
-}
-
-// HasNext implements the traits.Iterator interface method.
-func (it *stringKeyIterator) HasNext() ref.Val {
- return Bool(it.cursor < it.len)
-}
-
-// Next implements the traits.Iterator interface method.
-func (it *stringKeyIterator) Next() ref.Val {
- if it.HasNext() == True {
- index := it.cursor
- it.cursor++
- return String(it.mapKeys[index])
- }
- return nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/null.go b/etcd/vendor/github.com/google/cel-go/common/types/null.go
deleted file mode 100644
index 3d3503c275..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/null.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
- "google.golang.org/protobuf/proto"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-// Null type implementation.
-type Null structpb.NullValue
-
-var (
- // NullType singleton.
- NullType = NewTypeValue("null_type")
- // NullValue singleton.
- NullValue = Null(structpb.NullValue_NULL_VALUE)
-
- jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
-)
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.Int32:
- return reflect.ValueOf(n).Convert(typeDesc).Interface(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Convert to a JSON-null before packing to an Any field since the enum value for JSON
- // null cannot be packed directly.
- pb, err := n.ConvertToNative(jsonValueType)
- if err != nil {
- return nil, err
- }
- return anypb.New(pb.(proto.Message))
- case jsonValueType:
- return structpb.NewNullValue(), nil
- }
- case reflect.Interface:
- nv := n.Value()
- if reflect.TypeOf(nv).Implements(typeDesc) {
- return nv, nil
- }
- if reflect.TypeOf(n).Implements(typeDesc) {
- return n, nil
- }
- }
- // If the type conversion isn't supported return an error.
- return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (n Null) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case StringType:
- return String("null")
- case NullType:
- return n
- case TypeType:
- return NullType
- }
- return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal)
-}
-
-// Equal implements ref.Val.Equal.
-func (n Null) Equal(other ref.Val) ref.Val {
- return Bool(NullType == other.Type())
-}
-
-// Type implements ref.Val.Type.
-func (n Null) Type() ref.Type {
- return NullType
-}
-
-// Value implements ref.Val.Value.
-func (n Null) Value() interface{} {
- return structpb.NullValue_NULL_VALUE
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/object.go b/etcd/vendor/github.com/google/cel-go/common/types/object.go
deleted file mode 100644
index 5faf855110..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/object.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-type protoObj struct {
- ref.TypeAdapter
- value proto.Message
- typeDesc *pb.TypeDescription
- typeValue *TypeValue
-}
-
-// NewObject returns an object based on a proto.Message value which handles
-// conversion between protobuf type values and expression type values.
-// Objects support indexing and iteration.
-//
-// Note: the type value is pulled from the list of registered types within the
-// type provider. If the proto type is not registered within the type provider,
-// then this will result in an error within the type adapter / provider.
-func NewObject(adapter ref.TypeAdapter,
- typeDesc *pb.TypeDescription,
- typeValue *TypeValue,
- value proto.Message) ref.Val {
- return &protoObj{
- TypeAdapter: adapter,
- value: value,
- typeDesc: typeDesc,
- typeValue: typeValue}
-}
-
-func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- srcPB := o.value
- if reflect.TypeOf(srcPB).AssignableTo(typeDesc) {
- return srcPB, nil
- }
- if reflect.TypeOf(o).AssignableTo(typeDesc) {
- return o, nil
- }
- switch typeDesc {
- case anyValueType:
- _, isAny := srcPB.(*anypb.Any)
- if isAny {
- return srcPB, nil
- }
- return anypb.New(srcPB)
- case jsonValueType:
- // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no
- // support for direct conversion from proto.Message to protobuf.Value.
- bytes, err := protojson.Marshal(srcPB)
- if err != nil {
- return nil, err
- }
- json := &structpb.Value{}
- err = protojson.Unmarshal(bytes, json)
- if err != nil {
- return nil, err
- }
- return json, nil
- default:
- if typeDesc == o.typeDesc.ReflectType() {
- return o.value, nil
- }
- if typeDesc.Kind() == reflect.Ptr {
- val := reflect.New(typeDesc.Elem()).Interface()
- dstPB, ok := val.(proto.Message)
- if ok {
- err := pb.Merge(dstPB, srcPB)
- if err != nil {
- return nil, fmt.Errorf("type conversion error: %v", err)
- }
- return dstPB, nil
- }
- }
- }
- return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc)
-}
-
-func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- default:
- if o.Type().TypeName() == typeVal.TypeName() {
- return o
- }
- case TypeType:
- return o.typeValue
- }
- return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal)
-}
-
-func (o *protoObj) Equal(other ref.Val) ref.Val {
- otherPB, ok := other.Value().(proto.Message)
- return Bool(ok && pb.Equal(o.value, otherPB))
-}
-
-// IsSet tests whether a field which is defined is set to a non-default value.
-func (o *protoObj) IsSet(field ref.Val) ref.Val {
- protoFieldName, ok := field.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(field)
- }
- protoFieldStr := string(protoFieldName)
- fd, found := o.typeDesc.FieldByName(protoFieldStr)
- if !found {
- return NewErr("no such field '%s'", field)
- }
- if fd.IsSet(o.value) {
- return True
- }
- return False
-}
-
-func (o *protoObj) Get(index ref.Val) ref.Val {
- protoFieldName, ok := index.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(index)
- }
- protoFieldStr := string(protoFieldName)
- fd, found := o.typeDesc.FieldByName(protoFieldStr)
- if !found {
- return NewErr("no such field '%s'", index)
- }
- fv, err := fd.GetFrom(o.value)
- if err != nil {
- return NewErr(err.Error())
- }
- return o.NativeToValue(fv)
-}
-
-func (o *protoObj) Type() ref.Type {
- return o.typeValue
-}
-
-func (o *protoObj) Value() interface{} {
- return o.value
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/overflow.go b/etcd/vendor/github.com/google/cel-go/common/types/overflow.go
deleted file mode 100644
index c68a921826..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/overflow.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "math"
- "time"
-)
-
-var (
- doubleTwoTo64 = math.Ldexp(1.0, 64)
-)
-
-// addInt64Checked performs addition with overflow detection of two int64 values.
-//
-// If the operation fails the error return value will be non-nil.
-func addInt64Checked(x, y int64) (int64, error) {
- if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
- return 0, errIntOverflow
- }
- return x + y, nil
-}
-
-// subtractInt64Checked performs subtraction with overflow detection of two int64 values.
-//
-// If the operation fails the error return value will be non-nil.
-func subtractInt64Checked(x, y int64) (int64, error) {
- if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
- return 0, errIntOverflow
- }
- return x - y, nil
-}
-
-// negateInt64Checked performs negation with overflow detection of an int64.
-//
-// If the operation fails the error return value will be non-nil.
-func negateInt64Checked(x int64) (int64, error) {
- // In twos complement, negating MinInt64 would result in a valid of MaxInt64+1.
- if x == math.MinInt64 {
- return 0, errIntOverflow
- }
- return -x, nil
-}
-
-// multiplyInt64Checked performs multiplication with overflow detection of two int64 value.
-//
-// If the operation fails the error return value will be non-nil.
-func multiplyInt64Checked(x, y int64) (int64, error) {
- // Detecting multiplication overflow is more complicated than the others. The first two detect
- // attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal
- // overflow conditions.
- if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
- // x is positive, y is positive
- (x > 0 && y > 0 && x > math.MaxInt64/y) ||
- // x is positive, y is negative
- (x > 0 && y < 0 && y < math.MinInt64/x) ||
- // x is negative, y is positive
- (x < 0 && y > 0 && x < math.MinInt64/y) ||
- // x is negative, y is negative
- (x < 0 && y < 0 && y < math.MaxInt64/x) {
- return 0, errIntOverflow
- }
- return x * y, nil
-}
-
-// divideInt64Checked performs division with overflow detection of two int64 values,
-// as well as a division by zero check.
-//
-// If the operation fails the error return value will be non-nil.
-func divideInt64Checked(x, y int64) (int64, error) {
- // Division by zero.
- if y == 0 {
- return 0, errDivideByZero
- }
- // In twos complement, negating MinInt64 would result in a valid of MaxInt64+1.
- if x == math.MinInt64 && y == -1 {
- return 0, errIntOverflow
- }
- return x / y, nil
-}
-
-// moduloInt64Checked performs modulo with overflow detection of two int64 values
-// as well as a modulus by zero check.
-//
-// If the operation fails the error return value will be non-nil.
-func moduloInt64Checked(x, y int64) (int64, error) {
- // Modulus by zero.
- if y == 0 {
- return 0, errModulusByZero
- }
- // In twos complement, negating MinInt64 would result in a valid of MaxInt64+1.
- if x == math.MinInt64 && y == -1 {
- return 0, errIntOverflow
- }
- return x % y, nil
-}
-
-// addUint64Checked performs addition with overflow detection of two uint64 values.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func addUint64Checked(x, y uint64) (uint64, error) {
- if y > 0 && x > math.MaxUint64-y {
- return 0, errUintOverflow
- }
- return x + y, nil
-}
-
-// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func subtractUint64Checked(x, y uint64) (uint64, error) {
- if y > x {
- return 0, errUintOverflow
- }
- return x - y, nil
-}
-
-// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func multiplyUint64Checked(x, y uint64) (uint64, error) {
- if y != 0 && x > math.MaxUint64/y {
- return 0, errUintOverflow
- }
- return x * y, nil
-}
-
-// divideUint64Checked performs division with a test for division by zero.
-//
-// If the operation fails the error return value will be non-nil.
-func divideUint64Checked(x, y uint64) (uint64, error) {
- if y == 0 {
- return 0, errDivideByZero
- }
- return x / y, nil
-}
-
-// moduloUint64Checked performs modulo with a test for modulus by zero.
-//
-// If the operation fails the error return value will be non-nil.
-func moduloUint64Checked(x, y uint64) (uint64, error) {
- if y == 0 {
- return 0, errModulusByZero
- }
- return x % y, nil
-}
-
-// addDurationChecked performs addition with overflow detection of two time.Durations.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func addDurationChecked(x, y time.Duration) (time.Duration, error) {
- val, err := addInt64Checked(int64(x), int64(y))
- if err != nil {
- return time.Duration(0), err
- }
- return time.Duration(val), nil
-}
-
-// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
- val, err := subtractInt64Checked(int64(x), int64(y))
- if err != nil {
- return time.Duration(0), err
- }
- return time.Duration(val), nil
-}
-
-// negateDurationChecked performs negation with overflow detection of a time.Duration.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func negateDurationChecked(x time.Duration) (time.Duration, error) {
- val, err := negateInt64Checked(int64(x))
- if err != nil {
- return time.Duration(0), err
- }
- return time.Duration(val), nil
-}
-
-// addDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
- // This is tricky. A time is represented as (int64, int32) where the first is seconds and second
- // is nanoseconds. A duration is int64 representing nanoseconds. We cannot normalize time to int64
- // as it could potentially overflow. The only way to proceed is to break time and duration into
- // second and nanosecond components.
-
- // First we break time into its components by truncating and subtracting.
- sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
- nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
-
- // Second we break duration into its components by dividing and modulo.
- sec2 := int64(y) / int64(time.Second) // Truncate to seconds.
- nsec2 := int64(y) % int64(time.Second) // Get remainder.
-
- // Add seconds first, detecting any overflow.
- sec, err := addInt64Checked(sec1, sec2)
- if err != nil {
- return time.Time{}, err
- }
- // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
- nsec := nsec1 + nsec2
-
- // We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
- // Adapted from time.Unix(int64, int64).
- if nsec < 0 || nsec >= int64(time.Second) {
- // Add seconds.
- sec, err = addInt64Checked(sec, nsec/int64(time.Second))
- if err != nil {
- return time.Time{}, err
- }
-
- nsec -= (nsec / int64(time.Second)) * int64(time.Second)
- if nsec < 0 {
- // Subtract an extra second
- sec, err = addInt64Checked(sec, -1)
- if err != nil {
- return time.Time{}, err
- }
- nsec += int64(time.Second)
- }
- }
-
- // Check if the the number of seconds from Unix epoch is within our acceptable range.
- if sec < minUnixTime || sec > maxUnixTime {
- return time.Time{}, errTimestampOverflow
- }
-
- // Return resulting time and propagate time zone.
- return time.Unix(sec, nsec).In(x.Location()), nil
-}
-
-// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
- // Similar to addTimeDurationOverflow() above.
-
- // First we break time into its components by truncating and subtracting.
- sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
- nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
-
- // Second we break duration into its components by truncating and subtracting.
- sec2 := y.Truncate(time.Second).Unix() // Truncate to seconds.
- nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
-
- // Subtract seconds first, detecting any overflow.
- sec, err := subtractInt64Checked(sec1, sec2)
- if err != nil {
- return time.Duration(0), err
- }
-
- // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
- nsec := nsec1 - nsec2
-
- // Scale seconds to nanoseconds detecting overflow.
- tsec, err := multiplyInt64Checked(sec, int64(time.Second))
- if err != nil {
- return time.Duration(0), err
- }
-
- // Lastly we need to add the two nanoseconds together.
- val, err := addInt64Checked(tsec, nsec)
- if err != nil {
- return time.Duration(0), err
- }
-
- return time.Duration(val), nil
-}
-
-// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
-// time.Duration.
-//
-// If the operation fails due to overflow the error return value will be non-nil.
-func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
- // The easiest way to implement this is to negate y and add them.
- // x - y = x + -y
- val, err := negateDurationChecked(y)
- if err != nil {
- return time.Time{}, err
- }
- return addTimeDurationChecked(x, val)
-}
-
-// doubleToInt64Checked converts a double to an int64 value.
-//
-// If the conversion fails due to overflow the error return value will be non-nil.
-func doubleToInt64Checked(v float64) (int64, error) {
- if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
- return 0, errIntOverflow
- }
- return int64(v), nil
-}
-
-// doubleToInt64Checked converts a double to a uint64 value.
-//
-// If the conversion fails due to overflow the error return value will be non-nil.
-func doubleToUint64Checked(v float64) (uint64, error) {
- if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
- return 0, errUintOverflow
- }
- return uint64(v), nil
-}
-
-// int64ToUint64Checked converts an int64 to a uint64 value.
-//
-// If the conversion fails due to overflow the error return value will be non-nil.
-func int64ToUint64Checked(v int64) (uint64, error) {
- if v < 0 {
- return 0, errUintOverflow
- }
- return uint64(v), nil
-}
-
-// int64ToInt32Checked converts an int64 to an int32 value.
-//
-// If the conversion fails due to overflow the error return value will be non-nil.
-func int64ToInt32Checked(v int64) (int32, error) {
- if v < math.MinInt32 || v > math.MaxInt32 {
- return 0, errIntOverflow
- }
- return int32(v), nil
-}
-
-// uint64ToUint32Checked converts a uint64 to a uint32 value.
-//
-// If the conversion fails due to overflow the error return value will be non-nil.
-func uint64ToUint32Checked(v uint64) (uint32, error) {
- if v > math.MaxUint32 {
- return 0, errUintOverflow
- }
- return uint32(v), nil
-}
-
-// uint64ToInt64Checked converts a uint64 to an int64 value.
-//
-// If the conversion fails due to overflow the error return value will be non-nil.
-func uint64ToInt64Checked(v uint64) (int64, error) {
- if v > math.MaxInt64 {
- return 0, errIntOverflow
- }
- return int64(v), nil
-}
-
-func doubleToUint64Lossless(v float64) (uint64, bool) {
- u, err := doubleToUint64Checked(v)
- if err != nil {
- return 0, false
- }
- if float64(u) != v {
- return 0, false
- }
- return u, true
-}
-
-func doubleToInt64Lossless(v float64) (int64, bool) {
- i, err := doubleToInt64Checked(v)
- if err != nil {
- return 0, false
- }
- if float64(i) != v {
- return 0, false
- }
- return i, true
-}
-
-func int64ToUint64Lossless(v int64) (uint64, bool) {
- u, err := int64ToUint64Checked(v)
- return u, err == nil
-}
-
-func uint64ToInt64Lossless(v uint64) (int64, bool) {
- i, err := uint64ToInt64Checked(v)
- return i, err == nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
deleted file mode 100644
index f23ac9c0e2..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
+++ /dev/null
@@ -1,53 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "checked.go",
- "enum.go",
- "equal.go",
- "file.go",
- "pb.go",
- "type.go",
- ],
- importpath = "github.com/google/cel-go/common/types/pb",
- deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//encoding/protowire:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
- "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
- "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
- "@org_golang_google_protobuf//types/known/anypb:go_default_library",
- "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
- "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
- "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "equal_test.go",
- "file_test.go",
- "pb_test.go",
- "type_test.go",
- ],
- embed = [":go_default_library"],
- deps = [
- "//checker/decls:go_default_library",
- "//test/proto2pb:test_all_types_go_proto",
- "//test/proto3pb:test_all_types_go_proto",
- "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
- "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
- "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/checked.go b/etcd/vendor/github.com/google/cel-go/common/types/pb/checked.go
deleted file mode 100644
index 312a6a072f..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/checked.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pb
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-var (
- // CheckedPrimitives map from proto field descriptor type to expr.Type.
- CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{
- protoreflect.BoolKind: checkedBool,
- protoreflect.BytesKind: checkedBytes,
- protoreflect.DoubleKind: checkedDouble,
- protoreflect.FloatKind: checkedDouble,
- protoreflect.Int32Kind: checkedInt,
- protoreflect.Int64Kind: checkedInt,
- protoreflect.Sint32Kind: checkedInt,
- protoreflect.Sint64Kind: checkedInt,
- protoreflect.Uint32Kind: checkedUint,
- protoreflect.Uint64Kind: checkedUint,
- protoreflect.Fixed32Kind: checkedUint,
- protoreflect.Fixed64Kind: checkedUint,
- protoreflect.Sfixed32Kind: checkedInt,
- protoreflect.Sfixed64Kind: checkedInt,
- protoreflect.StringKind: checkedString}
-
- // CheckedWellKnowns map from qualified proto type name to expr.Type for
- // well-known proto types.
- CheckedWellKnowns = map[string]*exprpb.Type{
- // Wrapper types.
- "google.protobuf.BoolValue": checkedWrap(checkedBool),
- "google.protobuf.BytesValue": checkedWrap(checkedBytes),
- "google.protobuf.DoubleValue": checkedWrap(checkedDouble),
- "google.protobuf.FloatValue": checkedWrap(checkedDouble),
- "google.protobuf.Int64Value": checkedWrap(checkedInt),
- "google.protobuf.Int32Value": checkedWrap(checkedInt),
- "google.protobuf.UInt64Value": checkedWrap(checkedUint),
- "google.protobuf.UInt32Value": checkedWrap(checkedUint),
- "google.protobuf.StringValue": checkedWrap(checkedString),
- // Well-known types.
- "google.protobuf.Any": checkedAny,
- "google.protobuf.Duration": checkedDuration,
- "google.protobuf.Timestamp": checkedTimestamp,
- // Json types.
- "google.protobuf.ListValue": checkedListDyn,
- "google.protobuf.NullValue": checkedNull,
- "google.protobuf.Struct": checkedMapStringDyn,
- "google.protobuf.Value": checkedDyn,
- }
-
- // common types
- checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}
- // Wrapper and primitive types.
- checkedBool = checkedPrimitive(exprpb.Type_BOOL)
- checkedBytes = checkedPrimitive(exprpb.Type_BYTES)
- checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE)
- checkedInt = checkedPrimitive(exprpb.Type_INT64)
- checkedString = checkedPrimitive(exprpb.Type_STRING)
- checkedUint = checkedPrimitive(exprpb.Type_UINT64)
- // Well-known type equivalents.
- checkedAny = checkedWellKnown(exprpb.Type_ANY)
- checkedDuration = checkedWellKnown(exprpb.Type_DURATION)
- checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP)
- // Json-based type equivalents.
- checkedNull = &exprpb.Type{
- TypeKind: &exprpb.Type_Null{
- Null: structpb.NullValue_NULL_VALUE}}
- checkedListDyn = &exprpb.Type{
- TypeKind: &exprpb.Type_ListType_{
- ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}}
- checkedMapStringDyn = &exprpb.Type{
- TypeKind: &exprpb.Type_MapType_{
- MapType: &exprpb.Type_MapType{
- KeyType: checkedString,
- ValueType: checkedDyn}}}
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/enum.go b/etcd/vendor/github.com/google/cel-go/common/types/pb/enum.go
deleted file mode 100644
index 4a26b5c7c3..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/enum.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pb
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// NewEnumValueDescription produces an enum value description with the fully qualified enum value
-// name and the enum value descriptor.
-func NewEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
- return &EnumValueDescription{
- enumValueName: name,
- desc: desc,
- }
-}
-
-// EnumValueDescription maps a fully-qualified enum value name to its numeric value.
-type EnumValueDescription struct {
- enumValueName string
- desc protoreflect.EnumValueDescriptor
-}
-
-// Name returns the fully-qualified identifier name for the enum value.
-func (ed *EnumValueDescription) Name() string {
- return ed.enumValueName
-}
-
-// Value returns the (numeric) value of the enum.
-func (ed *EnumValueDescription) Value() int32 {
- return int32(ed.desc.Number())
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/equal.go b/etcd/vendor/github.com/google/cel-go/common/types/pb/equal.go
deleted file mode 100644
index 76893d85ea..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/equal.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pb
-
-import (
- "bytes"
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
-)
-
-// Equal returns whether two proto.Message instances are equal using the following criteria:
-//
-// - Messages must share the same instance of the type descriptor
-// - Known set fields are compared using semantics equality
-// - Bytes are compared using bytes.Equal
-// - Scalar values are compared with operator ==
-// - List and map types are equal if they have the same length and all elements are equal
-// - Messages are equal if they share the same descriptor and all set fields are equal
-// - Unknown fields are compared using byte equality
-// - NaN values are not equal to each other
-// - google.protobuf.Any values are unpacked before comparison
-// - If the type descriptor for a protobuf.Any cannot be found, byte equality is used rather than
-// semantic equality.
-//
-// This method of proto equality mirrors the behavior of the C++ protobuf MessageDifferencer
-// whereas the golang proto.Equal implementation mirrors the Java protobuf equals() methods
-// behaviors which needed to treat NaN values as equal due to Java semantics.
-func Equal(x, y proto.Message) bool {
- if x == nil || y == nil {
- return x == nil && y == nil
- }
- xRef := x.ProtoReflect()
- yRef := y.ProtoReflect()
- return equalMessage(xRef, yRef)
-}
-
-func equalMessage(mx, my protoreflect.Message) bool {
- // Note, the original proto.Equal upon which this implementation is based does not specifically handle the
- // case when both messages are invalid. It is assumed that the descriptors will be equal and that byte-wise
- // comparison will be used, though the semantics of validity are neither clear, nor promised within the
- // proto.Equal implementation.
- if mx.IsValid() != my.IsValid() || mx.Descriptor() != my.Descriptor() {
- return false
- }
-
- // This is an innovation on the default proto.Equal where protobuf.Any values are unpacked before comparison
- // as otherwise the Any values are compared by bytes rather than structurally.
- if isAny(mx) && isAny(my) {
- ax := mx.Interface().(*anypb.Any)
- ay := my.Interface().(*anypb.Any)
- // If the values are not the same type url, return false.
- if ax.GetTypeUrl() != ay.GetTypeUrl() {
- return false
- }
- // If the values are byte equal, then return true.
- if bytes.Equal(ax.GetValue(), ay.GetValue()) {
- return true
- }
- // Otherwise fall through to the semantic comparison of the any values.
- x, err := ax.UnmarshalNew()
- if err != nil {
- return false
- }
- y, err := ay.UnmarshalNew()
- if err != nil {
- return false
- }
- // Recursively compare the unwrapped messages to ensure nested Any values are unwrapped accordingly.
- return equalMessage(x.ProtoReflect(), y.ProtoReflect())
- }
-
- // Walk the set fields to determine field-wise equality
- nx := 0
- equal := true
- mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
- nx++
- equal = my.Has(fd) && equalField(fd, vx, my.Get(fd))
- return equal
- })
- if !equal {
- return false
- }
- // Establish the count of set fields on message y
- ny := 0
- my.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
- ny++
- return true
- })
- // If the number of set fields is not equal return false.
- if nx != ny {
- return false
- }
-
- return equalUnknown(mx.GetUnknown(), my.GetUnknown())
-}
-
-func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
- switch {
- case fd.IsMap():
- return equalMap(fd, x.Map(), y.Map())
- case fd.IsList():
- return equalList(fd, x.List(), y.List())
- default:
- return equalValue(fd, x, y)
- }
-}
-
-func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
- if x.Len() != y.Len() {
- return false
- }
- equal := true
- x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
- vy := y.Get(k)
- equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
- return equal
- })
- return equal
-}
-
-func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
- if x.Len() != y.Len() {
- return false
- }
- for i := x.Len() - 1; i >= 0; i-- {
- if !equalValue(fd, x.Get(i), y.Get(i)) {
- return false
- }
- }
- return true
-}
-
-func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
- switch fd.Kind() {
- case protoreflect.BoolKind:
- return x.Bool() == y.Bool()
- case protoreflect.EnumKind:
- return x.Enum() == y.Enum()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind,
- protoreflect.Int64Kind, protoreflect.Sint64Kind,
- protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
- return x.Int() == y.Int()
- case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
- protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
- return x.Uint() == y.Uint()
- case protoreflect.FloatKind, protoreflect.DoubleKind:
- return x.Float() == y.Float()
- case protoreflect.StringKind:
- return x.String() == y.String()
- case protoreflect.BytesKind:
- return bytes.Equal(x.Bytes(), y.Bytes())
- case protoreflect.MessageKind, protoreflect.GroupKind:
- return equalMessage(x.Message(), y.Message())
- default:
- return x.Interface() == y.Interface()
- }
-}
-
-func equalUnknown(x, y protoreflect.RawFields) bool {
- lenX := len(x)
- lenY := len(y)
- if lenX != lenY {
- return false
- }
- if lenX == 0 {
- return true
- }
- if bytes.Equal([]byte(x), []byte(y)) {
- return true
- }
-
- mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
- my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
- for len(x) > 0 {
- fnum, _, n := protowire.ConsumeField(x)
- mx[fnum] = append(mx[fnum], x[:n]...)
- x = x[n:]
- }
- for len(y) > 0 {
- fnum, _, n := protowire.ConsumeField(y)
- my[fnum] = append(my[fnum], y[:n]...)
- y = y[n:]
- }
- return reflect.DeepEqual(mx, my)
-}
-
-func isAny(m protoreflect.Message) bool {
- return string(m.Descriptor().FullName()) == "google.protobuf.Any"
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/file.go b/etcd/vendor/github.com/google/cel-go/common/types/pb/file.go
deleted file mode 100644
index 0bcade75f9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/file.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pb
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// NewFileDescription returns a FileDescription instance with a complete listing of all the message
-// types and enum values declared within any scope in the file.
-func NewFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) *FileDescription {
- metadata := collectFileMetadata(fileDesc)
- enums := make(map[string]*EnumValueDescription)
- for name, enumVal := range metadata.enumValues {
- enums[name] = NewEnumValueDescription(name, enumVal)
- }
- types := make(map[string]*TypeDescription)
- for name, msgType := range metadata.msgTypes {
- types[name] = NewTypeDescription(name, msgType)
- }
- return &FileDescription{
- types: types,
- enums: enums,
- }
-}
-
-// FileDescription holds a map of all types and enum values declared within a proto file.
-type FileDescription struct {
- types map[string]*TypeDescription
- enums map[string]*EnumValueDescription
-}
-
-// GetEnumDescription returns an EnumDescription for a qualified enum value
-// name declared within the .proto file.
-func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) {
- ed, found := fd.enums[sanitizeProtoName(enumName)]
- return ed, found
-}
-
-// GetEnumNames returns the string names of all enum values in the file.
-func (fd *FileDescription) GetEnumNames() []string {
- enumNames := make([]string, len(fd.enums))
- i := 0
- for _, e := range fd.enums {
- enumNames[i] = e.Name()
- i++
- }
- return enumNames
-}
-
-// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name
-// declared within the .proto file.
-func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) {
- td, found := fd.types[sanitizeProtoName(typeName)]
- return td, found
-}
-
-// GetTypeNames returns the list of all type names contained within the file.
-func (fd *FileDescription) GetTypeNames() []string {
- typeNames := make([]string, len(fd.types))
- i := 0
- for _, t := range fd.types {
- typeNames[i] = t.Name()
- i++
- }
- return typeNames
-}
-
-// sanitizeProtoName strips the leading '.' from the proto message name.
-func sanitizeProtoName(name string) string {
- if name != "" && name[0] == '.' {
- return name[1:]
- }
- return name
-}
-
-// fileMetadata is a flattened view of message types and enum values within a file descriptor.
-type fileMetadata struct {
- // msgTypes maps from fully-qualified message name to descriptor.
- msgTypes map[string]protoreflect.MessageDescriptor
- // enumValues maps from fully-qualified enum value to enum value descriptor.
- enumValues map[string]protoreflect.EnumValueDescriptor
- // TODO: support enum type definitions for use in future type-check enhancements.
-}
-
-// collectFileMetadata traverses the proto file object graph to collect message types and enum
-// values and index them by their fully qualified names.
-func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata {
- msgTypes := make(map[string]protoreflect.MessageDescriptor)
- enumValues := make(map[string]protoreflect.EnumValueDescriptor)
- collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues)
- collectEnumValues(fileDesc.Enums(), enumValues)
- return &fileMetadata{
- msgTypes: msgTypes,
- enumValues: enumValues,
- }
-}
-
-// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of
-// fully qualified protobuf names to descriptors.
-func collectMsgTypes(msgTypes protoreflect.MessageDescriptors, msgTypeMap map[string]protoreflect.MessageDescriptor, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
- for i := 0; i < msgTypes.Len(); i++ {
- msgType := msgTypes.Get(i)
- msgTypeMap[string(msgType.FullName())] = msgType
- nestedMsgTypes := msgType.Messages()
- if nestedMsgTypes.Len() != 0 {
- collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap)
- }
- nestedEnumTypes := msgType.Enums()
- if nestedEnumTypes.Len() != 0 {
- collectEnumValues(nestedEnumTypes, enumValueMap)
- }
- }
-}
-
-// collectEnumValues accumulates the enum values within an enum declaration.
-func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
- for i := 0; i < enumTypes.Len(); i++ {
- enumType := enumTypes.Get(i)
- enumTypeValues := enumType.Values()
- for j := 0; j < enumTypeValues.Len(); j++ {
- enumValue := enumTypeValues.Get(j)
- enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name()))
- enumValueMap[enumValueName] = enumValue
- }
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/pb.go b/etcd/vendor/github.com/google/cel-go/common/types/pb/pb.go
deleted file mode 100644
index 457b47ceee..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/pb.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package pb reflects over protocol buffer descriptors to generate objects
-// that simplify type, enum, and field lookup.
-package pb
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- durpb "google.golang.org/protobuf/types/known/durationpb"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- tspb "google.golang.org/protobuf/types/known/timestamppb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// Db maps from file / message / enum name to file description.
-//
-// Each Db is isolated from each other, and while information about protobuf descriptors may be
-// fetched from the global protobuf registry, no descriptors are added to this registry, else
-// the isolation guarantees of the Db object would be violated.
-type Db struct {
- revFileDescriptorMap map[string]*FileDescription
- // files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
- files []*FileDescription
-}
-
-var (
- // DefaultDb used at evaluation time or unless overridden at check time.
- DefaultDb = &Db{
- revFileDescriptorMap: make(map[string]*FileDescription),
- files: []*FileDescription{},
- }
-)
-
-// Merge will copy the source proto message into the destination, or error if the merge cannot be completed.
-//
-// Unlike the proto.Merge, this method will fallback to proto.Marshal/Unmarshal of the two proto messages do not
-// share the same instance of their type descriptor.
-func Merge(dstPB, srcPB proto.Message) error {
- src, dst := srcPB.ProtoReflect(), dstPB.ProtoReflect()
- if src.Descriptor() == dst.Descriptor() {
- proto.Merge(dstPB, srcPB)
- return nil
- }
- if src.Descriptor().FullName() != dst.Descriptor().FullName() {
- return fmt.Errorf("pb.Merge() arguments must be the same type. got: %v, %v",
- dst.Descriptor().FullName(), src.Descriptor().FullName())
- }
- bytes, err := proto.Marshal(srcPB)
- if err != nil {
- return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to marshal source proto: %v", err)
- }
- err = proto.Unmarshal(bytes, dstPB)
- if err != nil {
- return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to unmarshal to dest proto: %v", err)
- }
- return nil
-}
-
-// NewDb creates a new `pb.Db` with an empty type name to file description map.
-func NewDb() *Db {
- pbdb := &Db{
- revFileDescriptorMap: make(map[string]*FileDescription),
- files: []*FileDescription{},
- }
- // The FileDescription objects in the default db contain lazily initialized TypeDescription
- // values which may point to the state contained in the DefaultDb irrespective of this shallow
- // copy; however, the type graph for a field is idempotently computed, and is guaranteed to
- // only be initialized once thanks to atomic values within the TypeDescription objects, so it
- // is safe to share these values across instances.
- for k, v := range DefaultDb.revFileDescriptorMap {
- pbdb.revFileDescriptorMap[k] = v
- }
- pbdb.files = append(pbdb.files, DefaultDb.files...)
- return pbdb
-}
-
-// Copy creates a copy of the current database with its own internal descriptor mapping.
-func (pbdb *Db) Copy() *Db {
- copy := NewDb()
- for k, v := range pbdb.revFileDescriptorMap {
- copy.revFileDescriptorMap[k] = v
- }
- for _, f := range pbdb.files {
- hasFile := false
- for _, f2 := range copy.files {
- if f2 == f {
- hasFile = true
- }
- }
- if !hasFile {
- copy.files = append(copy.files, f)
- }
- }
- return copy
-}
-
-// FileDescriptions returns the set of file descriptions associated with this db.
-func (pbdb *Db) FileDescriptions() []*FileDescription {
- return pbdb.files
-}
-
-// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the
-// message and enum types into the `pb.Db`.
-func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) {
- fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()]
- if found {
- return fd, nil
- }
- // Make sure to search the global registry to see if a protoreflect.FileDescriptor for
- // the file specified has been linked into the binary. If so, use the copy of the descriptor
- // from the global cache.
- //
- // Note: Proto reflection relies on descriptor values being object equal rather than object
- // equivalence. This choice means that a FieldDescriptor generated from a FileDescriptorProto
- // will be incompatible with the FieldDescriptor in the global registry and any message created
- // from that global registry.
- globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path())
- if err == nil {
- fileDesc = globalFD
- }
- fd = NewFileDescription(fileDesc, pbdb)
- for _, enumValName := range fd.GetEnumNames() {
- pbdb.revFileDescriptorMap[enumValName] = fd
- }
- for _, msgTypeName := range fd.GetTypeNames() {
- pbdb.revFileDescriptorMap[msgTypeName] = fd
- }
- pbdb.revFileDescriptorMap[fileDesc.Path()] = fd
-
- // Return the specific file descriptor registered.
- pbdb.files = append(pbdb.files, fd)
- return fd, nil
-}
-
-// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all
-// other definitions within the message file into the `pb.Db`.
-func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) {
- msgDesc := message.ProtoReflect().Descriptor()
- msgName := msgDesc.FullName()
- typeName := sanitizeProtoName(string(msgName))
- if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
- return fd, nil
- }
- return pbdb.RegisterDescriptor(msgDesc.ParentFile())
-}
-
-// DescribeEnum takes a qualified enum name and returns an `EnumDescription` if it exists in the
-// `pb.Db`.
-func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) {
- enumName = sanitizeProtoName(enumName)
- if fd, found := pbdb.revFileDescriptorMap[enumName]; found {
- return fd.GetEnumDescription(enumName)
- }
- return nil, false
-}
-
-// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`.
-func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) {
- typeName = sanitizeProtoName(typeName)
- if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
- return fd.GetTypeDescription(typeName)
- }
- return nil, false
-}
-
-// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input
-// message is declared.
-func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor {
- fdMap := map[string]protoreflect.FileDescriptor{}
- parentFile := message.ProtoReflect().Descriptor().ParentFile()
- fdMap[parentFile.Path()] = parentFile
- // Initialize list of dependencies
- deps := make([]protoreflect.FileImport, parentFile.Imports().Len())
- for i := 0; i < parentFile.Imports().Len(); i++ {
- deps[i] = parentFile.Imports().Get(i)
- }
- // Expand list for new dependencies
- for i := 0; i < len(deps); i++ {
- dep := deps[i]
- if _, found := fdMap[dep.Path()]; found {
- continue
- }
- fdMap[dep.Path()] = dep.FileDescriptor
- for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ {
- deps = append(deps, dep.FileDescriptor.Imports().Get(j))
- }
- }
- return fdMap
-}
-
-func init() {
- // Describe well-known types to ensure they can always be resolved by the check and interpret
- // execution phases.
- //
- // The following subset of message types is enough to ensure that all well-known types can
- // resolved in the runtime, since describing the value results in describing the whole file
- // where the message is declared.
- DefaultDb.RegisterMessage(&anypb.Any{})
- DefaultDb.RegisterMessage(&durpb.Duration{})
- DefaultDb.RegisterMessage(&emptypb.Empty{})
- DefaultDb.RegisterMessage(&tspb.Timestamp{})
- DefaultDb.RegisterMessage(&structpb.Value{})
- DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/pb/type.go b/etcd/vendor/github.com/google/cel-go/common/types/pb/type.go
deleted file mode 100644
index 912076fa48..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/pb/type.go
+++ /dev/null
@@ -1,552 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pb
-
-import (
- "fmt"
- "reflect"
-
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- dynamicpb "google.golang.org/protobuf/types/dynamicpb"
- anypb "google.golang.org/protobuf/types/known/anypb"
- dpb "google.golang.org/protobuf/types/known/durationpb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- tpb "google.golang.org/protobuf/types/known/timestamppb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// description is a private interface used to make it convenient to perform type unwrapping at
-// the TypeDescription or FieldDescription level.
-type description interface {
- // Zero returns an empty immutable protobuf message when the description is a protobuf message
- // type.
- Zero() proto.Message
-}
-
-// NewTypeDescription produces a TypeDescription value for the fully-qualified proto type name
-// with a given descriptor.
-func NewTypeDescription(typeName string, desc protoreflect.MessageDescriptor) *TypeDescription {
- msgType := dynamicpb.NewMessageType(desc)
- msgZero := dynamicpb.NewMessage(desc)
- fieldMap := map[string]*FieldDescription{}
- fields := desc.Fields()
- for i := 0; i < fields.Len(); i++ {
- f := fields.Get(i)
- fieldMap[string(f.Name())] = NewFieldDescription(f)
- }
- return &TypeDescription{
- typeName: typeName,
- desc: desc,
- msgType: msgType,
- fieldMap: fieldMap,
- reflectType: reflectTypeOf(msgZero),
- zeroMsg: zeroValueOf(msgZero),
- }
-}
-
-// TypeDescription is a collection of type metadata relevant to expression
-// checking and evaluation.
-type TypeDescription struct {
- typeName string
- desc protoreflect.MessageDescriptor
- msgType protoreflect.MessageType
- fieldMap map[string]*FieldDescription
- reflectType reflect.Type
- zeroMsg proto.Message
-}
-
-// FieldMap returns a string field name to FieldDescription map.
-func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
- return td.fieldMap
-}
-
-// FieldByName returns (FieldDescription, true) if the field name is declared within the type.
-func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
- fd, found := td.fieldMap[name]
- if !found {
- return nil, false
- }
- return fd, true
-}
-
-// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible.
-//
-// This method returns the unwrapped value and 'true', else the original value and 'false'.
-func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (interface{}, bool, error) {
- return unwrap(td, msg)
-}
-
-// Name returns the fully-qualified name of the type.
-func (td *TypeDescription) Name() string {
- return string(td.desc.FullName())
-}
-
-// New returns a mutable proto message
-func (td *TypeDescription) New() protoreflect.Message {
- return td.msgType.New()
-}
-
-// ReflectType returns the Golang reflect.Type for this type.
-func (td *TypeDescription) ReflectType() reflect.Type {
- return td.reflectType
-}
-
-// Zero returns the zero proto.Message value for this type.
-func (td *TypeDescription) Zero() proto.Message {
- return td.zeroMsg
-}
-
-// NewFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
-func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
- var reflectType reflect.Type
- var zeroMsg proto.Message
- switch fieldDesc.Kind() {
- case protoreflect.EnumKind:
- reflectType = reflectTypeOf(protoreflect.EnumNumber(0))
- case protoreflect.GroupKind, protoreflect.MessageKind:
- zeroMsg = dynamicpb.NewMessage(fieldDesc.Message())
- reflectType = reflectTypeOf(zeroMsg)
- default:
- reflectType = reflectTypeOf(fieldDesc.Default().Interface())
- if fieldDesc.IsList() {
- parentMsg := dynamicpb.NewMessage(fieldDesc.ContainingMessage())
- listField := parentMsg.NewField(fieldDesc).List()
- elem := listField.NewElement().Interface()
- switch elemType := elem.(type) {
- case protoreflect.Message:
- elem = elemType.Interface()
- }
- reflectType = reflectTypeOf(elem)
- }
- }
- // Ensure the list type is appropriately reflected as a Go-native list.
- if fieldDesc.IsList() {
- reflectType = reflect.SliceOf(reflectType)
- }
- var keyType, valType *FieldDescription
- if fieldDesc.IsMap() {
- keyType = NewFieldDescription(fieldDesc.MapKey())
- valType = NewFieldDescription(fieldDesc.MapValue())
- }
- return &FieldDescription{
- desc: fieldDesc,
- KeyType: keyType,
- ValueType: valType,
- reflectType: reflectType,
- zeroMsg: zeroValueOf(zeroMsg),
- }
-}
-
-// FieldDescription holds metadata related to fields declared within a type.
-type FieldDescription struct {
- // KeyType holds the key FieldDescription for map fields.
- KeyType *FieldDescription
- // ValueType holds the value FieldDescription for map fields.
- ValueType *FieldDescription
-
- desc protoreflect.FieldDescriptor
- reflectType reflect.Type
- zeroMsg proto.Message
-}
-
-// CheckedType returns the type-definition used at type-check time.
-func (fd *FieldDescription) CheckedType() *exprpb.Type {
- if fd.desc.IsMap() {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_MapType_{
- MapType: &exprpb.Type_MapType{
- KeyType: fd.KeyType.typeDefToType(),
- ValueType: fd.ValueType.typeDefToType(),
- },
- },
- }
- }
- if fd.desc.IsList() {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_ListType_{
- ListType: &exprpb.Type_ListType{
- ElemType: fd.typeDefToType()}}}
- }
- return fd.typeDefToType()
-}
-
-// Descriptor returns the protoreflect.FieldDescriptor for this type.
-func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor {
- return fd.desc
-}
-
-// IsSet returns whether the field is set on the target value, per the proto presence conventions
-// of proto2 or proto3 accordingly.
-//
-// This function implements the FieldType.IsSet function contract which can be used to operate on
-// more than just protobuf field accesses; however, the target here must be a protobuf.Message.
-func (fd *FieldDescription) IsSet(target interface{}) bool {
- switch v := target.(type) {
- case proto.Message:
- pbRef := v.ProtoReflect()
- pbDesc := pbRef.Descriptor()
- if pbDesc == fd.desc.ContainingMessage() {
- // When the target protobuf shares the same message descriptor instance as the field
- // descriptor, use the cached field descriptor value.
- return pbRef.Has(fd.desc)
- }
- // Otherwise, fallback to a dynamic lookup of the field descriptor from the target
- // instance as an attempt to use the cached field descriptor will result in a panic.
- return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name())))
- default:
- return false
- }
-}
-
-// GetFrom returns the accessor method associated with the field on the proto generated struct.
-//
-// If the field is not set, the proto default value is returned instead.
-//
-// This function implements the FieldType.GetFrom function contract which can be used to operate
-// on more than just protobuf field accesses; however, the target here must be a protobuf.Message.
-func (fd *FieldDescription) GetFrom(target interface{}) (interface{}, error) {
- v, ok := target.(proto.Message)
- if !ok {
- return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target)
- }
- pbRef := v.ProtoReflect()
- pbDesc := pbRef.Descriptor()
- var fieldVal interface{}
- if pbDesc == fd.desc.ContainingMessage() {
- // When the target protobuf shares the same message descriptor instance as the field
- // descriptor, use the cached field descriptor value.
- fieldVal = pbRef.Get(fd.desc).Interface()
- } else {
- // Otherwise, fallback to a dynamic lookup of the field descriptor from the target
- // instance as an attempt to use the cached field descriptor will result in a panic.
- fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface()
- }
- switch fv := fieldVal.(type) {
- // Fast-path return for primitive types.
- case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List:
- return fv, nil
- case protoreflect.EnumNumber:
- return int64(fv), nil
- case protoreflect.Map:
- // Return a wrapper around the protobuf-reflected Map types which carries additional
- // information about the key and value definitions of the map.
- return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil
- case protoreflect.Message:
- // Make sure to unwrap well-known protobuf types before returning.
- unwrapped, _, err := fd.MaybeUnwrapDynamic(fv)
- return unwrapped, err
- default:
- return fv, nil
- }
-}
-
-// IsEnum returns true if the field type refers to an enum value.
-func (fd *FieldDescription) IsEnum() bool {
- return fd.desc.Kind() == protoreflect.EnumKind
-}
-
-// IsMap returns true if the field is of map type.
-func (fd *FieldDescription) IsMap() bool {
- return fd.desc.IsMap()
-}
-
-// IsMessage returns true if the field is of message type.
-func (fd *FieldDescription) IsMessage() bool {
- kind := fd.desc.Kind()
- return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind
-}
-
-// IsOneof returns true if the field is declared within a oneof block.
-func (fd *FieldDescription) IsOneof() bool {
- return fd.desc.ContainingOneof() != nil
-}
-
-// IsList returns true if the field is a repeated value.
-//
-// This method will also return true for map values, so check whether the
-// field is also a map.
-func (fd *FieldDescription) IsList() bool {
- return fd.desc.IsList()
-}
-
-// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the
-// value can be unwrapped to a more primitive CEL type.
-//
-// This function returns the unwrapped value and 'true' on success, or the original value
-// and 'false' otherwise.
-func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (interface{}, bool, error) {
- return unwrapDynamic(fd, msg)
-}
-
-// Name returns the CamelCase name of the field within the proto-based struct.
-func (fd *FieldDescription) Name() string {
- return string(fd.desc.Name())
-}
-
-// ReflectType returns the Golang reflect.Type for this field.
-func (fd *FieldDescription) ReflectType() reflect.Type {
- return fd.reflectType
-}
-
-// String returns the fully qualified name of the field within its type as well as whether the
-// field occurs within a oneof.
-func (fd *FieldDescription) String() string {
- return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof())
-}
-
-// Zero returns the zero value for the protobuf message represented by this field.
-//
-// If the field is not a proto.Message type, the zero value is nil.
-func (fd *FieldDescription) Zero() proto.Message {
- return fd.zeroMsg
-}
-
-func (fd *FieldDescription) typeDefToType() *exprpb.Type {
- if fd.desc.Kind() == protoreflect.MessageKind || fd.desc.Kind() == protoreflect.GroupKind {
- msgType := string(fd.desc.Message().FullName())
- if wk, found := CheckedWellKnowns[msgType]; found {
- return wk
- }
- return checkedMessageType(msgType)
- }
- if fd.desc.Kind() == protoreflect.EnumKind {
- return checkedInt
- }
- return CheckedPrimitives[fd.desc.Kind()]
-}
-
-// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in
-// retrieving individual elements within CEL value data types.
-type Map struct {
- protoreflect.Map
- KeyType *FieldDescription
- ValueType *FieldDescription
-}
-
-func checkedMessageType(name string) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_MessageType{MessageType: name}}
-}
-
-func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Primitive{Primitive: primitive}}
-}
-
-func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}}
-}
-
-func checkedWrap(t *exprpb.Type) *exprpb.Type {
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}}
-}
-
-// unwrap unwraps the provided proto.Message value, potentially based on the description if the
-// input message is a *dynamicpb.Message which obscures the typing information from Go.
-//
-// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
-func unwrap(desc description, msg proto.Message) (interface{}, bool, error) {
- switch v := msg.(type) {
- case *anypb.Any:
- dynMsg, err := v.UnmarshalNew()
- if err != nil {
- return v, false, err
- }
- return unwrapDynamic(desc, dynMsg.ProtoReflect())
- case *dynamicpb.Message:
- return unwrapDynamic(desc, v)
- case *dpb.Duration:
- return v.AsDuration(), true, nil
- case *tpb.Timestamp:
- return v.AsTime(), true, nil
- case *structpb.Value:
- switch v.GetKind().(type) {
- case *structpb.Value_BoolValue:
- return v.GetBoolValue(), true, nil
- case *structpb.Value_ListValue:
- return v.GetListValue(), true, nil
- case *structpb.Value_NullValue:
- return structpb.NullValue_NULL_VALUE, true, nil
- case *structpb.Value_NumberValue:
- return v.GetNumberValue(), true, nil
- case *structpb.Value_StringValue:
- return v.GetStringValue(), true, nil
- case *structpb.Value_StructValue:
- return v.GetStructValue(), true, nil
- default:
- return structpb.NullValue_NULL_VALUE, true, nil
- }
- case *wrapperspb.BoolValue:
- return v.GetValue(), true, nil
- case *wrapperspb.BytesValue:
- return v.GetValue(), true, nil
- case *wrapperspb.DoubleValue:
- return v.GetValue(), true, nil
- case *wrapperspb.FloatValue:
- return float64(v.GetValue()), true, nil
- case *wrapperspb.Int32Value:
- return int64(v.GetValue()), true, nil
- case *wrapperspb.Int64Value:
- return v.GetValue(), true, nil
- case *wrapperspb.StringValue:
- return v.GetValue(), true, nil
- case *wrapperspb.UInt32Value:
- return uint64(v.GetValue()), true, nil
- case *wrapperspb.UInt64Value:
- return v.GetValue(), true, nil
- }
- return msg, false, nil
-}
-
-// unwrapDynamic unwraps a reflected protobuf Message value.
-//
-// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
-func unwrapDynamic(desc description, refMsg protoreflect.Message) (interface{}, bool, error) {
- msg := refMsg.Interface()
- if !refMsg.IsValid() {
- msg = desc.Zero()
- }
- // In order to ensure that these wrapped types match the expectations of the CEL type system
- // the dynamicpb.Message must be merged with an protobuf instance of the well-known type value.
- typeName := string(refMsg.Descriptor().FullName())
- switch typeName {
- case "google.protobuf.Any":
- // Note, Any values require further unwrapping; however, this unwrapping may or may not
- // be to a well-known type. If the unwrapped value is a well-known type it will be further
- // unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object
- // represented by the Any will be returned.
- unwrappedAny := &anypb.Any{}
- err := Merge(unwrappedAny, msg)
- if err != nil {
- return nil, false, err
- }
- dynMsg, err := unwrappedAny.UnmarshalNew()
- if err != nil {
- // Allow the error to move further up the stack as it should result in an type
- // conversion error if the caller does not recover it somehow.
- return nil, false, err
- }
- // Attempt to unwrap the dynamic type, otherwise return the dynamic message.
- unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect())
- if err == nil && nested {
- return unwrapped, true, nil
- }
- return dynMsg, true, err
- case "google.protobuf.BoolValue",
- "google.protobuf.BytesValue",
- "google.protobuf.DoubleValue",
- "google.protobuf.FloatValue",
- "google.protobuf.Int32Value",
- "google.protobuf.Int64Value",
- "google.protobuf.StringValue",
- "google.protobuf.UInt32Value",
- "google.protobuf.UInt64Value":
- // The msg value is ignored when dealing with wrapper types as they have a null or value
- // behavior, rather than the standard zero value behavior of other proto message types.
- if !refMsg.IsValid() {
- return structpb.NullValue_NULL_VALUE, true, nil
- }
- valueField := refMsg.Descriptor().Fields().ByName("value")
- return refMsg.Get(valueField).Interface(), true, nil
- case "google.protobuf.Duration":
- unwrapped := &dpb.Duration{}
- err := Merge(unwrapped, msg)
- if err != nil {
- return nil, false, err
- }
- return unwrapped.AsDuration(), true, nil
- case "google.protobuf.ListValue":
- unwrapped := &structpb.ListValue{}
- err := Merge(unwrapped, msg)
- if err != nil {
- return nil, false, err
- }
- return unwrapped, true, nil
- case "google.protobuf.NullValue":
- return structpb.NullValue_NULL_VALUE, true, nil
- case "google.protobuf.Struct":
- unwrapped := &structpb.Struct{}
- err := Merge(unwrapped, msg)
- if err != nil {
- return nil, false, err
- }
- return unwrapped, true, nil
- case "google.protobuf.Timestamp":
- unwrapped := &tpb.Timestamp{}
- err := Merge(unwrapped, msg)
- if err != nil {
- return nil, false, err
- }
- return unwrapped.AsTime(), true, nil
- case "google.protobuf.Value":
- unwrapped := &structpb.Value{}
- err := Merge(unwrapped, msg)
- if err != nil {
- return nil, false, err
- }
- return unwrap(desc, unwrapped)
- }
- return msg, false, nil
-}
-
-// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve
-// well-known protobuf reflected types expected by the CEL type system.
-func reflectTypeOf(val interface{}) reflect.Type {
- switch v := val.(type) {
- case proto.Message:
- return reflect.TypeOf(zeroValueOf(v))
- default:
- return reflect.TypeOf(v)
- }
-}
-
-// zeroValueOf will return the strongest possible proto.Message representing the default protobuf
-// message value of the input msg type.
-func zeroValueOf(msg proto.Message) proto.Message {
- if msg == nil {
- return nil
- }
- typeName := string(msg.ProtoReflect().Descriptor().FullName())
- zeroVal, found := zeroValueMap[typeName]
- if found {
- return zeroVal
- }
- return msg
-}
-
-var (
- zeroValueMap = map[string]proto.Message{
- "google.protobuf.Any": &anypb.Any{},
- "google.protobuf.Duration": &dpb.Duration{},
- "google.protobuf.ListValue": &structpb.ListValue{},
- "google.protobuf.Struct": &structpb.Struct{},
- "google.protobuf.Timestamp": &tpb.Timestamp{},
- "google.protobuf.Value": &structpb.Value{},
- "google.protobuf.BoolValue": wrapperspb.Bool(false),
- "google.protobuf.BytesValue": wrapperspb.Bytes([]byte{}),
- "google.protobuf.DoubleValue": wrapperspb.Double(0.0),
- "google.protobuf.FloatValue": wrapperspb.Float(0.0),
- "google.protobuf.Int32Value": wrapperspb.Int32(0),
- "google.protobuf.Int64Value": wrapperspb.Int64(0),
- "google.protobuf.StringValue": wrapperspb.String(""),
- "google.protobuf.UInt32Value": wrapperspb.UInt32(0),
- "google.protobuf.UInt64Value": wrapperspb.UInt64(0),
- }
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/provider.go b/etcd/vendor/github.com/google/cel-go/common/types/provider.go
deleted file mode 100644
index 02087d14e3..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/provider.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
- "time"
-
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- anypb "google.golang.org/protobuf/types/known/anypb"
- dpb "google.golang.org/protobuf/types/known/durationpb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- tpb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-type protoTypeRegistry struct {
- revTypeMap map[string]ref.Type
- pbdb *pb.Db
-}
-
-// NewRegistry accepts a list of proto message instances and returns a type
-// provider which can create new instances of the provided message or any
-// message that proto depends upon in its FileDescriptor.
-func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) {
- p := &protoTypeRegistry{
- revTypeMap: make(map[string]ref.Type),
- pbdb: pb.NewDb(),
- }
- err := p.RegisterType(
- BoolType,
- BytesType,
- DoubleType,
- DurationType,
- IntType,
- ListType,
- MapType,
- NullType,
- StringType,
- TimestampType,
- TypeType,
- UintType)
- if err != nil {
- return nil, err
- }
- // This block ensures that the well-known protobuf types are registered by default.
- for _, fd := range p.pbdb.FileDescriptions() {
- err = p.registerAllTypes(fd)
- if err != nil {
- return nil, err
- }
- }
- for _, msgType := range types {
- err = p.RegisterMessage(msgType)
- if err != nil {
- return nil, err
- }
- }
- return p, nil
-}
-
-// NewEmptyRegistry returns a registry which is completely unconfigured.
-func NewEmptyRegistry() ref.TypeRegistry {
- return &protoTypeRegistry{
- revTypeMap: make(map[string]ref.Type),
- pbdb: pb.NewDb(),
- }
-}
-
-// Copy implements the ref.TypeRegistry interface method which copies the current state of the
-// registry into its own memory space.
-func (p *protoTypeRegistry) Copy() ref.TypeRegistry {
- copy := &protoTypeRegistry{
- revTypeMap: make(map[string]ref.Type),
- pbdb: p.pbdb.Copy(),
- }
- for k, v := range p.revTypeMap {
- copy.revTypeMap[k] = v
- }
- return copy
-}
-
-func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val {
- enumVal, found := p.pbdb.DescribeEnum(enumName)
- if !found {
- return NewErr("unknown enum name '%s'", enumName)
- }
- return Int(enumVal.Value())
-}
-
-func (p *protoTypeRegistry) FindFieldType(messageType string,
- fieldName string) (*ref.FieldType, bool) {
- msgType, found := p.pbdb.DescribeType(messageType)
- if !found {
- return nil, false
- }
- field, found := msgType.FieldByName(fieldName)
- if !found {
- return nil, false
- }
- return &ref.FieldType{
- Type: field.CheckedType(),
- IsSet: field.IsSet,
- GetFrom: field.GetFrom},
- true
-}
-
-func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) {
- if t, found := p.revTypeMap[identName]; found {
- return t.(ref.Val), true
- }
- if enumVal, found := p.pbdb.DescribeEnum(identName); found {
- return Int(enumVal.Value()), true
- }
- return nil, false
-}
-
-func (p *protoTypeRegistry) FindType(typeName string) (*exprpb.Type, bool) {
- if _, found := p.pbdb.DescribeType(typeName); !found {
- return nil, false
- }
- if typeName != "" && typeName[0] == '.' {
- typeName = typeName[1:]
- }
- return &exprpb.Type{
- TypeKind: &exprpb.Type_Type{
- Type: &exprpb.Type{
- TypeKind: &exprpb.Type_MessageType{
- MessageType: typeName}}}}, true
-}
-
-func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
- td, found := p.pbdb.DescribeType(typeName)
- if !found {
- return NewErr("unknown type '%s'", typeName)
- }
- msg := td.New()
- fieldMap := td.FieldMap()
- for name, value := range fields {
- field, found := fieldMap[name]
- if !found {
- return NewErr("no such field: %s", name)
- }
- err := msgSetField(msg, field, value)
- if err != nil {
- return &Err{err}
- }
- }
- return p.NativeToValue(msg.Interface())
-}
-
-func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
- fd, err := p.pbdb.RegisterDescriptor(fileDesc)
- if err != nil {
- return err
- }
- return p.registerAllTypes(fd)
-}
-
-func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error {
- fd, err := p.pbdb.RegisterMessage(message)
- if err != nil {
- return err
- }
- return p.registerAllTypes(fd)
-}
-
-func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error {
- for _, t := range types {
- p.revTypeMap[t.TypeName()] = t
- }
- // TODO: generate an error when the type name is registered more than once.
- return nil
-}
-
-// NativeToValue converts various "native" types to ref.Val with this specific implementation
-// providing support for custom proto-based types.
-//
-// This method should be the inverse of ref.Val.ConvertToNative.
-func (p *protoTypeRegistry) NativeToValue(value interface{}) ref.Val {
- if val, found := nativeToValue(p, value); found {
- return val
- }
- switch v := value.(type) {
- case proto.Message:
- typeName := string(v.ProtoReflect().Descriptor().FullName())
- td, found := p.pbdb.DescribeType(typeName)
- if !found {
- return NewErr("unknown type: '%s'", typeName)
- }
- unwrapped, isUnwrapped, err := td.MaybeUnwrap(v)
- if err != nil {
- return UnsupportedRefValConversionErr(v)
- }
- if isUnwrapped {
- return p.NativeToValue(unwrapped)
- }
- typeVal, found := p.FindIdent(typeName)
- if !found {
- return NewErr("unknown type: '%s'", typeName)
- }
- return NewObject(p, td, typeVal.(*TypeValue), v)
- case *pb.Map:
- return NewProtoMap(p, v)
- case protoreflect.List:
- return NewProtoList(p, v)
- case protoreflect.Message:
- return p.NativeToValue(v.Interface())
- case protoreflect.Value:
- return p.NativeToValue(v.Interface())
- }
- return UnsupportedRefValConversionErr(value)
-}
-
-func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error {
- for _, typeName := range fd.GetTypeNames() {
- err := p.RegisterType(NewObjectTypeValue(typeName))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// defaultTypeAdapter converts go native types to CEL values.
-type defaultTypeAdapter struct{}
-
-var (
- // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values.
- DefaultTypeAdapter = &defaultTypeAdapter{}
-)
-
-// NativeToValue implements the ref.TypeAdapter interface.
-func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val {
- if val, found := nativeToValue(a, value); found {
- return val
- }
- return UnsupportedRefValConversionErr(value)
-}
-
-// nativeToValue returns the converted (ref.Val, true) of a conversion is found,
-// otherwise (nil, false)
-func nativeToValue(a ref.TypeAdapter, value interface{}) (ref.Val, bool) {
- switch v := value.(type) {
- case nil:
- return NullValue, true
- case *Bool:
- if v != nil {
- return *v, true
- }
- case *Bytes:
- if v != nil {
- return *v, true
- }
- case *Double:
- if v != nil {
- return *v, true
- }
- case *Int:
- if v != nil {
- return *v, true
- }
- case *String:
- if v != nil {
- return *v, true
- }
- case *Uint:
- if v != nil {
- return *v, true
- }
- case bool:
- return Bool(v), true
- case int:
- return Int(v), true
- case int32:
- return Int(v), true
- case int64:
- return Int(v), true
- case uint:
- return Uint(v), true
- case uint32:
- return Uint(v), true
- case uint64:
- return Uint(v), true
- case float32:
- return Double(v), true
- case float64:
- return Double(v), true
- case string:
- return String(v), true
- case *dpb.Duration:
- return Duration{Duration: v.AsDuration()}, true
- case time.Duration:
- return Duration{Duration: v}, true
- case *tpb.Timestamp:
- return Timestamp{Time: v.AsTime()}, true
- case time.Time:
- return Timestamp{Time: v}, true
- case *bool:
- if v != nil {
- return Bool(*v), true
- }
- case *float32:
- if v != nil {
- return Double(*v), true
- }
- case *float64:
- if v != nil {
- return Double(*v), true
- }
- case *int:
- if v != nil {
- return Int(*v), true
- }
- case *int32:
- if v != nil {
- return Int(*v), true
- }
- case *int64:
- if v != nil {
- return Int(*v), true
- }
- case *string:
- if v != nil {
- return String(*v), true
- }
- case *uint:
- if v != nil {
- return Uint(*v), true
- }
- case *uint32:
- if v != nil {
- return Uint(*v), true
- }
- case *uint64:
- if v != nil {
- return Uint(*v), true
- }
- case []byte:
- return Bytes(v), true
- // specializations for common lists types.
- case []string:
- return NewStringList(a, v), true
- case []ref.Val:
- return NewRefValList(a, v), true
- // specializations for common map types.
- case map[string]string:
- return NewStringStringMap(a, v), true
- case map[string]interface{}:
- return NewStringInterfaceMap(a, v), true
- case map[ref.Val]ref.Val:
- return NewRefValMap(a, v), true
- // additional specializations may be added upon request / need.
- case *anypb.Any:
- if v == nil {
- return UnsupportedRefValConversionErr(v), true
- }
- unpackedAny, err := v.UnmarshalNew()
- if err != nil {
- return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true
- }
- return a.NativeToValue(unpackedAny), true
- case *structpb.NullValue, structpb.NullValue:
- return NullValue, true
- case *structpb.ListValue:
- return NewJSONList(a, v), true
- case *structpb.Struct:
- return NewJSONStruct(a, v), true
- case ref.Val:
- return v, true
- case protoreflect.EnumNumber:
- return Int(v), true
- case proto.Message:
- if v == nil {
- return UnsupportedRefValConversionErr(v), true
- }
- typeName := string(v.ProtoReflect().Descriptor().FullName())
- td, found := pb.DefaultDb.DescribeType(typeName)
- if !found {
- return nil, false
- }
- val, unwrapped, err := td.MaybeUnwrap(v)
- if err != nil {
- return UnsupportedRefValConversionErr(v), true
- }
- if !unwrapped {
- return nil, false
- }
- return a.NativeToValue(val), true
- // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces
- // which means that this case must appear after handling a proto.Message type.
- case protoreflect.Message:
- return a.NativeToValue(v.Interface()), true
- default:
- refValue := reflect.ValueOf(v)
- if refValue.Kind() == reflect.Ptr {
- if refValue.IsNil() {
- return UnsupportedRefValConversionErr(v), true
- }
- refValue = refValue.Elem()
- }
- refKind := refValue.Kind()
- switch refKind {
- case reflect.Array, reflect.Slice:
- return NewDynamicList(a, v), true
- case reflect.Map:
- return NewDynamicMap(a, v), true
- // type aliases of primitive types cannot be asserted as that type, but rather need
- // to be downcast to int32 before being converted to a CEL representation.
- case reflect.Int32:
- intType := reflect.TypeOf(int32(0))
- return Int(refValue.Convert(intType).Interface().(int32)), true
- case reflect.Int64:
- intType := reflect.TypeOf(int64(0))
- return Int(refValue.Convert(intType).Interface().(int64)), true
- case reflect.Uint32:
- uintType := reflect.TypeOf(uint32(0))
- return Uint(refValue.Convert(uintType).Interface().(uint32)), true
- case reflect.Uint64:
- uintType := reflect.TypeOf(uint64(0))
- return Uint(refValue.Convert(uintType).Interface().(uint64)), true
- case reflect.Float32:
- doubleType := reflect.TypeOf(float32(0))
- return Double(refValue.Convert(doubleType).Interface().(float32)), true
- case reflect.Float64:
- doubleType := reflect.TypeOf(float64(0))
- return Double(refValue.Convert(doubleType).Interface().(float64)), true
- }
- }
- return nil, false
-}
-
-func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error {
- if field.IsList() {
- lv := target.NewField(field.Descriptor())
- list, ok := val.(traits.Lister)
- if !ok {
- return unsupportedTypeConversionError(field, val)
- }
- err := msgSetListField(lv.List(), field, list)
- if err != nil {
- return err
- }
- target.Set(field.Descriptor(), lv)
- return nil
- }
- if field.IsMap() {
- mv := target.NewField(field.Descriptor())
- mp, ok := val.(traits.Mapper)
- if !ok {
- return unsupportedTypeConversionError(field, val)
- }
- err := msgSetMapField(mv.Map(), field, mp)
- if err != nil {
- return err
- }
- target.Set(field.Descriptor(), mv)
- return nil
- }
- v, err := val.ConvertToNative(field.ReflectType())
- if err != nil {
- return fieldTypeConversionError(field, err)
- }
- switch v.(type) {
- case proto.Message:
- v = v.(proto.Message).ProtoReflect()
- }
- target.Set(field.Descriptor(), protoreflect.ValueOf(v))
- return nil
-}
-
-func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error {
- elemReflectType := listField.ReflectType().Elem()
- for i := Int(0); i < listVal.Size().(Int); i++ {
- elem := listVal.Get(i)
- elemVal, err := elem.ConvertToNative(elemReflectType)
- if err != nil {
- return fieldTypeConversionError(listField, err)
- }
- switch ev := elemVal.(type) {
- case proto.Message:
- elemVal = ev.ProtoReflect()
- }
- target.Append(protoreflect.ValueOf(elemVal))
- }
- return nil
-}
-
-func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error {
- targetKeyType := mapField.KeyType.ReflectType()
- targetValType := mapField.ValueType.ReflectType()
- it := mapVal.Iterator()
- for it.HasNext() == True {
- key := it.Next()
- val := mapVal.Get(key)
- k, err := key.ConvertToNative(targetKeyType)
- if err != nil {
- return fieldTypeConversionError(mapField, err)
- }
- v, err := val.ConvertToNative(targetValType)
- if err != nil {
- return fieldTypeConversionError(mapField, err)
- }
- switch v.(type) {
- case proto.Message:
- v = v.(proto.Message).ProtoReflect()
- }
- target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v))
- }
- return nil
-}
-
-func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error {
- msgName := field.Descriptor().ContainingMessage().FullName()
- return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type())
-}
-
-func fieldTypeConversionError(field *pb.FieldDescription, err error) error {
- msgName := field.Descriptor().ContainingMessage().FullName()
- return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
deleted file mode 100644
index 1d0f468993..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
+++ /dev/null
@@ -1,20 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "provider.go",
- "reference.go",
- ],
- importpath = "github.com/google/cel-go/common/types/ref",
- deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/ref/provider.go b/etcd/vendor/github.com/google/cel-go/common/types/ref/provider.go
deleted file mode 100644
index 91a711fa70..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/ref/provider.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ref
-
-import (
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// TypeProvider specifies functions for creating new object instances and for
-// resolving enum values by name.
-type TypeProvider interface {
- // EnumValue returns the numeric value of the given enum value name.
- EnumValue(enumName string) Val
-
- // FindIdent takes a qualified identifier name and returns a Value if one
- // exists.
- FindIdent(identName string) (Val, bool)
-
- // FindType looks up the Type given a qualified typeName. Returns false
- // if not found.
- //
- // Used during type-checking only.
- FindType(typeName string) (*exprpb.Type, bool)
-
- // FieldFieldType returns the field type for a checked type value. Returns
- // false if the field could not be found.
- //
- // Used during type-checking only.
- FindFieldType(messageType string, fieldName string) (*FieldType, bool)
-
- // NewValue creates a new type value from a qualified name and map of field
- // name to value.
- //
- // Note, for each value, the Val.ConvertToNative function will be invoked
- // to convert the Val to the field's native type. If an error occurs during
- // conversion, the NewValue will be a types.Err.
- NewValue(typeName string, fields map[string]Val) Val
-}
-
-// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
-type TypeAdapter interface {
- // NativeToValue converts the input `value` to a CEL `ref.Val`.
- NativeToValue(value interface{}) Val
-}
-
-// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
-// implementations support type-customization, so these features are optional. However, a
-// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
-// which are registered can be converted to CEL representations.
-type TypeRegistry interface {
- TypeAdapter
- TypeProvider
-
- // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
- RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error
-
- // RegisterMessage registers a protocol buffer message and its dependencies.
- RegisterMessage(message proto.Message) error
-
- // RegisterType registers a type value with the provider which ensures the
- // provider is aware of how to map the type to an identifier.
- //
- // If a type is provided more than once with an alternative definition, the
- // call will result in an error.
- RegisterType(types ...Type) error
-
- // Copy the TypeRegistry and return a new registry whose mutable state is isolated.
- Copy() TypeRegistry
-}
-
-// FieldType represents a field's type value and whether that field supports
-// presence detection.
-type FieldType struct {
- // Type of the field.
- Type *exprpb.Type
-
- // IsSet indicates whether the field is set on an input object.
- IsSet FieldTester
-
- // GetFrom retrieves the field value on the input object, if set.
- GetFrom FieldGetter
-}
-
-// FieldTester is used to test field presence on an input object.
-type FieldTester func(target interface{}) bool
-
-// FieldGetter is used to get the field value from an input object, if set.
-type FieldGetter func(target interface{}) (interface{}, error)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/ref/reference.go b/etcd/vendor/github.com/google/cel-go/common/types/ref/reference.go
deleted file mode 100644
index 3098580c91..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/ref/reference.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ref contains the reference interfaces used throughout the types components.
-package ref
-
-import (
- "reflect"
-)
-
-// Type interface indicate the name of a given type.
-type Type interface {
- // HasTrait returns whether the type has a given trait associated with it.
- //
- // See common/types/traits/traits.go for a list of supported traits.
- HasTrait(trait int) bool
-
- // TypeName returns the qualified type name of the type.
- //
- // The type name is also used as the type's identifier name at type-check and interpretation time.
- TypeName() string
-}
-
-// Val interface defines the functions supported by all expression values.
-// Val implementations may specialize the behavior of the value through the addition of traits.
-type Val interface {
- // ConvertToNative converts the Value to a native Go struct according to the
- // reflected type description, or error if the conversion is not feasible.
- ConvertToNative(typeDesc reflect.Type) (interface{}, error)
-
- // ConvertToType supports type conversions between value types supported by the expression language.
- ConvertToType(typeValue Type) Val
-
- // Equal returns true if the `other` value has the same type and content as the implementing struct.
- Equal(other Val) Val
-
- // Type returns the TypeValue of the value.
- Type() Type
-
- // Value returns the raw value of the instance which may not be directly compatible with the expression
- // language types.
- Value() interface{}
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/string.go b/etcd/vendor/github.com/google/cel-go/common/types/string.go
deleted file mode 100644
index b6d665683c..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/string.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// String type implementation which supports addition, comparison, matching,
-// and size functions.
-type String string
-
-var (
- // StringType singleton.
- StringType = NewTypeValue("string",
- traits.AdderType,
- traits.ComparerType,
- traits.MatcherType,
- traits.ReceiverType,
- traits.SizerType)
-
- stringOneArgOverloads = map[string]func(String, ref.Val) ref.Val{
- overloads.Contains: stringContains,
- overloads.EndsWith: stringEndsWith,
- overloads.StartsWith: stringStartsWith,
- }
-
- stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{})
-)
-
-// Add implements traits.Adder.Add.
-func (s String) Add(other ref.Val) ref.Val {
- otherString, ok := other.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- return s + otherString
-}
-
-// Compare implements traits.Comparer.Compare.
-func (s String) Compare(other ref.Val) ref.Val {
- otherString, ok := other.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- return Int(strings.Compare(s.Value().(string), otherString.Value().(string)))
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (s String) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.String:
- if reflect.TypeOf(s).AssignableTo(typeDesc) {
- return s, nil
- }
- return s.Value(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Primitives must be wrapped before being set on an Any field.
- return anypb.New(wrapperspb.String(string(s)))
- case jsonValueType:
- // Convert to a protobuf representation of a JSON String.
- return structpb.NewStringValue(string(s)), nil
- case stringWrapperType:
- // Convert to a wrapperspb.StringValue.
- return wrapperspb.String(string(s)), nil
- }
- if typeDesc.Elem().Kind() == reflect.String {
- p := s.Value().(string)
- return &p, nil
- }
- case reflect.Interface:
- sv := s.Value()
- if reflect.TypeOf(sv).Implements(typeDesc) {
- return sv, nil
- }
- if reflect.TypeOf(s).Implements(typeDesc) {
- return s, nil
- }
- }
- return nil, fmt.Errorf(
- "unsupported native conversion from string to '%v'", typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (s String) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case IntType:
- if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil {
- return Int(n)
- }
- case UintType:
- if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil {
- return Uint(n)
- }
- case DoubleType:
- if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil {
- return Double(n)
- }
- case BoolType:
- if b, err := strconv.ParseBool(s.Value().(string)); err == nil {
- return Bool(b)
- }
- case BytesType:
- return Bytes(s)
- case DurationType:
- if d, err := time.ParseDuration(s.Value().(string)); err == nil {
- return durationOf(d)
- }
- case TimestampType:
- if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil {
- if t.Unix() < minUnixTime || t.Unix() > maxUnixTime {
- return celErrTimestampOverflow
- }
- return timestampOf(t)
- }
- case StringType:
- return s
- case TypeType:
- return StringType
- }
- return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal)
-}
-
-// Equal implements ref.Val.Equal.
-func (s String) Equal(other ref.Val) ref.Val {
- otherString, ok := other.(String)
- return Bool(ok && s == otherString)
-}
-
-// Match implements traits.Matcher.Match.
-func (s String) Match(pattern ref.Val) ref.Val {
- pat, ok := pattern.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(pattern)
- }
- matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string))
- if err != nil {
- return &Err{err}
- }
- return Bool(matched)
-}
-
-// Receive implements traits.Receiver.Receive.
-func (s String) Receive(function string, overload string, args []ref.Val) ref.Val {
- switch len(args) {
- case 1:
- if f, found := stringOneArgOverloads[function]; found {
- return f(s, args[0])
- }
- }
- return NoSuchOverloadErr()
-}
-
-// Size implements traits.Sizer.Size.
-func (s String) Size() ref.Val {
- return Int(len([]rune(s.Value().(string))))
-}
-
-// Type implements ref.Val.Type.
-func (s String) Type() ref.Type {
- return StringType
-}
-
-// Value implements ref.Val.Value.
-func (s String) Value() interface{} {
- return string(s)
-}
-
-func stringContains(s String, sub ref.Val) ref.Val {
- subStr, ok := sub.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(sub)
- }
- return Bool(strings.Contains(string(s), string(subStr)))
-}
-
-func stringEndsWith(s String, suf ref.Val) ref.Val {
- sufStr, ok := suf.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(suf)
- }
- return Bool(strings.HasSuffix(string(s), string(sufStr)))
-}
-
-func stringStartsWith(s String, pre ref.Val) ref.Val {
- preStr, ok := pre.(String)
- if !ok {
- return MaybeNoSuchOverloadErr(pre)
- }
- return Bool(strings.HasPrefix(string(s), string(preStr)))
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/timestamp.go b/etcd/vendor/github.com/google/cel-go/common/types/timestamp.go
deleted file mode 100644
index 7513a1b210..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/timestamp.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- tpb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// Timestamp type implementation which supports add, compare, and subtract
-// operations. Timestamps are also capable of participating in dynamic
-// function dispatch to instance methods.
-type Timestamp struct {
- time.Time
-}
-
-func timestampOf(t time.Time) Timestamp {
- // Note that this function does not validate that time.Time is in our supported range.
- return Timestamp{Time: t}
-}
-
-const (
- // The number of seconds between year 1 and year 1970. This is borrowed from
- // https://golang.org/src/time/time.go.
- unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * (60 * 60 * 24)
-
- // Number of seconds between `0001-01-01T00:00:00Z` and the Unix epoch.
- minUnixTime int64 = -62135596800
- // Number of seconds between `9999-12-31T23:59:59.999999999Z` and the Unix epoch.
- maxUnixTime int64 = 253402300799
-)
-
-var (
- // TimestampType singleton.
- TimestampType = NewTypeValue("google.protobuf.Timestamp",
- traits.AdderType,
- traits.ComparerType,
- traits.ReceiverType,
- traits.SubtractorType)
-)
-
-// Add implements traits.Adder.Add.
-func (t Timestamp) Add(other ref.Val) ref.Val {
- switch other.Type() {
- case DurationType:
- return other.(Duration).Add(t)
- }
- return MaybeNoSuchOverloadErr(other)
-}
-
-// Compare implements traits.Comparer.Compare.
-func (t Timestamp) Compare(other ref.Val) ref.Val {
- if TimestampType != other.Type() {
- return MaybeNoSuchOverloadErr(other)
- }
- ts1 := t.Time
- ts2 := other.(Timestamp).Time
- switch {
- case ts1.Before(ts2):
- return IntNegOne
- case ts1.After(ts2):
- return IntOne
- default:
- return IntZero
- }
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- // If the timestamp is already assignable to the desired type return it.
- if reflect.TypeOf(t.Time).AssignableTo(typeDesc) {
- return t.Time, nil
- }
- if reflect.TypeOf(t).AssignableTo(typeDesc) {
- return t, nil
- }
- switch typeDesc {
- case anyValueType:
- // Pack the underlying time as a tpb.Timestamp into an Any value.
- return anypb.New(tpb.New(t.Time))
- case jsonValueType:
- // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON
- // string.
- v := t.ConvertToType(StringType)
- if IsError(v) {
- return nil, v.(*Err)
- }
- return structpb.NewStringValue(string(v.(String))), nil
- case timestampValueType:
- // Unwrap the underlying tpb.Timestamp.
- return tpb.New(t.Time), nil
- }
- return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case StringType:
- return String(t.Format(time.RFC3339Nano))
- case IntType:
- // Return the Unix time in seconds since 1970
- return Int(t.Unix())
- case TimestampType:
- return t
- case TypeType:
- return TimestampType
- }
- return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal)
-}
-
-// Equal implements ref.Val.Equal.
-func (t Timestamp) Equal(other ref.Val) ref.Val {
- otherTime, ok := other.(Timestamp)
- return Bool(ok && t.Time.Equal(otherTime.Time))
-}
-
-// Receive implements traits.Receiver.Receive.
-func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
- switch len(args) {
- case 0:
- if f, found := timestampZeroArgOverloads[function]; found {
- return f(t.Time)
- }
- case 1:
- if f, found := timestampOneArgOverloads[function]; found {
- return f(t.Time, args[0])
- }
- }
- return NoSuchOverloadErr()
-}
-
-// Subtract implements traits.Subtractor.Subtract.
-func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
- switch subtrahend.Type() {
- case DurationType:
- dur := subtrahend.(Duration)
- val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
- if err != nil {
- return wrapErr(err)
- }
- return timestampOf(val)
- case TimestampType:
- t2 := subtrahend.(Timestamp).Time
- val, err := subtractTimeChecked(t.Time, t2)
- if err != nil {
- return wrapErr(err)
- }
- return durationOf(val)
- }
- return MaybeNoSuchOverloadErr(subtrahend)
-}
-
-// Type implements ref.Val.Type.
-func (t Timestamp) Type() ref.Type {
- return TimestampType
-}
-
-// Value implements ref.Val.Value.
-func (t Timestamp) Value() interface{} {
- return t.Time
-}
-
-var (
- timestampValueType = reflect.TypeOf(&tpb.Timestamp{})
-
- timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{
- overloads.TimeGetFullYear: timestampGetFullYear,
- overloads.TimeGetMonth: timestampGetMonth,
- overloads.TimeGetDayOfYear: timestampGetDayOfYear,
- overloads.TimeGetDate: timestampGetDayOfMonthOneBased,
- overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased,
- overloads.TimeGetDayOfWeek: timestampGetDayOfWeek,
- overloads.TimeGetHours: timestampGetHours,
- overloads.TimeGetMinutes: timestampGetMinutes,
- overloads.TimeGetSeconds: timestampGetSeconds,
- overloads.TimeGetMilliseconds: timestampGetMilliseconds}
-
- timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{
- overloads.TimeGetFullYear: timestampGetFullYearWithTz,
- overloads.TimeGetMonth: timestampGetMonthWithTz,
- overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz,
- overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz,
- overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz,
- overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz,
- overloads.TimeGetHours: timestampGetHoursWithTz,
- overloads.TimeGetMinutes: timestampGetMinutesWithTz,
- overloads.TimeGetSeconds: timestampGetSecondsWithTz,
- overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz}
-)
-
-type timestampVisitor func(time.Time) ref.Val
-
-func timestampGetFullYear(t time.Time) ref.Val {
- return Int(t.Year())
-}
-func timestampGetMonth(t time.Time) ref.Val {
- // CEL spec indicates that the month should be 0-based, but the Time value
- // for Month() is 1-based.
- return Int(t.Month() - 1)
-}
-func timestampGetDayOfYear(t time.Time) ref.Val {
- return Int(t.YearDay() - 1)
-}
-func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val {
- return Int(t.Day() - 1)
-}
-func timestampGetDayOfMonthOneBased(t time.Time) ref.Val {
- return Int(t.Day())
-}
-func timestampGetDayOfWeek(t time.Time) ref.Val {
- return Int(t.Weekday())
-}
-func timestampGetHours(t time.Time) ref.Val {
- return Int(t.Hour())
-}
-func timestampGetMinutes(t time.Time) ref.Val {
- return Int(t.Minute())
-}
-func timestampGetSeconds(t time.Time) ref.Val {
- return Int(t.Second())
-}
-func timestampGetMilliseconds(t time.Time) ref.Val {
- return Int(t.Nanosecond() / 1000000)
-}
-
-func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetFullYear)(t)
-}
-func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetMonth)(t)
-}
-func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetDayOfYear)(t)
-}
-func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetDayOfMonthZeroBased)(t)
-}
-func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetDayOfMonthOneBased)(t)
-}
-func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetDayOfWeek)(t)
-}
-func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetHours)(t)
-}
-func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetMinutes)(t)
-}
-func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetSeconds)(t)
-}
-func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val {
- return timeZone(tz, timestampGetMilliseconds)(t)
-}
-
-func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
- return func(t time.Time) ref.Val {
- if StringType != tz.Type() {
- return MaybeNoSuchOverloadErr(tz)
- }
- val := string(tz.(String))
- ind := strings.Index(val, ":")
- if ind == -1 {
- loc, err := time.LoadLocation(val)
- if err != nil {
- return wrapErr(err)
- }
- return visitor(t.In(loc))
- }
-
- // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
- // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
- hr, err := strconv.Atoi(string(val[0:ind]))
- if err != nil {
- return wrapErr(err)
- }
- min, err := strconv.Atoi(string(val[ind+1:]))
- if err != nil {
- return wrapErr(err)
- }
- var offset int
- if string(val[0]) == "-" {
- offset = hr*60 - min
- } else {
- offset = hr*60 + min
- }
- secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
- timezone := time.FixedZone("", secondsEastOfUTC)
- return visitor(t.In(timezone))
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
deleted file mode 100644
index 86e54af61a..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
+++ /dev/null
@@ -1,28 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "comparer.go",
- "container.go",
- "field_tester.go",
- "indexer.go",
- "iterator.go",
- "lister.go",
- "mapper.go",
- "matcher.go",
- "math.go",
- "receiver.go",
- "sizer.go",
- "traits.go",
- ],
- importpath = "github.com/google/cel-go/common/types/traits",
- deps = [
- "//common/types/ref:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/comparer.go
deleted file mode 100644
index b531d9ae2b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/comparer.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Comparer interface for ordering comparisons between values in order to
-// support '<', '<=', '>=', '>' overloads.
-type Comparer interface {
- // Compare this value to the input other value, returning an Int:
- //
- // this < other -> Int(-1)
- // this == other -> Int(0)
- // this > other -> Int(1)
- //
- // If the comparison cannot be made or is not supported, an error should
- // be returned.
- Compare(other ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/container.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/container.go
deleted file mode 100644
index cf5c621ae9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/container.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Container interface which permits containment tests such as 'a in b'.
-type Container interface {
- // Contains returns true if the value exists within the object.
- Contains(value ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
deleted file mode 100644
index 816a956523..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// FieldTester indicates if a defined field on an object type is set to a
-// non-default value.
-//
-// For use with the `has()` macro.
-type FieldTester interface {
- // IsSet returns true if the field is defined and set to a non-default
- // value. The method will return false if defined and not set, and an error
- // if the field is not defined.
- IsSet(field ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/indexer.go
deleted file mode 100644
index 662c6836c3..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/indexer.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Indexer permits random access of elements by index 'a[b()]'.
-type Indexer interface {
- // Get the value at the specified index or error.
- Get(index ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/iterator.go
deleted file mode 100644
index 42dd371aa4..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/iterator.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Iterable aggregate types permit traversal over their elements.
-type Iterable interface {
- // Iterator returns a new iterator view of the struct.
- Iterator() Iterator
-}
-
-// Iterator permits safe traversal over the contents of an aggregate type.
-type Iterator interface {
- ref.Val
-
- // HasNext returns true if there are unvisited elements in the Iterator.
- HasNext() ref.Val
-
- // Next returns the next element.
- Next() ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/lister.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/lister.go
deleted file mode 100644
index 5cf2593f3b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/lister.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Lister interface which aggregates the traits of a list.
-type Lister interface {
- ref.Val
- Adder
- Container
- Indexer
- Iterable
- Sizer
-}
-
-// MutableLister interface which emits an immutable result after an intermediate computation.
-type MutableLister interface {
- Lister
- ToImmutableList() Lister
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/mapper.go
deleted file mode 100644
index 2f7c919a8b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/mapper.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Mapper interface which aggregates the traits of a maps.
-type Mapper interface {
- ref.Val
- Container
- Indexer
- Iterable
- Sizer
-
- // Find returns a value, if one exists, for the input key.
- //
- // If the key is not found the function returns (nil, false).
- // If the input key is not valid for the map, or is Err or Unknown the function returns
- // (Unknown|Err, false).
- Find(key ref.Val) (ref.Val, bool)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/matcher.go
deleted file mode 100644
index 085dc94ff4..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/matcher.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Matcher interface for supporting 'matches()' overloads.
-type Matcher interface {
- // Match returns true if the pattern matches the current value.
- Match(pattern ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/math.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/math.go
deleted file mode 100644
index 86d5b9137e..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/math.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Adder interface to support '+' operator overloads.
-type Adder interface {
- // Add returns a combination of the current value and other value.
- //
- // If the other value is an unsupported type, an error is returned.
- Add(other ref.Val) ref.Val
-}
-
-// Divider interface to support '/' operator overloads.
-type Divider interface {
- // Divide returns the result of dividing the current value by the input
- // denominator.
- //
- // A denominator value of zero results in an error.
- Divide(denominator ref.Val) ref.Val
-}
-
-// Modder interface to support '%' operator overloads.
-type Modder interface {
- // Modulo returns the result of taking the modulus of the current value
- // by the denominator.
- //
- // A denominator value of zero results in an error.
- Modulo(denominator ref.Val) ref.Val
-}
-
-// Multiplier interface to support '*' operator overloads.
-type Multiplier interface {
- // Multiply returns the result of multiplying the current and input value.
- Multiply(other ref.Val) ref.Val
-}
-
-// Negater interface to support unary '-' and '!' operator overloads.
-type Negater interface {
- // Negate returns the complement of the current value.
- Negate() ref.Val
-}
-
-// Subtractor interface to support binary '-' operator overloads.
-type Subtractor interface {
- // Subtract returns the result of subtracting the input from the current
- // value.
- Subtract(subtrahend ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/receiver.go
deleted file mode 100644
index 8f41db45e8..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/receiver.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Receiver interface for routing instance method calls within a value.
-type Receiver interface {
- // Receive accepts a function name, overload id, and arguments and returns
- // a value.
- Receive(function string, overload string, args []ref.Val) ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/sizer.go
deleted file mode 100644
index b80d25137a..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/sizer.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package traits
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Sizer interface for supporting 'size()' overloads.
-type Sizer interface {
- // Size returns the number of elements or length of the value.
- Size() ref.Val
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/traits/traits.go b/etcd/vendor/github.com/google/cel-go/common/types/traits/traits.go
deleted file mode 100644
index 6da3e6a3e1..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/traits/traits.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package traits defines interfaces that a type may implement to participate
-// in operator overloads and function dispatch.
-package traits
-
-const (
- // AdderType types provide a '+' operator overload.
- AdderType = 1 << iota
-
- // ComparerType types support ordering comparisons '<', '<=', '>', '>='.
- ComparerType
-
- // ContainerType types support 'in' operations.
- ContainerType
-
- // DividerType types support '/' operations.
- DividerType
-
- // FieldTesterType types support the detection of field value presence.
- FieldTesterType
-
- // IndexerType types support index access with dynamic values.
- IndexerType
-
- // IterableType types can be iterated over in comprehensions.
- IterableType
-
- // IteratorType types support iterator semantics.
- IteratorType
-
- // MatcherType types support pattern matching via 'matches' method.
- MatcherType
-
- // ModderType types support modulus operations '%'
- ModderType
-
- // MultiplierType types support '*' operations.
- MultiplierType
-
- // NegatorType types support either negation via '!' or '-'
- NegatorType
-
- // ReceiverType types support dynamic dispatch to instance methods.
- ReceiverType
-
- // SizerType types support the size() method.
- SizerType
-
- // SubtractorType type support '-' operations.
- SubtractorType
-)
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/type.go b/etcd/vendor/github.com/google/cel-go/common/types/type.go
deleted file mode 100644
index 21160974bb..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/type.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-var (
- // TypeType is the type of a TypeValue.
- TypeType = NewTypeValue("type")
-)
-
-// TypeValue is an instance of a Value that describes a value's type.
-type TypeValue struct {
- name string
- traitMask int
-}
-
-// NewTypeValue returns *TypeValue which is both a ref.Type and ref.Val.
-func NewTypeValue(name string, traits ...int) *TypeValue {
- traitMask := 0
- for _, trait := range traits {
- traitMask |= trait
- }
- return &TypeValue{
- name: name,
- traitMask: traitMask}
-}
-
-// NewObjectTypeValue returns a *TypeValue based on the input name, which is
-// annotated with the traits relevant to all objects.
-func NewObjectTypeValue(name string) *TypeValue {
- return NewTypeValue(name,
- traits.FieldTesterType,
- traits.IndexerType)
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- // TODO: replace the internal type representation with a proto-value.
- return nil, fmt.Errorf("type conversion not supported for 'type'")
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (t *TypeValue) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case TypeType:
- return TypeType
- case StringType:
- return String(t.TypeName())
- }
- return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
-}
-
-// Equal implements ref.Val.Equal.
-func (t *TypeValue) Equal(other ref.Val) ref.Val {
- otherType, ok := other.(ref.Type)
- return Bool(ok && t.TypeName() == otherType.TypeName())
-}
-
-// HasTrait indicates whether the type supports the given trait.
-// Trait codes are defined in the traits package, e.g. see traits.AdderType.
-func (t *TypeValue) HasTrait(trait int) bool {
- return trait&t.traitMask == trait
-}
-
-// String implements fmt.Stringer.
-func (t *TypeValue) String() string {
- return t.name
-}
-
-// Type implements ref.Val.Type.
-func (t *TypeValue) Type() ref.Type {
- return TypeType
-}
-
-// TypeName gives the type's name as a string.
-func (t *TypeValue) TypeName() string {
- return t.name
-}
-
-// Value implements ref.Val.Value.
-func (t *TypeValue) Value() interface{} {
- return t.name
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/uint.go b/etcd/vendor/github.com/google/cel-go/common/types/uint.go
deleted file mode 100644
index ca266e0457..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/uint.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "fmt"
- "math"
- "reflect"
- "strconv"
-
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- anypb "google.golang.org/protobuf/types/known/anypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-)
-
-// Uint type implementation which supports comparison and math operators.
-type Uint uint64
-
-var (
- // UintType singleton.
- UintType = NewTypeValue("uint",
- traits.AdderType,
- traits.ComparerType,
- traits.DividerType,
- traits.ModderType,
- traits.MultiplierType,
- traits.SubtractorType)
-
- uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{})
-
- uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{})
-)
-
-// Uint constants
-const (
- uintZero = Uint(0)
-)
-
-// Add implements traits.Adder.Add.
-func (i Uint) Add(other ref.Val) ref.Val {
- otherUint, ok := other.(Uint)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- val, err := addUint64Checked(uint64(i), uint64(otherUint))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(val)
-}
-
-// Compare implements traits.Comparer.Compare.
-func (i Uint) Compare(other ref.Val) ref.Val {
- switch ov := other.(type) {
- case Double:
- if math.IsNaN(float64(ov)) {
- return NewErr("NaN values cannot be ordered")
- }
- return compareUintDouble(i, ov)
- case Int:
- return compareUintInt(i, ov)
- case Uint:
- return compareUint(i, ov)
- default:
- return MaybeNoSuchOverloadErr(other)
- }
-}
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (i Uint) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- switch typeDesc.Kind() {
- case reflect.Uint, reflect.Uint32:
- v, err := uint64ToUint32Checked(uint64(i))
- if err != nil {
- return 0, err
- }
- return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
- case reflect.Uint64:
- return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
- case reflect.Ptr:
- switch typeDesc {
- case anyValueType:
- // Primitives must be wrapped before being set on an Any field.
- return anypb.New(wrapperspb.UInt64(uint64(i)))
- case jsonValueType:
- // JSON can accurately represent 32-bit uints as floating point values.
- if i.isJSONSafe() {
- return structpb.NewNumberValue(float64(i)), nil
- }
- // Proto3 to JSON conversion requires string-formatted uint64 values
- // since the conversion to floating point would result in truncation.
- return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil
- case uint32WrapperType:
- // Convert the value to a wrapperspb.UInt32Value, error on overflow.
- v, err := uint64ToUint32Checked(uint64(i))
- if err != nil {
- return 0, err
- }
- return wrapperspb.UInt32(v), nil
- case uint64WrapperType:
- // Convert the value to a wrapperspb.UInt64Value.
- return wrapperspb.UInt64(uint64(i)), nil
- }
- switch typeDesc.Elem().Kind() {
- case reflect.Uint32:
- v, err := uint64ToUint32Checked(uint64(i))
- if err != nil {
- return 0, err
- }
- p := reflect.New(typeDesc.Elem())
- p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
- return p.Interface(), nil
- case reflect.Uint64:
- v := uint64(i)
- p := reflect.New(typeDesc.Elem())
- p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
- return p.Interface(), nil
- }
- case reflect.Interface:
- iv := i.Value()
- if reflect.TypeOf(iv).Implements(typeDesc) {
- return iv, nil
- }
- if reflect.TypeOf(i).Implements(typeDesc) {
- return i, nil
- }
- }
- return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc)
-}
-
-// ConvertToType implements ref.Val.ConvertToType.
-func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
- switch typeVal {
- case IntType:
- v, err := uint64ToInt64Checked(uint64(i))
- if err != nil {
- return wrapErr(err)
- }
- return Int(v)
- case UintType:
- return i
- case DoubleType:
- return Double(i)
- case StringType:
- return String(fmt.Sprintf("%d", uint64(i)))
- case TypeType:
- return UintType
- }
- return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal)
-}
-
-// Divide implements traits.Divider.Divide.
-func (i Uint) Divide(other ref.Val) ref.Val {
- otherUint, ok := other.(Uint)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- div, err := divideUint64Checked(uint64(i), uint64(otherUint))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(div)
-}
-
-// Equal implements ref.Val.Equal.
-func (i Uint) Equal(other ref.Val) ref.Val {
- switch ov := other.(type) {
- case Double:
- if math.IsNaN(float64(ov)) {
- return False
- }
- return Bool(compareUintDouble(i, ov) == 0)
- case Int:
- return Bool(compareUintInt(i, ov) == 0)
- case Uint:
- return Bool(i == ov)
- default:
- return False
- }
-}
-
-// Modulo implements traits.Modder.Modulo.
-func (i Uint) Modulo(other ref.Val) ref.Val {
- otherUint, ok := other.(Uint)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- mod, err := moduloUint64Checked(uint64(i), uint64(otherUint))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(mod)
-}
-
-// Multiply implements traits.Multiplier.Multiply.
-func (i Uint) Multiply(other ref.Val) ref.Val {
- otherUint, ok := other.(Uint)
- if !ok {
- return MaybeNoSuchOverloadErr(other)
- }
- val, err := multiplyUint64Checked(uint64(i), uint64(otherUint))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(val)
-}
-
-// Subtract implements traits.Subtractor.Subtract.
-func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
- subtraUint, ok := subtrahend.(Uint)
- if !ok {
- return MaybeNoSuchOverloadErr(subtrahend)
- }
- val, err := subtractUint64Checked(uint64(i), uint64(subtraUint))
- if err != nil {
- return wrapErr(err)
- }
- return Uint(val)
-}
-
-// Type implements ref.Val.Type.
-func (i Uint) Type() ref.Type {
- return UintType
-}
-
-// Value implements ref.Val.Value.
-func (i Uint) Value() interface{} {
- return uint64(i)
-}
-
-// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON.
-func (i Uint) isJSONSafe() bool {
- return i <= maxIntJSON
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/unknown.go b/etcd/vendor/github.com/google/cel-go/common/types/unknown.go
deleted file mode 100644
index 95b47426fd..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/unknown.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
-
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Unknown type implementation which collects expression ids which caused the
-// current value to become unknown.
-type Unknown []int64
-
-var (
- // UnknownType singleton.
- UnknownType = NewTypeValue("unknown")
-)
-
-// ConvertToNative implements ref.Val.ConvertToNative.
-func (u Unknown) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- return u.Value(), nil
-}
-
-// ConvertToType is an identity function since unknown values cannot be modified.
-func (u Unknown) ConvertToType(typeVal ref.Type) ref.Val {
- return u
-}
-
-// Equal is an identity function since unknown values cannot be modified.
-func (u Unknown) Equal(other ref.Val) ref.Val {
- return u
-}
-
-// Type implements ref.Val.Type.
-func (u Unknown) Type() ref.Type {
- return UnknownType
-}
-
-// Value implements ref.Val.Value.
-func (u Unknown) Value() interface{} {
- return []int64(u)
-}
-
-// IsUnknown returns whether the element ref.Type or ref.Val is equal to the
-// UnknownType singleton.
-func IsUnknown(val ref.Val) bool {
- switch val.(type) {
- case Unknown:
- return true
- default:
- return false
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/common/types/util.go b/etcd/vendor/github.com/google/cel-go/common/types/util.go
deleted file mode 100644
index a8e9afa9e7..0000000000
--- a/etcd/vendor/github.com/google/cel-go/common/types/util.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType.
-func IsUnknownOrError(val ref.Val) bool {
- switch val.(type) {
- case Unknown, *Err:
- return true
- }
- return false
-}
-
-// IsPrimitiveType returns whether the input element ref.Val is a primitive type.
-// Note, primitive types do not include well-known types such as Duration and Timestamp.
-func IsPrimitiveType(val ref.Val) bool {
- switch val.Type() {
- case BoolType, BytesType, DoubleType, IntType, StringType, UintType:
- return true
- }
- return false
-}
-
-// Equal returns whether the two ref.Value are heterogeneously equivalent.
-func Equal(lhs ref.Val, rhs ref.Val) ref.Val {
- lNull := lhs == NullValue
- rNull := rhs == NullValue
- if lNull || rNull {
- return Bool(lNull == rNull)
- }
- return lhs.Equal(rhs)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/ext/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/ext/BUILD.bazel
deleted file mode 100644
index 9c2520b408..0000000000
--- a/etcd/vendor/github.com/google/cel-go/ext/BUILD.bazel
+++ /dev/null
@@ -1,36 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "encoders.go",
- "guards.go",
- "strings.go",
- ],
- importpath = "github.com/google/cel-go/ext",
- visibility = ["//visibility:public"],
- deps = [
- "//cel:go_default_library",
- "//common/types:go_default_library",
- "//common/types/ref:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "encoders_test.go",
- "strings_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
- deps = [
- "//cel:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/ext/README.md b/etcd/vendor/github.com/google/cel-go/ext/README.md
deleted file mode 100644
index 5ddcc41510..0000000000
--- a/etcd/vendor/github.com/google/cel-go/ext/README.md
+++ /dev/null
@@ -1,194 +0,0 @@
-# Extensions
-
-CEL extensions are a related set of constants, functions, macros, or other
-features which may not be covered by the core CEL spec.
-
-## Encoders
-
-Encoding utilies for marshalling data into standardized representations.
-
-### Base64.Decode
-
-Decodes base64-encoded string to bytes.
-
-This function will return an error if the string input is not
-base64-encoded.
-
- base64.decode() ->
-
-Examples:
-
- base64.decode('aGVsbG8=') // return b'hello'
- base64.decode('aGVsbG8') // error
-
-### Base64.Encode
-
-Encodes bytes to a base64-encoded string.
-
- base64.encode() ->
-
-Example:
-
- base64.encode(b'hello') // return 'aGVsbG8='
-
-## Strings
-
-Extended functions for string manipulation. As a general note, all indices are
-zero-based.
-
-### CharAt
-
-Returns the character at the given position. If the position is negative, or
-greater than the length of the string, the function will produce an error:
-
- .charAt() ->
-
-Examples:
-
- 'hello'.charAt(4) // return 'o'
- 'hello'.charAt(5) // return ''
- 'hello'.charAt(-1) // error
-
-### IndexOf
-
-Returns the integer index of the first occurrence of the search string. If the
-search string is not found the function returns -1.
-
-The function also accepts an optional position from which to begin the
-substring search. If the substring is the empty string, the index where the
-search starts is returned (zero or custom).
-
- .indexOf() ->
- .indexOf(, ) ->
-
-Examples:
-
- 'hello mellow'.indexOf('') // returns 0
- 'hello mellow'.indexOf('ello') // returns 1
- 'hello mellow'.indexOf('jello') // returns -1
- 'hello mellow'.indexOf('', 2) // returns 2
- 'hello mellow'.indexOf('ello', 2) // returns 7
- 'hello mellow'.indexOf('ello', 20) // error
-
-### LastIndexOf
-
-Returns the integer index of the last occurrence of the search string. If the
-search string is not found the function returns -1.
-
-The function also accepts an optional position which represents the last index
-to be considered as the beginning of the substring match. If the substring is
-the empty string, the index where the search starts is returned (string length
-or custom).
-
- .lastIndexOf() ->
- .lastIndexOf(, ) ->
-
-Examples:
-
- 'hello mellow'.lastIndexOf('') // returns 12
- 'hello mellow'.lastIndexOf('ello') // returns 7
- 'hello mellow'.lastIndexOf('jello') // returns -1
- 'hello mellow'.lastIndexOf('ello', 6) // returns 1
- 'hello mellow'.lastIndexOf('ello', -1) // error
-
-### LowerAscii
-
-Returns a new string where all ASCII characters are lower-cased.
-
-This function does not perform Unicode case-mapping for characters outside the
-ASCII range.
-
- .lowerAscii() ->
-
-Examples:
-
- 'TacoCat'.lowerAscii() // returns 'tacocat'
- 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
-
-### Replace
-
-Returns a new string based on the target, which replaces the occurrences of a
-search string with a replacement string if present. The function accepts an
-optional limit on the number of substring replacements to be made.
-
-When the replacement limit is 0, the result is the original string. When the
-limit is a negative number, the function behaves the same as replace all.
-
- .replace(, ) ->
- .replace(, , ) ->
-
-Examples:
-
- 'hello hello'.replace('he', 'we') // returns 'wello wello'
- 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
- 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
- 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
-
-### Split
-
-Returns a list of strings split from the input by the given separator. The
-function accepts an optional argument specifying a limit on the number of
-substrings produced by the split.
-
-When the split limit is 0, the result is an empty list. When the limit is 1,
-the result is the target string to split. When the limit is a negative
-number, the function behaves the same as split all.
-
- .split() -> >
- .split(, ) -> >
-
-Examples:
-
- 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
- 'hello hello hello'.split(' ', 0) // returns []
- 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
- 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
- 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
-
-### Substring
-
-Returns the substring given a numeric range corresponding to character
-positions. Optionally may omit the trailing range for a substring from a given
-character position until the end of a string.
-
-Character offsets are 0-based with an inclusive start range and exclusive end
-range. It is an error to specify an end range that is lower than the start
-range, or for either the start or end index to be negative or exceed the string
-length.
-
- .substring() ->
- .substring(, ) ->
-
-Examples:
-
- 'tacocat'.substring(4) // returns 'cat'
- 'tacocat'.substring(0, 4) // returns 'taco'
- 'tacocat'.substring(-1) // error
- 'tacocat'.substring(2, 1) // error
-
-### Trim
-
-Returns a new string which removes the leading and trailing whitespace in the
-target string. The trim function uses the Unicode definition of whitespace
-which does not include the zero-width spaces. See:
-https://en.wikipedia.org/wiki/Whitespace_character#Unicode
-
- .trim() ->
-
-Examples:
-
- ' \ttrim\n '.trim() // returns 'trim'
-
-### UpperAscii
-
-Returns a new string where all ASCII characters are upper-cased.
-
-This function does not perform Unicode case-mapping for characters outside the
-ASCII range.
-
- .upperAscii() ->
-
-Examples:
-
- 'TacoCat'.upperAscii() // returns 'TACOCAT'
- 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
diff --git a/etcd/vendor/github.com/google/cel-go/ext/encoders.go b/etcd/vendor/github.com/google/cel-go/ext/encoders.go
deleted file mode 100644
index 22e38c39f9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/ext/encoders.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "encoding/base64"
- "reflect"
-
- "github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Encoders returns a cel.EnvOption to configure extended functions for string, byte, and object
-// encodings.
-//
-// Base64.Decode
-//
-// Decodes base64-encoded string to bytes.
-//
-// This function will return an error if the string input is not base64-encoded.
-//
-// base64.decode() ->
-//
-// Examples:
-//
-// base64.decode('aGVsbG8=') // return b'hello'
-// base64.decode('aGVsbG8') // error
-//
-// Base64.Encode
-//
-// Encodes bytes to a base64-encoded string.
-//
-// base64.encode() ->
-//
-// Examples:
-//
-// base64.encode(b'hello') // return b'aGVsbG8='
-func Encoders() cel.EnvOption {
- return cel.Lib(encoderLib{})
-}
-
-type encoderLib struct{}
-
-func (encoderLib) CompileOptions() []cel.EnvOption {
- return []cel.EnvOption{
- cel.Function("base64.decode",
- cel.Overload("base64_decode_string", []*cel.Type{cel.StringType}, cel.BytesType,
- cel.UnaryBinding(func(str ref.Val) ref.Val {
- s := str.(types.String)
- return bytesOrError(base64DecodeString(string(s)))
- }))),
- cel.Function("base64.encode",
- cel.Overload("base64_encode_bytes", []*cel.Type{cel.BytesType}, cel.StringType,
- cel.UnaryBinding(func(bytes ref.Val) ref.Val {
- b := bytes.(types.Bytes)
- return stringOrError(base64EncodeBytes([]byte(b)))
- }))),
- }
-}
-
-func (encoderLib) ProgramOptions() []cel.ProgramOption {
- return []cel.ProgramOption{}
-}
-
-func base64DecodeString(str string) ([]byte, error) {
- return base64.StdEncoding.DecodeString(str)
-}
-
-func base64EncodeBytes(bytes []byte) (string, error) {
- return base64.StdEncoding.EncodeToString(bytes), nil
-}
-
-var (
- bytesListType = reflect.TypeOf([]byte{})
-)
diff --git a/etcd/vendor/github.com/google/cel-go/ext/guards.go b/etcd/vendor/github.com/google/cel-go/ext/guards.go
deleted file mode 100644
index 0794f859b5..0000000000
--- a/etcd/vendor/github.com/google/cel-go/ext/guards.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// function invocation guards for common call signatures within extension functions.
-
-func intOrError(i int64, err error) ref.Val {
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Int(i)
-}
-
-func bytesOrError(bytes []byte, err error) ref.Val {
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.Bytes(bytes)
-}
-
-func stringOrError(str string, err error) ref.Val {
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.String(str)
-}
-
-func listStringOrError(strs []string, err error) ref.Val {
- if err != nil {
- return types.NewErr(err.Error())
- }
- return types.DefaultTypeAdapter.NativeToValue(strs)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/ext/strings.go b/etcd/vendor/github.com/google/cel-go/ext/strings.go
deleted file mode 100644
index 6ce239ac2b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/ext/strings.go
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ext contains CEL extension libraries where each library defines a related set of
-// constants, functions, macros, or other configuration settings which may not be covered by
-// the core CEL spec.
-package ext
-
-import (
- "fmt"
- "reflect"
- "strings"
- "unicode"
-
- "github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Strings returns a cel.EnvOption to configure extended functions for string manipulation.
-// As a general note, all indices are zero-based.
-//
-// CharAt
-//
-// Returns the character at the given position. If the position is negative, or greater than
-// the length of the string, the function will produce an error:
-//
-// .charAt() ->
-//
-// Examples:
-//
-// 'hello'.charAt(4) // return 'o'
-// 'hello'.charAt(5) // return ''
-// 'hello'.charAt(-1) // error
-//
-// IndexOf
-//
-// Returns the integer index of the first occurrence of the search string. If the search string is
-// not found the function returns -1.
-//
-// The function also accepts an optional position from which to begin the substring search. If the
-// substring is the empty string, the index where the search starts is returned (zero or custom).
-//
-// .indexOf() ->
-// .indexOf(, ) ->
-//
-// Examples:
-//
-// 'hello mellow'.indexOf('') // returns 0
-// 'hello mellow'.indexOf('ello') // returns 1
-// 'hello mellow'.indexOf('jello') // returns -1
-// 'hello mellow'.indexOf('', 2) // returns 2
-// 'hello mellow'.indexOf('ello', 2) // returns 7
-// 'hello mellow'.indexOf('ello', 20) // error
-//
-// Join
-//
-// Returns a new string where the elements of string list are concatenated.
-//
-// The function also accepts an optional separator which is placed between elements in the resulting string.
-//
-// >.join() ->
-// >.join() ->
-//
-// Examples:
-//
-// ['hello', 'mellow'].join() // returns 'hellomellow'
-// ['hello', 'mellow'].join(' ') // returns 'hello mellow'
-// [].join() // returns ''
-// [].join('/') // returns ''
-//
-// LastIndexOf
-//
-// Returns the integer index at the start of the last occurrence of the search string. If the
-// search string is not found the function returns -1.
-//
-// The function also accepts an optional position which represents the last index to be
-// considered as the beginning of the substring match. If the substring is the empty string,
-// the index where the search starts is returned (string length or custom).
-//
-// .lastIndexOf() ->
-// .lastIndexOf(, ) ->
-//
-// Examples:
-//
-// 'hello mellow'.lastIndexOf('') // returns 12
-// 'hello mellow'.lastIndexOf('ello') // returns 7
-// 'hello mellow'.lastIndexOf('jello') // returns -1
-// 'hello mellow'.lastIndexOf('ello', 6) // returns 1
-// 'hello mellow'.lastIndexOf('ello', -1) // error
-//
-// LowerAscii
-//
-// Returns a new string where all ASCII characters are lower-cased.
-//
-// This function does not perform Unicode case-mapping for characters outside the ASCII range.
-//
-// .lowerAscii() ->
-//
-// Examples:
-//
-// 'TacoCat'.lowerAscii() // returns 'tacocat'
-// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
-//
-// Replace
-//
-// Returns a new string based on the target, which replaces the occurrences of a search string
-// with a replacement string if present. The function accepts an optional limit on the number of
-// substring replacements to be made.
-//
-// When the replacement limit is 0, the result is the original string. When the limit is a negative
-// number, the function behaves the same as replace all.
-//
-// .replace(, ) ->
-// .replace(, , ) ->
-//
-// Examples:
-//
-// 'hello hello'.replace('he', 'we') // returns 'wello wello'
-// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
-// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
-// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
-//
-// Split
-//
-// Returns a list of strings split from the input by the given separator. The function accepts
-// an optional argument specifying a limit on the number of substrings produced by the split.
-//
-// When the split limit is 0, the result is an empty list. When the limit is 1, the result is the
-// target string to split. When the limit is a negative number, the function behaves the same as
-// split all.
-//
-// .split() -> >
-// .split(, ) -> >
-//
-// Examples:
-//
-// 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
-// 'hello hello hello'.split(' ', 0) // returns []
-// 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
-// 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
-// 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
-//
-// Substring
-//
-// Returns the substring given a numeric range corresponding to character positions. Optionally
-// may omit the trailing range for a substring from a given character position until the end of
-// a string.
-//
-// Character offsets are 0-based with an inclusive start range and exclusive end range. It is an
-// error to specify an end range that is lower than the start range, or for either the start or end
-// index to be negative or exceed the string length.
-//
-// .substring() ->
-// .substring(, ) ->
-//
-// Examples:
-//
-// 'tacocat'.substring(4) // returns 'cat'
-// 'tacocat'.substring(0, 4) // returns 'taco'
-// 'tacocat'.substring(-1) // error
-// 'tacocat'.substring(2, 1) // error
-//
-// Trim
-//
-// Returns a new string which removes the leading and trailing whitespace in the target string.
-// The trim function uses the Unicode definition of whitespace which does not include the
-// zero-width spaces. See: https://en.wikipedia.org/wiki/Whitespace_character#Unicode
-//
-// .trim() ->
-//
-// Examples:
-//
-// ' \ttrim\n '.trim() // returns 'trim'
-//
-// UpperAscii
-//
-// Returns a new string where all ASCII characters are upper-cased.
-//
-// This function does not perform Unicode case-mapping for characters outside the ASCII range.
-//
-// .upperAscii() ->
-//
-// Examples:
-//
-// 'TacoCat'.upperAscii() // returns 'TACOCAT'
-// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
-func Strings() cel.EnvOption {
- return cel.Lib(stringLib{})
-}
-
-type stringLib struct{}
-
-func (stringLib) CompileOptions() []cel.EnvOption {
- return []cel.EnvOption{
- cel.Function("charAt",
- cel.MemberOverload("string_char_at_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType,
- cel.BinaryBinding(func(str, ind ref.Val) ref.Val {
- s := str.(types.String)
- i := ind.(types.Int)
- return stringOrError(charAt(string(s), int64(i)))
- }))),
- cel.Function("indexOf",
- cel.MemberOverload("string_index_of_string", []*cel.Type{cel.StringType, cel.StringType}, cel.IntType,
- cel.BinaryBinding(func(str, substr ref.Val) ref.Val {
- s := str.(types.String)
- sub := substr.(types.String)
- return intOrError(indexOf(string(s), string(sub)))
- })),
- cel.MemberOverload("string_index_of_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.IntType,
- cel.FunctionBinding(func(args ...ref.Val) ref.Val {
- s := args[0].(types.String)
- sub := args[1].(types.String)
- offset := args[2].(types.Int)
- return intOrError(indexOfOffset(string(s), string(sub), int64(offset)))
- }))),
- cel.Function("lastIndexOf",
- cel.MemberOverload("string_last_index_of_string", []*cel.Type{cel.StringType, cel.StringType}, cel.IntType,
- cel.BinaryBinding(func(str, substr ref.Val) ref.Val {
- s := str.(types.String)
- sub := substr.(types.String)
- return intOrError(lastIndexOf(string(s), string(sub)))
- })),
- cel.MemberOverload("string_last_index_of_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.IntType,
- cel.FunctionBinding(func(args ...ref.Val) ref.Val {
- s := args[0].(types.String)
- sub := args[1].(types.String)
- offset := args[2].(types.Int)
- return intOrError(lastIndexOfOffset(string(s), string(sub), int64(offset)))
- }))),
- cel.Function("lowerAscii",
- cel.MemberOverload("string_lower_ascii", []*cel.Type{cel.StringType}, cel.StringType,
- cel.UnaryBinding(func(str ref.Val) ref.Val {
- s := str.(types.String)
- return stringOrError(lowerASCII(string(s)))
- }))),
- cel.Function("replace",
- cel.MemberOverload(
- "string_replace_string_string", []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, cel.StringType,
- cel.FunctionBinding(func(args ...ref.Val) ref.Val {
- str := args[0].(types.String)
- old := args[1].(types.String)
- new := args[2].(types.String)
- return stringOrError(replace(string(str), string(old), string(new)))
- })),
- cel.MemberOverload(
- "string_replace_string_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.StringType, cel.IntType}, cel.StringType,
- cel.FunctionBinding(func(args ...ref.Val) ref.Val {
- str := args[0].(types.String)
- old := args[1].(types.String)
- new := args[2].(types.String)
- n := args[3].(types.Int)
- return stringOrError(replaceN(string(str), string(old), string(new), int64(n)))
- }))),
- cel.Function("split",
- cel.MemberOverload("string_split_string", []*cel.Type{cel.StringType, cel.StringType}, cel.ListType(cel.StringType),
- cel.BinaryBinding(func(str, separator ref.Val) ref.Val {
- s := str.(types.String)
- sep := separator.(types.String)
- return listStringOrError(split(string(s), string(sep)))
- })),
- cel.MemberOverload("string_split_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.ListType(cel.StringType),
- cel.FunctionBinding(func(args ...ref.Val) ref.Val {
- s := args[0].(types.String)
- sep := args[1].(types.String)
- n := args[2].(types.Int)
- return listStringOrError(splitN(string(s), string(sep), int64(n)))
- }))),
- cel.Function("substring",
- cel.MemberOverload("string_substring_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType,
- cel.BinaryBinding(func(str, offset ref.Val) ref.Val {
- s := str.(types.String)
- off := offset.(types.Int)
- return stringOrError(substr(string(s), int64(off)))
- })),
- cel.MemberOverload("string_substring_int_int", []*cel.Type{cel.StringType, cel.IntType, cel.IntType}, cel.StringType,
- cel.FunctionBinding(func(args ...ref.Val) ref.Val {
- s := args[0].(types.String)
- start := args[1].(types.Int)
- end := args[2].(types.Int)
- return stringOrError(substrRange(string(s), int64(start), int64(end)))
- }))),
- cel.Function("trim",
- cel.MemberOverload("string_trim", []*cel.Type{cel.StringType}, cel.StringType,
- cel.UnaryBinding(func(str ref.Val) ref.Val {
- s := str.(types.String)
- return stringOrError(trimSpace(string(s)))
- }))),
- cel.Function("upperAscii",
- cel.MemberOverload("string_upper_ascii", []*cel.Type{cel.StringType}, cel.StringType,
- cel.UnaryBinding(func(str ref.Val) ref.Val {
- s := str.(types.String)
- return stringOrError(upperASCII(string(s)))
- }))),
- cel.Function("join",
- cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType,
- cel.UnaryBinding(func(list ref.Val) ref.Val {
- l, err := list.ConvertToNative(stringListType)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return stringOrError(join(l.([]string)))
- })),
- cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType,
- cel.BinaryBinding(func(list, delim ref.Val) ref.Val {
- l, err := list.ConvertToNative(stringListType)
- if err != nil {
- return types.NewErr(err.Error())
- }
- d := delim.(types.String)
- return stringOrError(joinSeparator(l.([]string), string(d)))
- }))),
- }
-}
-
-func (stringLib) ProgramOptions() []cel.ProgramOption {
- return []cel.ProgramOption{}
-}
-
-func charAt(str string, ind int64) (string, error) {
- i := int(ind)
- runes := []rune(str)
- if i < 0 || i > len(runes) {
- return "", fmt.Errorf("index out of range: %d", ind)
- }
- if i == len(runes) {
- return "", nil
- }
- return string(runes[i]), nil
-}
-
-func indexOf(str, substr string) (int64, error) {
- return indexOfOffset(str, substr, int64(0))
-}
-
-func indexOfOffset(str, substr string, offset int64) (int64, error) {
- if substr == "" {
- return offset, nil
- }
- off := int(offset)
- runes := []rune(str)
- subrunes := []rune(substr)
- if off < 0 || off >= len(runes) {
- return -1, fmt.Errorf("index out of range: %d", off)
- }
- for i := off; i < len(runes)-(len(subrunes)-1); i++ {
- found := true
- for j := 0; j < len(subrunes); j++ {
- if runes[i+j] != subrunes[j] {
- found = false
- break
- }
- }
- if found {
- return int64(i), nil
- }
- }
- return -1, nil
-}
-
-func lastIndexOf(str, substr string) (int64, error) {
- runes := []rune(str)
- if substr == "" {
- return int64(len(runes)), nil
- }
- return lastIndexOfOffset(str, substr, int64(len(runes)-1))
-}
-
-func lastIndexOfOffset(str, substr string, offset int64) (int64, error) {
- if substr == "" {
- return offset, nil
- }
- off := int(offset)
- runes := []rune(str)
- subrunes := []rune(substr)
- if off < 0 || off >= len(runes) {
- return -1, fmt.Errorf("index out of range: %d", off)
- }
- if off > len(runes)-len(subrunes) {
- off = len(runes) - len(subrunes)
- }
- for i := off; i >= 0; i-- {
- found := true
- for j := 0; j < len(subrunes); j++ {
- if runes[i+j] != subrunes[j] {
- found = false
- break
- }
- }
- if found {
- return int64(i), nil
- }
- }
- return -1, nil
-}
-
-func lowerASCII(str string) (string, error) {
- runes := []rune(str)
- for i, r := range runes {
- if r <= unicode.MaxASCII {
- r = unicode.ToLower(r)
- runes[i] = r
- }
- }
- return string(runes), nil
-}
-
-func replace(str, old, new string) (string, error) {
- return strings.ReplaceAll(str, old, new), nil
-}
-
-func replaceN(str, old, new string, n int64) (string, error) {
- return strings.Replace(str, old, new, int(n)), nil
-}
-
-func split(str, sep string) ([]string, error) {
- return strings.Split(str, sep), nil
-}
-
-func splitN(str, sep string, n int64) ([]string, error) {
- return strings.SplitN(str, sep, int(n)), nil
-}
-
-func substr(str string, start int64) (string, error) {
- runes := []rune(str)
- if int(start) < 0 || int(start) > len(runes) {
- return "", fmt.Errorf("index out of range: %d", start)
- }
- return string(runes[start:]), nil
-}
-
-func substrRange(str string, start, end int64) (string, error) {
- runes := []rune(str)
- l := len(runes)
- if start > end {
- return "", fmt.Errorf("invalid substring range. start: %d, end: %d", start, end)
- }
- if int(start) < 0 || int(start) > l {
- return "", fmt.Errorf("index out of range: %d", start)
- }
- if int(end) < 0 || int(end) > l {
- return "", fmt.Errorf("index out of range: %d", end)
- }
- return string(runes[int(start):int(end)]), nil
-}
-
-func trimSpace(str string) (string, error) {
- return strings.TrimSpace(str), nil
-}
-
-func upperASCII(str string) (string, error) {
- runes := []rune(str)
- for i, r := range runes {
- if r <= unicode.MaxASCII {
- r = unicode.ToUpper(r)
- runes[i] = r
- }
- }
- return string(runes), nil
-}
-
-func joinSeparator(strs []string, separator string) (string, error) {
- return strings.Join(strs, separator), nil
-}
-
-func join(strs []string) (string, error) {
- return strings.Join(strs, ""), nil
-}
-
-var (
- stringListType = reflect.TypeOf([]string{})
-)
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
deleted file mode 100644
index 04a3ec7441..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
+++ /dev/null
@@ -1,72 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "activation.go",
- "attribute_patterns.go",
- "attributes.go",
- "coster.go",
- "decorators.go",
- "dispatcher.go",
- "evalstate.go",
- "interpretable.go",
- "interpreter.go",
- "optimizations.go",
- "planner.go",
- "prune.go",
- "runtimecost.go",
- ],
- importpath = "github.com/google/cel-go/interpreter",
- deps = [
- "//common:go_default_library",
- "//common/containers:go_default_library",
- "//common/operators:go_default_library",
- "//common/overloads:go_default_library",
- "//common/types:go_default_library",
- "//common/types/ref:go_default_library",
- "//common/types/traits:go_default_library",
- "//interpreter/functions:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
- "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- srcs = [
- "activation_test.go",
- "attribute_patterns_test.go",
- "attributes_test.go",
- "interpreter_test.go",
- "prune_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
- deps = [
- "//checker:go_default_library",
- "//checker/decls:go_default_library",
- "//common/containers:go_default_library",
- "//common/debug:go_default_library",
- "//common/operators:go_default_library",
- "//common/types:go_default_library",
- "//interpreter/functions:go_default_library",
- "//parser:go_default_library",
- "//test:go_default_library",
- "//test/proto2pb:go_default_library",
- "//test/proto3pb:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//types/known/anypb:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/activation.go b/etcd/vendor/github.com/google/cel-go/interpreter/activation.go
deleted file mode 100644
index 8686d4f04f..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/activation.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "github.com/google/cel-go/common/types/ref"
-)
-
-// Activation used to resolve identifiers by name and references by id.
-//
-// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
-type Activation interface {
- // ResolveName returns a value from the activation by qualified name, or false if the name
- // could not be found.
- ResolveName(name string) (interface{}, bool)
-
- // Parent returns the parent of the current activation, may be nil.
- // If non-nil, the parent will be searched during resolve calls.
- Parent() Activation
-}
-
-// EmptyActivation returns a variable-free activation.
-func EmptyActivation() Activation {
- return emptyActivation{}
-}
-
-// emptyActivation is a variable-free activation.
-type emptyActivation struct{}
-
-func (emptyActivation) ResolveName(string) (interface{}, bool) { return nil, false }
-func (emptyActivation) Parent() Activation { return nil }
-
-// NewActivation returns an activation based on a map-based binding where the map keys are
-// expected to be qualified names used with ResolveName calls.
-//
-// The input `bindings` may either be of type `Activation` or `map[string]interface{}`.
-//
-// Lazy bindings may be supplied within the map-based input in either of the following forms:
-// - func() interface{}
-// - func() ref.Val
-//
-// The output of the lazy binding will overwrite the variable reference in the internal map.
-//
-// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
-// the ref.TypeAdapter configured in the environment.
-func NewActivation(bindings interface{}) (Activation, error) {
- if bindings == nil {
- return nil, errors.New("bindings must be non-nil")
- }
- a, isActivation := bindings.(Activation)
- if isActivation {
- return a, nil
- }
- m, isMap := bindings.(map[string]interface{})
- if !isMap {
- return nil, fmt.Errorf(
- "activation input must be an activation or map[string]interface: got %T",
- bindings)
- }
- return &mapActivation{bindings: m}, nil
-}
-
-// mapActivation which implements Activation and maps of named values.
-//
-// Named bindings may lazily supply values by providing a function which accepts no arguments and
-// produces an interface value.
-type mapActivation struct {
- bindings map[string]interface{}
-}
-
-// Parent implements the Activation interface method.
-func (a *mapActivation) Parent() Activation {
- return nil
-}
-
-// ResolveName implements the Activation interface method.
-func (a *mapActivation) ResolveName(name string) (interface{}, bool) {
- obj, found := a.bindings[name]
- if !found {
- return nil, false
- }
- fn, isLazy := obj.(func() ref.Val)
- if isLazy {
- obj = fn()
- a.bindings[name] = obj
- }
- fnRaw, isLazy := obj.(func() interface{})
- if isLazy {
- obj = fnRaw()
- a.bindings[name] = obj
- }
- return obj, found
-}
-
-// hierarchicalActivation which implements Activation and contains a parent and
-// child activation.
-type hierarchicalActivation struct {
- parent Activation
- child Activation
-}
-
-// Parent implements the Activation interface method.
-func (a *hierarchicalActivation) Parent() Activation {
- return a.parent
-}
-
-// ResolveName implements the Activation interface method.
-func (a *hierarchicalActivation) ResolveName(name string) (interface{}, bool) {
- if object, found := a.child.ResolveName(name); found {
- return object, found
- }
- return a.parent.ResolveName(name)
-}
-
-// NewHierarchicalActivation takes two activations and produces a new one which prioritizes
-// resolution in the child first and parent(s) second.
-func NewHierarchicalActivation(parent Activation, child Activation) Activation {
- return &hierarchicalActivation{parent, child}
-}
-
-// NewPartialActivation returns an Activation which contains a list of AttributePattern values
-// representing field and index operations that should result in a 'types.Unknown' result.
-//
-// The `bindings` value may be any value type supported by the interpreter.NewActivation call,
-// but is typically either an existing Activation or map[string]interface{}.
-func NewPartialActivation(bindings interface{},
- unknowns ...*AttributePattern) (PartialActivation, error) {
- a, err := NewActivation(bindings)
- if err != nil {
- return nil, err
- }
- return &partActivation{Activation: a, unknowns: unknowns}, nil
-}
-
-// PartialActivation extends the Activation interface with a set of UnknownAttributePatterns.
-type PartialActivation interface {
- Activation
-
- // UnknownAttributePaths returns a set of AttributePattern values which match Attribute
- // expressions for data accesses whose values are not yet known.
- UnknownAttributePatterns() []*AttributePattern
-}
-
-// partActivation is the default implementations of the PartialActivation interface.
-type partActivation struct {
- Activation
- unknowns []*AttributePattern
-}
-
-// UnknownAttributePatterns implements the PartialActivation interface method.
-func (a *partActivation) UnknownAttributePatterns() []*AttributePattern {
- return a.unknowns
-}
-
-// varActivation represents a single mutable variable binding.
-//
-// This activation type should only be used within folds as the fold loop controls the object
-// life-cycle.
-type varActivation struct {
- parent Activation
- name string
- val ref.Val
-}
-
-// Parent implements the Activation interface method.
-func (v *varActivation) Parent() Activation {
- return v.parent
-}
-
-// ResolveName implements the Activation interface method.
-func (v *varActivation) ResolveName(name string) (interface{}, bool) {
- if name == v.name {
- return v.val, true
- }
- return v.parent.ResolveName(name)
-}
-
-var (
- // pool of var activations to reduce allocations during folds.
- varActivationPool = &sync.Pool{
- New: func() interface{} {
- return &varActivation{}
- },
- }
-)
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/etcd/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
deleted file mode 100644
index b33f7f7fd9..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
+++ /dev/null
@@ -1,404 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "fmt"
-
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// AttributePattern represents a top-level variable with an optional set of qualifier patterns.
-//
-// When using a CEL expression within a container, e.g. a package or namespace, the variable name
-// in the pattern must match the qualified name produced during the variable namespace resolution.
-// For example, if variable `c` appears in an expression whose container is `a.b`, the variable
-// name supplied to the pattern must be `a.b.c`
-//
-// The qualifier patterns for attribute matching must be one of the following:
-//
-// - valid map key type: string, int, uint, bool
-// - wildcard (*)
-//
-// Examples:
-//
-// 1. ns.myvar["complex-value"]
-// 2. ns.myvar["complex-value"][0]
-// 3. ns.myvar["complex-value"].*.name
-//
-// The first example is simple: match an attribute where the variable is 'ns.myvar' with a
-// field access on 'complex-value'. The second example expands the match to indicate that only
-// a specific index `0` should match. And lastly, the third example matches any indexed access
-// that later selects the 'name' field.
-type AttributePattern struct {
- variable string
- qualifierPatterns []*AttributeQualifierPattern
-}
-
-// NewAttributePattern produces a new mutable AttributePattern based on a variable name.
-func NewAttributePattern(variable string) *AttributePattern {
- return &AttributePattern{
- variable: variable,
- qualifierPatterns: []*AttributeQualifierPattern{},
- }
-}
-
-// QualString adds a string qualifier pattern to the AttributePattern. The string may be a valid
-// identifier, or string map key including empty string.
-func (apat *AttributePattern) QualString(pattern string) *AttributePattern {
- apat.qualifierPatterns = append(apat.qualifierPatterns,
- &AttributeQualifierPattern{value: pattern})
- return apat
-}
-
-// QualInt adds an int qualifier pattern to the AttributePattern. The index may be either a map or
-// list index.
-func (apat *AttributePattern) QualInt(pattern int64) *AttributePattern {
- apat.qualifierPatterns = append(apat.qualifierPatterns,
- &AttributeQualifierPattern{value: pattern})
- return apat
-}
-
-// QualUint adds an uint qualifier pattern for a map index operation to the AttributePattern.
-func (apat *AttributePattern) QualUint(pattern uint64) *AttributePattern {
- apat.qualifierPatterns = append(apat.qualifierPatterns,
- &AttributeQualifierPattern{value: pattern})
- return apat
-}
-
-// QualBool adds a bool qualifier pattern for a map index operation to the AttributePattern.
-func (apat *AttributePattern) QualBool(pattern bool) *AttributePattern {
- apat.qualifierPatterns = append(apat.qualifierPatterns,
- &AttributeQualifierPattern{value: pattern})
- return apat
-}
-
-// Wildcard adds a special sentinel qualifier pattern that will match any single qualifier.
-func (apat *AttributePattern) Wildcard() *AttributePattern {
- apat.qualifierPatterns = append(apat.qualifierPatterns,
- &AttributeQualifierPattern{wildcard: true})
- return apat
-}
-
-// VariableMatches returns true if the fully qualified variable matches the AttributePattern
-// fully qualified variable name.
-func (apat *AttributePattern) VariableMatches(variable string) bool {
- return apat.variable == variable
-}
-
-// QualifierPatterns returns the set of AttributeQualifierPattern values on the AttributePattern.
-func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
- return apat.qualifierPatterns
-}
-
-// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
-type AttributeQualifierPattern struct {
- wildcard bool
- value interface{}
-}
-
-// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
-// qualifierValueEquator interface and its IsValueEqualTo returns true for the qualifier pattern.
-func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
- if qpat.wildcard {
- return true
- }
- qve, ok := q.(qualifierValueEquator)
- return ok && qve.QualifierValueEquals(qpat.value)
-}
-
-// qualifierValueEquator defines an interface for determining if an input value, of valid map key
-// type, is equal to the value held in the Qualifier. This interface is used by the
-// AttributeQualifierPattern to determine pattern matches for non-wildcard qualifier patterns.
-//
-// Note: Attribute values are also Qualifier values; however, Attributes are resolved before
-// qualification happens. This is an implementation detail, but one relevant to why the Attribute
-// types do not surface in the list of implementations.
-//
-// See: partialAttributeFactory.matchesUnknownPatterns for more details on how this interface is
-// used.
-type qualifierValueEquator interface {
- // QualifierValueEquals returns true if the input value is equal to the value held in the
- // Qualifier.
- QualifierValueEquals(value interface{}) bool
-}
-
-// QualifierValueEquals implementation for boolean qualifiers.
-func (q *boolQualifier) QualifierValueEquals(value interface{}) bool {
- bval, ok := value.(bool)
- return ok && q.value == bval
-}
-
-// QualifierValueEquals implementation for field qualifiers.
-func (q *fieldQualifier) QualifierValueEquals(value interface{}) bool {
- sval, ok := value.(string)
- return ok && q.Name == sval
-}
-
-// QualifierValueEquals implementation for string qualifiers.
-func (q *stringQualifier) QualifierValueEquals(value interface{}) bool {
- sval, ok := value.(string)
- return ok && q.value == sval
-}
-
-// QualifierValueEquals implementation for int qualifiers.
-func (q *intQualifier) QualifierValueEquals(value interface{}) bool {
- return numericValueEquals(value, q.celValue)
-}
-
-// QualifierValueEquals implementation for uint qualifiers.
-func (q *uintQualifier) QualifierValueEquals(value interface{}) bool {
- return numericValueEquals(value, q.celValue)
-}
-
-// QualifierValueEquals implementation for double qualifiers.
-func (q *doubleQualifier) QualifierValueEquals(value interface{}) bool {
- return numericValueEquals(value, q.celValue)
-}
-
-// numericValueEquals uses CEL equality to determine whether two number values are
-func numericValueEquals(value interface{}, celValue ref.Val) bool {
- val := types.DefaultTypeAdapter.NativeToValue(value)
- return celValue.Equal(val) == types.True
-}
-
-// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing
-// AttributePattern matches with PartialActivation inputs.
-func NewPartialAttributeFactory(container *containers.Container,
- adapter ref.TypeAdapter,
- provider ref.TypeProvider) AttributeFactory {
- fac := NewAttributeFactory(container, adapter, provider)
- return &partialAttributeFactory{
- AttributeFactory: fac,
- container: container,
- adapter: adapter,
- provider: provider,
- }
-}
-
-type partialAttributeFactory struct {
- AttributeFactory
- container *containers.Container
- adapter ref.TypeAdapter
- provider ref.TypeProvider
-}
-
-// AbsoluteAttribute implementation of the AttributeFactory interface which wraps the
-// NamespacedAttribute resolution in an internal attributeMatcher object to dynamically match
-// unknown patterns from PartialActivation inputs if given.
-func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
- attr := fac.AttributeFactory.AbsoluteAttribute(id, names...)
- return &attributeMatcher{fac: fac, NamespacedAttribute: attr}
-}
-
-// MaybeAttribute implementation of the AttributeFactory interface which ensure that the set of
-// 'maybe' NamespacedAttribute values are produced using the partialAttributeFactory rather than
-// the base AttributeFactory implementation.
-func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute {
- return &maybeAttribute{
- id: id,
- attrs: []NamespacedAttribute{
- fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...),
- },
- adapter: fac.adapter,
- provider: fac.provider,
- fac: fac,
- }
-}
-
-// matchesUnknownPatterns returns true if the variable names and qualifiers for a given
-// Attribute value match any of the ActivationPattern objects in the set of unknown activation
-// patterns on the given PartialActivation.
-//
-// For example, in the expression `a.b`, the Attribute is composed of variable `a`, with string
-// qualifier `b`. When a PartialActivation is supplied, it indicates that some or all of the data
-// provided in the input is unknown by specifying unknown AttributePatterns. An AttributePattern
-// that refers to variable `a` with a string qualifier of `c` will not match `a.b`; however, any
-// of the following patterns will match Attribute `a.b`:
-//
-// - `AttributePattern("a")`
-// - `AttributePattern("a").Wildcard()`
-// - `AttributePattern("a").QualString("b")`
-// - `AttributePattern("a").QualString("b").QualInt(0)`
-//
-// Any AttributePattern which overlaps an Attribute or vice-versa will produce an Unknown result
-// for the last pattern matched variable or qualifier in the Attribute. In the first matching
-// example, the expression id representing variable `a` would be listed in the Unknown result,
-// whereas in the other pattern examples, the qualifier `b` would be returned as the Unknown.
-func (fac *partialAttributeFactory) matchesUnknownPatterns(
- vars PartialActivation,
- attrID int64,
- variableNames []string,
- qualifiers []Qualifier) (types.Unknown, error) {
- patterns := vars.UnknownAttributePatterns()
- candidateIndices := map[int]struct{}{}
- for _, variable := range variableNames {
- for i, pat := range patterns {
- if pat.VariableMatches(variable) {
- candidateIndices[i] = struct{}{}
- }
- }
- }
- // Determine whether to return early if there are no candidate unknown patterns.
- if len(candidateIndices) == 0 {
- return nil, nil
- }
- // Determine whether to return early if there are no qualifiers.
- if len(qualifiers) == 0 {
- return types.Unknown{attrID}, nil
- }
- // Resolve the attribute qualifiers into a static set. This prevents more dynamic
- // Attribute resolutions than necessary when there are multiple unknown patterns
- // that traverse the same Attribute-based qualifier field.
- newQuals := make([]Qualifier, len(qualifiers))
- for i, qual := range qualifiers {
- attr, isAttr := qual.(Attribute)
- if isAttr {
- val, err := attr.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- // If this resolution behavior ever changes, new implementations of the
- // qualifierValueEquator may be required to handle proper resolution.
- qual, err = fac.NewQualifier(nil, qual.ID(), val)
- if err != nil {
- return nil, err
- }
- }
- newQuals[i] = qual
- }
- // Determine whether any of the unknown patterns match.
- for patIdx := range candidateIndices {
- pat := patterns[patIdx]
- isUnk := true
- matchExprID := attrID
- qualPats := pat.QualifierPatterns()
- for i, qual := range newQuals {
- if i >= len(qualPats) {
- break
- }
- matchExprID = qual.ID()
- qualPat := qualPats[i]
- // Note, the AttributeQualifierPattern relies on the input Qualifier not being an
- // Attribute, since there is no way to resolve the Attribute with the information
- // provided to the Matches call.
- if !qualPat.Matches(qual) {
- isUnk = false
- break
- }
- }
- if isUnk {
- return types.Unknown{matchExprID}, nil
- }
- }
- return nil, nil
-}
-
-// attributeMatcher embeds the NamespacedAttribute interface which allows it to participate in
-// AttributePattern matching against Attribute values without having to modify the code paths that
-// identify Attributes in expressions.
-type attributeMatcher struct {
- NamespacedAttribute
- qualifiers []Qualifier
- fac *partialAttributeFactory
-}
-
-// AddQualifier implements the Attribute interface method.
-func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) {
- // Add the qualifier to the embedded NamespacedAttribute. If the input to the Resolve
- // method is not a PartialActivation, or does not match an unknown attribute pattern, the
- // Resolve method is directly invoked on the underlying NamespacedAttribute.
- _, err := m.NamespacedAttribute.AddQualifier(qual)
- if err != nil {
- return nil, err
- }
- // The attributeMatcher overloads TryResolve and will attempt to match unknown patterns against
- // the variable name and qualifier set contained within the Attribute. These values are not
- // directly inspectable on the top-level NamespacedAttribute interface and so are tracked within
- // the attributeMatcher.
- m.qualifiers = append(m.qualifiers, qual)
- return m, nil
-}
-
-// Resolve is an implementation of the Attribute interface method which uses the
-// attributeMatcher TryResolve implementation rather than the embedded NamespacedAttribute
-// Resolve implementation.
-func (m *attributeMatcher) Resolve(vars Activation) (interface{}, error) {
- obj, found, err := m.TryResolve(vars)
- if err != nil {
- return nil, err
- }
- if !found {
- return nil, fmt.Errorf("no such attribute: %v", m.NamespacedAttribute)
- }
- return obj, nil
-}
-
-// TryResolve is an implementation of the NamespacedAttribute interface method which tests
-// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise,
-// the standard Resolve logic applies.
-func (m *attributeMatcher) TryResolve(vars Activation) (interface{}, bool, error) {
- id := m.NamespacedAttribute.ID()
- // Bug in how partial activation is resolved, should search parents as well.
- partial, isPartial := toPartialActivation(vars)
- if isPartial {
- unk, err := m.fac.matchesUnknownPatterns(
- partial,
- id,
- m.CandidateVariableNames(),
- m.qualifiers)
- if err != nil {
- return nil, true, err
- }
- if unk != nil {
- return unk, true, nil
- }
- }
- return m.NamespacedAttribute.TryResolve(vars)
-}
-
-// Qualify is an implementation of the Qualifier interface method.
-func (m *attributeMatcher) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := m.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := m.fac.NewQualifier(nil, m.ID(), val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
-}
-
-func toPartialActivation(vars Activation) (PartialActivation, bool) {
- pv, ok := vars.(PartialActivation)
- if ok {
- return pv, true
- }
- if vars.Parent() != nil {
- return toPartialActivation(vars.Parent())
- }
- return nil, false
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/attributes.go b/etcd/vendor/github.com/google/cel-go/interpreter/attributes.go
deleted file mode 100644
index 4f1772ea39..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/attributes.go
+++ /dev/null
@@ -1,1051 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "fmt"
- "math"
-
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// AttributeFactory provides methods creating Attribute and Qualifier values.
-type AttributeFactory interface {
- // AbsoluteAttribute creates an attribute that refers to a top-level variable name.
- //
- // Checked expressions generate absolute attribute with a single name.
- // Parse-only expressions may have more than one possible absolute identifier when the
- // expression is created within a container, e.g. package or namespace.
- //
- // When there is more than one name supplied to the AbsoluteAttribute call, the names
- // must be in CEL's namespace resolution order. The name arguments provided here are
- // returned in the same order as they were provided by the NamespacedAttribute
- // CandidateVariableNames method.
- AbsoluteAttribute(id int64, names ...string) NamespacedAttribute
-
- // ConditionalAttribute creates an attribute with two Attribute branches, where the Attribute
- // that is resolved depends on the boolean evaluation of the input 'expr'.
- ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute
-
- // MaybeAttribute creates an attribute that refers to either a field selection or a namespaced
- // variable name.
- //
- // Only expressions which have not been type-checked may generate oneof attributes.
- MaybeAttribute(id int64, name string) Attribute
-
- // RelativeAttribute creates an attribute whose value is a qualification of a dynamic
- // computation rather than a static variable reference.
- RelativeAttribute(id int64, operand Interpretable) Attribute
-
- // NewQualifier creates a qualifier on the target object with a given value.
- //
- // The 'val' may be an Attribute or any proto-supported map key type: bool, int, string, uint.
- //
- // The qualifier may consider the object type being qualified, if present. If absent, the
- // qualification should be considered dynamic and the qualification should still work, though
- // it may be sub-optimal.
- NewQualifier(objType *exprpb.Type, qualID int64, val interface{}) (Qualifier, error)
-}
-
-// Qualifier marker interface for designating different qualifier values and where they appear
-// within field selections and index call expressions (`_[_]`).
-type Qualifier interface {
- // ID where the qualifier appears within an expression.
- ID() int64
-
- // Qualify performs a qualification, e.g. field selection, on the input object and returns
- // the value or error that results.
- Qualify(vars Activation, obj interface{}) (interface{}, error)
-}
-
-// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the
-// qualifier's constant value.
-//
-// Non-constant qualifiers are of Attribute type.
-type ConstantQualifier interface {
- Qualifier
-
- Value() ref.Val
-}
-
-// Attribute values are a variable or value with an optional set of qualifiers, such as field, key,
-// or index accesses.
-type Attribute interface {
- Qualifier
-
- // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid
- // qualifier type.
- AddQualifier(Qualifier) (Attribute, error)
-
- // Resolve returns the value of the Attribute given the current Activation.
- Resolve(Activation) (interface{}, error)
-}
-
-// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers
-// such as field, key, or index accesses.
-type NamespacedAttribute interface {
- Attribute
-
- // CandidateVariableNames returns the possible namespaced variable names for this Attribute in
- // the CEL namespace resolution order.
- CandidateVariableNames() []string
-
- // Qualifiers returns the list of qualifiers associated with the Attribute.s
- Qualifiers() []Qualifier
-
- // TryResolve attempts to return the value of the attribute given the current Activation.
- // If an error is encountered during attribute resolution, it will be returned immediately.
- // If the attribute cannot be resolved within the Activation, the result must be: `nil`,
- // `false`, `nil`.
- TryResolve(Activation) (interface{}, bool, error)
-}
-
-// NewAttributeFactory returns a default AttributeFactory which is produces Attribute values
-// capable of resolving types by simple names and qualify the values using the supported qualifier
-// types: bool, int, string, and uint.
-func NewAttributeFactory(cont *containers.Container,
- a ref.TypeAdapter,
- p ref.TypeProvider) AttributeFactory {
- return &attrFactory{
- container: cont,
- adapter: a,
- provider: p,
- }
-}
-
-type attrFactory struct {
- container *containers.Container
- adapter ref.TypeAdapter
- provider ref.TypeProvider
-}
-
-// AbsoluteAttribute refers to a variable value and an optional qualifier path.
-//
-// The namespaceNames represent the names the variable could have based on namespace
-// resolution rules.
-func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
- return &absoluteAttribute{
- id: id,
- namespaceNames: names,
- qualifiers: []Qualifier{},
- adapter: r.adapter,
- provider: r.provider,
- fac: r,
- }
-}
-
-// ConditionalAttribute supports the case where an attribute selection may occur on a conditional
-// expression, e.g. (cond ? a : b).c
-func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute {
- return &conditionalAttribute{
- id: id,
- expr: expr,
- truthy: t,
- falsy: f,
- adapter: r.adapter,
- fac: r,
- }
-}
-
-// MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be
-// direct variable accesses or some combination of variable access with qualification.
-func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute {
- return &maybeAttribute{
- id: id,
- attrs: []NamespacedAttribute{
- r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...),
- },
- adapter: r.adapter,
- provider: r.provider,
- fac: r,
- }
-}
-
-// RelativeAttribute refers to an expression and an optional qualifier path.
-func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute {
- return &relativeAttribute{
- id: id,
- operand: operand,
- qualifiers: []Qualifier{},
- adapter: r.adapter,
- fac: r,
- }
-}
-
-// NewQualifier is an implementation of the AttributeFactory interface.
-func (r *attrFactory) NewQualifier(objType *exprpb.Type,
- qualID int64,
- val interface{}) (Qualifier, error) {
- // Before creating a new qualifier check to see if this is a protobuf message field access.
- // If so, use the precomputed GetFrom qualification method rather than the standard
- // stringQualifier.
- str, isStr := val.(string)
- if isStr && objType != nil && objType.GetMessageType() != "" {
- ft, found := r.provider.FindFieldType(objType.GetMessageType(), str)
- if found && ft.IsSet != nil && ft.GetFrom != nil {
- return &fieldQualifier{
- id: qualID,
- Name: str,
- FieldType: ft,
- adapter: r.adapter,
- }, nil
- }
- }
- return newQualifier(r.adapter, qualID, val)
-}
-
-type absoluteAttribute struct {
- id int64
- // namespaceNames represent the names the variable could have based on declared container
- // (package) of the expression.
- namespaceNames []string
- qualifiers []Qualifier
- adapter ref.TypeAdapter
- provider ref.TypeProvider
- fac AttributeFactory
-}
-
-// ID implements the Attribute interface method.
-func (a *absoluteAttribute) ID() int64 {
- return a.id
-}
-
-// Cost implements the Coster interface method.
-func (a *absoluteAttribute) Cost() (min, max int64) {
- for _, q := range a.qualifiers {
- minQ, maxQ := estimateCost(q)
- min += minQ
- max += maxQ
- }
- min++ // For object retrieval.
- max++
- return
-}
-
-// AddQualifier implements the Attribute interface method.
-func (a *absoluteAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
- a.qualifiers = append(a.qualifiers, qual)
- return a, nil
-}
-
-// CandidateVariableNames implements the NamespaceAttribute interface method.
-func (a *absoluteAttribute) CandidateVariableNames() []string {
- return a.namespaceNames
-}
-
-// Qualifiers returns the list of Qualifier instances associated with the namespaced attribute.
-func (a *absoluteAttribute) Qualifiers() []Qualifier {
- return a.qualifiers
-}
-
-// Qualify is an implementation of the Qualifier interface method.
-func (a *absoluteAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
-}
-
-// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
-// variable is not found, or if its Qualifiers cannot be applied successfully.
-func (a *absoluteAttribute) Resolve(vars Activation) (interface{}, error) {
- obj, found, err := a.TryResolve(vars)
- if err != nil {
- return nil, err
- }
- if found {
- return obj, nil
- }
- return nil, fmt.Errorf("no such attribute: %v", a)
-}
-
-// String implements the Stringer interface method.
-func (a *absoluteAttribute) String() string {
- return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames)
-}
-
-// TryResolve iterates through the namespaced variable names until one is found within the
-// Activation or TypeProvider.
-//
-// If the variable name cannot be found as an Activation variable or in the TypeProvider as
-// a type, then the result is `nil`, `false`, `nil` per the interface requirement.
-func (a *absoluteAttribute) TryResolve(vars Activation) (interface{}, bool, error) {
- for _, nm := range a.namespaceNames {
- // If the variable is found, process it. Otherwise, wait until the checks to
- // determine whether the type is unknown before returning.
- op, found := vars.ResolveName(nm)
- if found {
- var err error
- for _, qual := range a.qualifiers {
- op, err = qual.Qualify(vars, op)
- if err != nil {
- return nil, true, err
- }
- }
- return op, true, nil
- }
- // Attempt to resolve the qualified type name if the name is not a variable identifier.
- typ, found := a.provider.FindIdent(nm)
- if found {
- if len(a.qualifiers) == 0 {
- return typ, true, nil
- }
- return nil, true, fmt.Errorf("no such attribute: %v", typ)
- }
- }
- return nil, false, nil
-}
-
-type conditionalAttribute struct {
- id int64
- expr Interpretable
- truthy Attribute
- falsy Attribute
- adapter ref.TypeAdapter
- fac AttributeFactory
-}
-
-// ID is an implementation of the Attribute interface method.
-func (a *conditionalAttribute) ID() int64 {
- return a.id
-}
-
-// Cost provides the heuristic cost of a ternary operation ? : .
-// The cost is computed as cost(expr) plus the min/max costs of evaluating either
-// `t` or `f`.
-func (a *conditionalAttribute) Cost() (min, max int64) {
- tMin, tMax := estimateCost(a.truthy)
- fMin, fMax := estimateCost(a.falsy)
- eMin, eMax := estimateCost(a.expr)
- return eMin + findMin(tMin, fMin), eMax + findMax(tMax, fMax)
-}
-
-// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing
-// the qualification of alternate attributes.
-func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
- _, err := a.truthy.AddQualifier(qual)
- if err != nil {
- return nil, err
- }
- _, err = a.falsy.AddQualifier(qual)
- if err != nil {
- return nil, err
- }
- return a, nil
-}
-
-// Qualify is an implementation of the Qualifier interface method.
-func (a *conditionalAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
-}
-
-// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly.
-func (a *conditionalAttribute) Resolve(vars Activation) (interface{}, error) {
- val := a.expr.Eval(vars)
- if types.IsError(val) {
- return nil, val.(*types.Err)
- }
- if val == types.True {
- return a.truthy.Resolve(vars)
- }
- if val == types.False {
- return a.falsy.Resolve(vars)
- }
- if types.IsUnknown(val) {
- return val, nil
- }
- return nil, types.MaybeNoSuchOverloadErr(val).(*types.Err)
-}
-
-// String is an implementation of the Stringer interface method.
-func (a *conditionalAttribute) String() string {
- return fmt.Sprintf("id: %v, truthy attribute: %v, falsy attribute: %v", a.id, a.truthy, a.falsy)
-}
-
-type maybeAttribute struct {
- id int64
- attrs []NamespacedAttribute
- adapter ref.TypeAdapter
- provider ref.TypeProvider
- fac AttributeFactory
-}
-
-// ID is an implementation of the Attribute interface method.
-func (a *maybeAttribute) ID() int64 {
- return a.id
-}
-
-// Cost implements the Coster interface method. The min cost is computed as the minimal cost among
-// all the possible attributes, the max cost ditto.
-func (a *maybeAttribute) Cost() (min, max int64) {
- min, max = math.MaxInt64, 0
- for _, a := range a.attrs {
- minA, maxA := estimateCost(a)
- min = findMin(min, minA)
- max = findMax(max, maxA)
- }
- return
-}
-
-func findMin(x, y int64) int64 {
- if x < y {
- return x
- }
- return y
-}
-
-func findMax(x, y int64) int64 {
- if x > y {
- return x
- }
- return y
-}
-
-// AddQualifier adds a qualifier to each possible attribute variant, and also creates
-// a new namespaced variable from the qualified value.
-//
-// The algorithm for building the maybe attribute is as follows:
-//
-// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
-//
-// mb = MaybeAttribute(, "a")
-//
-// Initializing the maybe attribute creates an absolute attribute internally which includes the
-// possible namespaced names of the attribute. In this example, let's assume we are in namespace
-// 'ns', then the maybe is either one of the following variable names:
-//
-// possible variables names -- ns.a, a
-//
-// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
-// name, or a field selection on one of the possible variable names produced earlier:
-//
-// mb.AddQualifier("b")
-//
-// possible variables names -- ns.a.b, a.b
-// possible field selection -- ns.a['b'], a['b']
-//
-// If none of the attributes within the maybe resolves a value, the result is an error.
-func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
- str := ""
- isStr := false
- cq, isConst := qual.(ConstantQualifier)
- if isConst {
- str, isStr = cq.Value().Value().(string)
- }
- var augmentedNames []string
- // First add the qualifier to all existing attributes in the oneof.
- for _, attr := range a.attrs {
- if isStr && len(attr.Qualifiers()) == 0 {
- candidateVars := attr.CandidateVariableNames()
- augmentedNames = make([]string, len(candidateVars))
- for i, name := range candidateVars {
- augmentedNames[i] = fmt.Sprintf("%s.%s", name, str)
- }
- }
- _, err := attr.AddQualifier(qual)
- if err != nil {
- return nil, err
- }
- }
- // Next, ensure the most specific variable / type reference is searched first.
- a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
- return a, nil
-}
-
-// Qualify is an implementation of the Qualifier interface method.
-func (a *maybeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
-}
-
-// Resolve follows the variable resolution rules to determine whether the attribute is a variable
-// or a field selection.
-func (a *maybeAttribute) Resolve(vars Activation) (interface{}, error) {
- for _, attr := range a.attrs {
- obj, found, err := attr.TryResolve(vars)
- // Return an error if one is encountered.
- if err != nil {
- return nil, err
- }
- // If the object was found, return it.
- if found {
- return obj, nil
- }
- }
- // Else, produce a no such attribute error.
- return nil, fmt.Errorf("no such attribute: %v", a)
-}
-
-// String is an implementation of the Stringer interface method.
-func (a *maybeAttribute) String() string {
- return fmt.Sprintf("id: %v, attributes: %v", a.id, a.attrs)
-}
-
-type relativeAttribute struct {
- id int64
- operand Interpretable
- qualifiers []Qualifier
- adapter ref.TypeAdapter
- fac AttributeFactory
-}
-
-// ID is an implementation of the Attribute interface method.
-func (a *relativeAttribute) ID() int64 {
- return a.id
-}
-
-// Cost implements the Coster interface method.
-func (a *relativeAttribute) Cost() (min, max int64) {
- min, max = estimateCost(a.operand)
- for _, qual := range a.qualifiers {
- minQ, maxQ := estimateCost(qual)
- min += minQ
- max += maxQ
- }
- return
-}
-
-// AddQualifier implements the Attribute interface method.
-func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
- a.qualifiers = append(a.qualifiers, qual)
- return a, nil
-}
-
-// Qualify is an implementation of the Qualifier interface method.
-func (a *relativeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
-}
-
-// Resolve expression value and qualifier relative to the expression result.
-func (a *relativeAttribute) Resolve(vars Activation) (interface{}, error) {
- // First, evaluate the operand.
- v := a.operand.Eval(vars)
- if types.IsError(v) {
- return nil, v.(*types.Err)
- }
- if types.IsUnknown(v) {
- return v, nil
- }
- // Next, qualify it. Qualification handles unknowns as well, so there's no need to recheck.
- var err error
- var obj interface{} = v
- for _, qual := range a.qualifiers {
- obj, err = qual.Qualify(vars, obj)
- if err != nil {
- return nil, err
- }
- }
- return obj, nil
-}
-
-// String is an implementation of the Stringer interface method.
-func (a *relativeAttribute) String() string {
- return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand)
-}
-
-func newQualifier(adapter ref.TypeAdapter, id int64, v interface{}) (Qualifier, error) {
- var qual Qualifier
- switch val := v.(type) {
- case Attribute:
- return &attrQualifier{id: id, Attribute: val}, nil
- case string:
- qual = &stringQualifier{id: id, value: val, celValue: types.String(val), adapter: adapter}
- case int:
- qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter}
- case int32:
- qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter}
- case int64:
- qual = &intQualifier{id: id, value: val, celValue: types.Int(val), adapter: adapter}
- case uint:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter}
- case uint32:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter}
- case uint64:
- qual = &uintQualifier{id: id, value: val, celValue: types.Uint(val), adapter: adapter}
- case bool:
- qual = &boolQualifier{id: id, value: val, celValue: types.Bool(val), adapter: adapter}
- case float32:
- qual = &doubleQualifier{id: id, value: float64(val), celValue: types.Double(val), adapter: adapter}
- case float64:
- qual = &doubleQualifier{id: id, value: val, celValue: types.Double(val), adapter: adapter}
- case types.String:
- qual = &stringQualifier{id: id, value: string(val), celValue: val, adapter: adapter}
- case types.Int:
- qual = &intQualifier{id: id, value: int64(val), celValue: val, adapter: adapter}
- case types.Uint:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: val, adapter: adapter}
- case types.Bool:
- qual = &boolQualifier{id: id, value: bool(val), celValue: val, adapter: adapter}
- case types.Double:
- qual = &doubleQualifier{id: id, value: float64(val), celValue: val, adapter: adapter}
- default:
- return nil, fmt.Errorf("invalid qualifier type: %T", v)
- }
- return qual, nil
-}
-
-type attrQualifier struct {
- id int64
- Attribute
-}
-
-func (q *attrQualifier) ID() int64 {
- return q.id
-}
-
-// Cost returns zero for constant field qualifiers
-func (q *attrQualifier) Cost() (min, max int64) {
- return estimateCost(q.Attribute)
-}
-
-type stringQualifier struct {
- id int64
- value string
- celValue ref.Val
- adapter ref.TypeAdapter
-}
-
-// ID is an implementation of the Qualifier interface method.
-func (q *stringQualifier) ID() int64 {
- return q.id
-}
-
-// Qualify implements the Qualifier interface method.
-func (q *stringQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- s := q.value
- isMap := false
- isKey := false
- switch o := obj.(type) {
- case map[string]interface{}:
- isMap = true
- obj, isKey = o[s]
- case map[string]string:
- isMap = true
- obj, isKey = o[s]
- case map[string]int:
- isMap = true
- obj, isKey = o[s]
- case map[string]int32:
- isMap = true
- obj, isKey = o[s]
- case map[string]int64:
- isMap = true
- obj, isKey = o[s]
- case map[string]uint:
- isMap = true
- obj, isKey = o[s]
- case map[string]uint32:
- isMap = true
- obj, isKey = o[s]
- case map[string]uint64:
- isMap = true
- obj, isKey = o[s]
- case map[string]float32:
- isMap = true
- obj, isKey = o[s]
- case map[string]float64:
- isMap = true
- obj, isKey = o[s]
- case map[string]bool:
- isMap = true
- obj, isKey = o[s]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
- }
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", s)
- }
- return obj, nil
-}
-
-// Value implements the ConstantQualifier interface
-func (q *stringQualifier) Value() ref.Val {
- return q.celValue
-}
-
-// Cost returns zero for constant field qualifiers
-func (q *stringQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
-type intQualifier struct {
- id int64
- value int64
- celValue ref.Val
- adapter ref.TypeAdapter
-}
-
-// ID is an implementation of the Qualifier interface method.
-func (q *intQualifier) ID() int64 {
- return q.id
-}
-
-// Qualify implements the Qualifier interface method.
-func (q *intQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- i := q.value
- isMap := false
- isKey := false
- isIndex := false
- switch o := obj.(type) {
- // The specialized map types supported by an int qualifier are considerably fewer than the set
- // of specialized map types supported by string qualifiers since they are less frequently used
- // than string-based map keys. Additional specializations may be added in the future if
- // desired.
- case map[int]interface{}:
- isMap = true
- obj, isKey = o[int(i)]
- case map[int32]interface{}:
- isMap = true
- obj, isKey = o[int32(i)]
- case map[int64]interface{}:
- isMap = true
- obj, isKey = o[i]
- case []interface{}:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []string:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []int:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []int32:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []int64:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []uint:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []uint32:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []uint64:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []float32:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []float64:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case []bool:
- isIndex = i >= 0 && i < int64(len(o))
- if isIndex {
- obj = o[i]
- }
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
- }
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", i)
- }
- if !isMap && !isIndex {
- return nil, fmt.Errorf("index out of bounds: %v", i)
- }
- return obj, nil
-}
-
-// Value implements the ConstantQualifier interface
-func (q *intQualifier) Value() ref.Val {
- return q.celValue
-}
-
-// Cost returns zero for constant field qualifiers
-func (q *intQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
-type uintQualifier struct {
- id int64
- value uint64
- celValue ref.Val
- adapter ref.TypeAdapter
-}
-
-// ID is an implementation of the Qualifier interface method.
-func (q *uintQualifier) ID() int64 {
- return q.id
-}
-
-// Qualify implements the Qualifier interface method.
-func (q *uintQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- u := q.value
- isMap := false
- isKey := false
- switch o := obj.(type) {
- // The specialized map types supported by a uint qualifier are considerably fewer than the set
- // of specialized map types supported by string qualifiers since they are less frequently used
- // than string-based map keys. Additional specializations may be added in the future if
- // desired.
- case map[uint]interface{}:
- isMap = true
- obj, isKey = o[uint(u)]
- case map[uint32]interface{}:
- isMap = true
- obj, isKey = o[uint32(u)]
- case map[uint64]interface{}:
- isMap = true
- obj, isKey = o[u]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
- }
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", u)
- }
- return obj, nil
-}
-
-// Value implements the ConstantQualifier interface
-func (q *uintQualifier) Value() ref.Val {
- return q.celValue
-}
-
-// Cost returns zero for constant field qualifiers
-func (q *uintQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
-type boolQualifier struct {
- id int64
- value bool
- celValue ref.Val
- adapter ref.TypeAdapter
-}
-
-// ID is an implementation of the Qualifier interface method.
-func (q *boolQualifier) ID() int64 {
- return q.id
-}
-
-// Qualify implements the Qualifier interface method.
-func (q *boolQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- b := q.value
- isKey := false
- switch o := obj.(type) {
- // The specialized map types supported by a bool qualifier are considerably fewer than the set
- // of specialized map types supported by string qualifiers since they are less frequently used
- // than string-based map keys. Additional specializations may be added in the future if
- // desired.
- case map[bool]interface{}:
- obj, isKey = o[b]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
- }
- if !isKey {
- return nil, fmt.Errorf("no such key: %v", b)
- }
- return obj, nil
-}
-
-// Value implements the ConstantQualifier interface
-func (q *boolQualifier) Value() ref.Val {
- return q.celValue
-}
-
-// Cost returns zero for constant field qualifiers
-func (q *boolQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
-// fieldQualifier indicates that the qualification is a well-defined field with a known
-// field type. When the field type is known this can be used to improve the speed and
-// efficiency of field resolution.
-type fieldQualifier struct {
- id int64
- Name string
- FieldType *ref.FieldType
- adapter ref.TypeAdapter
-}
-
-// ID is an implementation of the Qualifier interface method.
-func (q *fieldQualifier) ID() int64 {
- return q.id
-}
-
-// Qualify implements the Qualifier interface method.
-func (q *fieldQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- if rv, ok := obj.(ref.Val); ok {
- obj = rv.Value()
- }
- return q.FieldType.GetFrom(obj)
-}
-
-// Value implements the ConstantQualifier interface
-func (q *fieldQualifier) Value() ref.Val {
- return types.String(q.Name)
-}
-
-// Cost returns zero for constant field qualifiers
-func (q *fieldQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
-// doubleQualifier qualifies a CEL object, map, or list using a double value.
-//
-// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
-// type may not be known ahead of time and may not conform to the standard types supported as valid
-// protobuf map key types.
-type doubleQualifier struct {
- id int64
- value float64
- celValue ref.Val
- adapter ref.TypeAdapter
-}
-
-// ID is an implementation of the Qualifier interface method.
-func (q *doubleQualifier) ID() int64 {
- return q.id
-}
-
-// Qualify implements the Qualifier interface method.
-func (q *doubleQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- switch o := obj.(type) {
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
- }
-}
-
-// refResolve attempts to convert the value to a CEL value and then uses reflection methods
-// to try and resolve the qualifier.
-func refResolve(adapter ref.TypeAdapter, idx ref.Val, obj interface{}) (ref.Val, error) {
- celVal := adapter.NativeToValue(obj)
- mapper, isMapper := celVal.(traits.Mapper)
- if isMapper {
- elem, found := mapper.Find(idx)
- if !found {
- return nil, fmt.Errorf("no such key: %v", idx)
- }
- return elem, nil
- }
- indexer, isIndexer := celVal.(traits.Indexer)
- if isIndexer {
- elem := indexer.Get(idx)
- if types.IsError(elem) {
- return nil, elem.(*types.Err)
- }
- return elem, nil
- }
- if types.IsUnknown(celVal) {
- return celVal, nil
- }
- // TODO: If the types.Err value contains more than just an error message at some point in the
- // future, then it would be reasonable to return error values as ref.Val types rather than
- // simple go error types.
- if types.IsError(celVal) {
- return nil, celVal.(*types.Err)
- }
- return nil, fmt.Errorf("no such key: %v", idx)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/coster.go b/etcd/vendor/github.com/google/cel-go/interpreter/coster.go
deleted file mode 100644
index ac573d5745..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/coster.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import "math"
-
-// TODO: remove Coster.
-
-// Coster calculates the heuristic cost incurred during evaluation.
-// Deprecated: Please migrate cel.EstimateCost, it supports length estimates for input data and cost estimates for
-// extension functions.
-type Coster interface {
- Cost() (min, max int64)
-}
-
-// estimateCost returns the heuristic cost interval for the program.
-func estimateCost(i interface{}) (min, max int64) {
- c, ok := i.(Coster)
- if !ok {
- return 0, math.MaxInt64
- }
- return c.Cost()
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/decorators.go b/etcd/vendor/github.com/google/cel-go/interpreter/decorators.go
deleted file mode 100644
index bdbbad43e2..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/decorators.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-// InterpretableDecorator is a functional interface for decorating or replacing
-// Interpretable expression nodes at construction time.
-type InterpretableDecorator func(Interpretable) (Interpretable, error)
-
-// decObserveEval records evaluation state into an EvalState object.
-func decObserveEval(observer EvalObserver) InterpretableDecorator {
- return func(i Interpretable) (Interpretable, error) {
- switch inst := i.(type) {
- case *evalWatch, *evalWatchAttr, *evalWatchConst:
- // these instruction are already watching, return straight-away.
- return i, nil
- case InterpretableAttribute:
- return &evalWatchAttr{
- InterpretableAttribute: inst,
- observer: observer,
- }, nil
- case InterpretableConst:
- return &evalWatchConst{
- InterpretableConst: inst,
- observer: observer,
- }, nil
- default:
- return &evalWatch{
- Interpretable: i,
- observer: observer,
- }, nil
- }
- }
-}
-
-// decInterruptFolds creates an intepretable decorator which marks comprehensions as interruptable
-// where the interrupt state is communicated via a hidden variable on the Activation.
-func decInterruptFolds() InterpretableDecorator {
- return func(i Interpretable) (Interpretable, error) {
- fold, ok := i.(*evalFold)
- if !ok {
- return i, nil
- }
- fold.interruptable = true
- return fold, nil
- }
-}
-
-// decDisableShortcircuits ensures that all branches of an expression will be evaluated, no short-circuiting.
-func decDisableShortcircuits() InterpretableDecorator {
- return func(i Interpretable) (Interpretable, error) {
- switch expr := i.(type) {
- case *evalOr:
- return &evalExhaustiveOr{
- id: expr.id,
- lhs: expr.lhs,
- rhs: expr.rhs,
- }, nil
- case *evalAnd:
- return &evalExhaustiveAnd{
- id: expr.id,
- lhs: expr.lhs,
- rhs: expr.rhs,
- }, nil
- case *evalFold:
- expr.exhaustive = true
- return expr, nil
- case InterpretableAttribute:
- cond, isCond := expr.Attr().(*conditionalAttribute)
- if isCond {
- return &evalExhaustiveConditional{
- id: cond.id,
- attr: cond,
- adapter: expr.Adapter(),
- }, nil
- }
- }
- return i, nil
- }
-}
-
-// decOptimize optimizes the program plan by looking for common evaluation patterns and
-// conditionally precomputing the result.
-// - build list and map values with constant elements.
-// - convert 'in' operations to set membership tests if possible.
-func decOptimize() InterpretableDecorator {
- return func(i Interpretable) (Interpretable, error) {
- switch inst := i.(type) {
- case *evalList:
- return maybeBuildListLiteral(i, inst)
- case *evalMap:
- return maybeBuildMapLiteral(i, inst)
- case InterpretableCall:
- if inst.OverloadID() == overloads.InList {
- return maybeOptimizeSetMembership(i, inst)
- }
- if overloads.IsTypeConversionFunction(inst.Function()) {
- return maybeOptimizeConstUnary(i, inst)
- }
- }
- return i, nil
- }
-}
-
-// decRegexOptimizer compiles regex pattern string constants.
-func decRegexOptimizer(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
- functionMatchMap := make(map[string]*RegexOptimization)
- overloadMatchMap := make(map[string]*RegexOptimization)
- for _, m := range regexOptimizations {
- functionMatchMap[m.Function] = m
- if m.OverloadID != "" {
- overloadMatchMap[m.OverloadID] = m
- }
- }
-
- return func(i Interpretable) (Interpretable, error) {
- call, ok := i.(InterpretableCall)
- if !ok {
- return i, nil
- }
-
- var matcher *RegexOptimization
- var found bool
- if call.OverloadID() != "" {
- matcher, found = overloadMatchMap[call.OverloadID()]
- }
- if !found {
- matcher, found = functionMatchMap[call.Function()]
- }
- if !found || matcher.RegexIndex >= len(call.Args()) {
- return i, nil
- }
- args := call.Args()
- regexArg := args[matcher.RegexIndex]
- regexStr, isConst := regexArg.(InterpretableConst)
- if !isConst {
- return i, nil
- }
- pattern, ok := regexStr.Value().(types.String)
- if !ok {
- return i, nil
- }
- return matcher.Factory(call, string(pattern))
- }
-}
-
-func maybeOptimizeConstUnary(i Interpretable, call InterpretableCall) (Interpretable, error) {
- args := call.Args()
- if len(args) != 1 {
- return i, nil
- }
- _, isConst := args[0].(InterpretableConst)
- if !isConst {
- return i, nil
- }
- val := call.Eval(EmptyActivation())
- if types.IsError(val) {
- return nil, val.(*types.Err)
- }
- return NewConstValue(call.ID(), val), nil
-}
-
-func maybeBuildListLiteral(i Interpretable, l *evalList) (Interpretable, error) {
- for _, elem := range l.elems {
- _, isConst := elem.(InterpretableConst)
- if !isConst {
- return i, nil
- }
- }
- return NewConstValue(l.ID(), l.Eval(EmptyActivation())), nil
-}
-
-func maybeBuildMapLiteral(i Interpretable, mp *evalMap) (Interpretable, error) {
- for idx, key := range mp.keys {
- _, isConst := key.(InterpretableConst)
- if !isConst {
- return i, nil
- }
- _, isConst = mp.vals[idx].(InterpretableConst)
- if !isConst {
- return i, nil
- }
- }
- return NewConstValue(mp.ID(), mp.Eval(EmptyActivation())), nil
-}
-
-// maybeOptimizeSetMembership may convert an 'in' operation against a list to map key membership
-// test if the following conditions are true:
-// - the list is a constant with homogeneous element types.
-// - the elements are all of primitive type.
-func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Interpretable, error) {
- args := inlist.Args()
- lhs := args[0]
- rhs := args[1]
- l, isConst := rhs.(InterpretableConst)
- if !isConst {
- return i, nil
- }
- // When the incoming binary call is flagged with as the InList overload, the value will
- // always be convertible to a `traits.Lister` type.
- list := l.Value().(traits.Lister)
- if list.Size() == types.IntZero {
- return NewConstValue(inlist.ID(), types.False), nil
- }
- it := list.Iterator()
- valueSet := make(map[ref.Val]ref.Val)
- for it.HasNext() == types.True {
- elem := it.Next()
- if !types.IsPrimitiveType(elem) {
- // Note, non-primitive type are not yet supported.
- return i, nil
- }
- valueSet[elem] = types.True
- switch ev := elem.(type) {
- case types.Double:
- iv := ev.ConvertToType(types.IntType)
- // Ensure that only lossless conversions are added to the set
- if !types.IsError(iv) && iv.Equal(ev) == types.True {
- valueSet[iv] = types.True
- }
- // Ensure that only lossless conversions are added to the set
- uv := ev.ConvertToType(types.UintType)
- if !types.IsError(uv) && uv.Equal(ev) == types.True {
- valueSet[uv] = types.True
- }
- case types.Int:
- dv := ev.ConvertToType(types.DoubleType)
- if !types.IsError(dv) {
- valueSet[dv] = types.True
- }
- uv := ev.ConvertToType(types.UintType)
- if !types.IsError(uv) {
- valueSet[uv] = types.True
- }
- case types.Uint:
- dv := ev.ConvertToType(types.DoubleType)
- if !types.IsError(dv) {
- valueSet[dv] = types.True
- }
- iv := ev.ConvertToType(types.IntType)
- if !types.IsError(iv) {
- valueSet[iv] = types.True
- }
- }
- }
- return &evalSetMembership{
- inst: inlist,
- arg: lhs,
- valueSet: valueSet,
- }, nil
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/etcd/vendor/github.com/google/cel-go/interpreter/dispatcher.go
deleted file mode 100644
index febf9d8a83..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/dispatcher.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "fmt"
-
- "github.com/google/cel-go/interpreter/functions"
-)
-
-// Dispatcher resolves function calls to their appropriate overload.
-type Dispatcher interface {
- // Add one or more overloads, returning an error if any Overload has the same Overload#Name.
- Add(overloads ...*functions.Overload) error
-
- // FindOverload returns an Overload definition matching the provided name.
- FindOverload(overload string) (*functions.Overload, bool)
-
- // OverloadIds returns the set of all overload identifiers configured for dispatch.
- OverloadIds() []string
-}
-
-// NewDispatcher returns an empty Dispatcher instance.
-func NewDispatcher() Dispatcher {
- return &defaultDispatcher{
- overloads: make(map[string]*functions.Overload)}
-}
-
-// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and
-// provides an isolation layer between built-ins and extension functions which is useful
-// for forward compatibility.
-func ExtendDispatcher(parent Dispatcher) Dispatcher {
- return &defaultDispatcher{
- parent: parent,
- overloads: make(map[string]*functions.Overload)}
-}
-
-// overloadMap helper type for indexing overloads by function name.
-type overloadMap map[string]*functions.Overload
-
-// defaultDispatcher struct which contains an overload map.
-type defaultDispatcher struct {
- parent Dispatcher
- overloads overloadMap
-}
-
-// Add implements the Dispatcher.Add interface method.
-func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error {
- for _, o := range overloads {
- // add the overload unless an overload of the same name has already been provided.
- if _, found := d.overloads[o.Operator]; found {
- return fmt.Errorf("overload already exists '%s'", o.Operator)
- }
- // index the overload by function name.
- d.overloads[o.Operator] = o
- }
- return nil
-}
-
-// FindOverload implements the Dispatcher.FindOverload interface method.
-func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) {
- o, found := d.overloads[overload]
- // Attempt to dispatch to an overload defined in the parent.
- if !found && d.parent != nil {
- return d.parent.FindOverload(overload)
- }
- return o, found
-}
-
-// OverloadIds implements the Dispatcher interface method.
-func (d *defaultDispatcher) OverloadIds() []string {
- i := 0
- overloads := make([]string, len(d.overloads))
- for name := range d.overloads {
- overloads[i] = name
- i++
- }
- if d.parent == nil {
- return overloads
- }
- parentOverloads := d.parent.OverloadIds()
- for _, pName := range parentOverloads {
- if _, found := d.overloads[pName]; !found {
- overloads = append(overloads, pName)
- }
- }
- return overloads
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/evalstate.go b/etcd/vendor/github.com/google/cel-go/interpreter/evalstate.go
deleted file mode 100644
index cc0d3e6f94..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/evalstate.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "github.com/google/cel-go/common/types/ref"
-)
-
-// EvalState tracks the values associated with expression ids during execution.
-type EvalState interface {
- // IDs returns the list of ids with recorded values.
- IDs() []int64
-
- // Value returns the observed value of the given expression id if found, and a nil false
- // result if not.
- Value(int64) (ref.Val, bool)
-
- // SetValue sets the observed value of the expression id.
- SetValue(int64, ref.Val)
-
- // Reset clears the previously recorded expression values.
- Reset()
-}
-
-// evalState permits the mutation of evaluation state for a given expression id.
-type evalState struct {
- values map[int64]ref.Val
-}
-
-// NewEvalState returns an EvalState instanced used to observe the intermediate
-// evaluations of an expression.
-func NewEvalState() EvalState {
- return &evalState{
- values: make(map[int64]ref.Val),
- }
-}
-
-// IDs implements the EvalState interface method.
-func (s *evalState) IDs() []int64 {
- var ids []int64
- for k, v := range s.values {
- if v != nil {
- ids = append(ids, k)
- }
- }
- return ids
-}
-
-// Value is an implementation of the EvalState interface method.
-func (s *evalState) Value(exprID int64) (ref.Val, bool) {
- val, found := s.values[exprID]
- return val, found
-}
-
-// SetValue is an implementation of the EvalState interface method.
-func (s *evalState) SetValue(exprID int64, val ref.Val) {
- s.values[exprID] = val
-}
-
-// Reset implements the EvalState interface method.
-func (s *evalState) Reset() {
- s.values = map[int64]ref.Val{}
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel
deleted file mode 100644
index 846d11bf47..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel
+++ /dev/null
@@ -1,22 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "functions.go",
- "standard.go",
- ],
- importpath = "github.com/google/cel-go/interpreter/functions",
- deps = [
- "//common/operators:go_default_library",
- "//common/overloads:go_default_library",
- "//common/types:go_default_library",
- "//common/types/ref:go_default_library",
- "//common/types/traits:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/etcd/vendor/github.com/google/cel-go/interpreter/functions/functions.go
deleted file mode 100644
index dd1e9ddd5f..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/functions/functions.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package functions defines the standard builtin functions supported by the
-// interpreter and as declared within the checker#StandardDeclarations.
-package functions
-
-import "github.com/google/cel-go/common/types/ref"
-
-// Overload defines a named overload of a function, indicating an operand trait
-// which must be present on the first argument to the overload as well as one
-// of either a unary, binary, or function implementation.
-//
-// The majority of operators within the expression language are unary or binary
-// and the specializations simplify the call contract for implementers of
-// types with operator overloads. Any added complexity is assumed to be handled
-// by the generic FunctionOp.
-type Overload struct {
- // Operator name as written in an expression or defined within
- // operators.go.
- Operator string
-
- // Operand trait used to dispatch the call. The zero-value indicates a
- // global function overload or that one of the Unary / Binary / Function
- // definitions should be used to execute the call.
- OperandTrait int
-
- // Unary defines the overload with a UnaryOp implementation. May be nil.
- Unary UnaryOp
-
- // Binary defines the overload with a BinaryOp implementation. May be nil.
- Binary BinaryOp
-
- // Function defines the overload with a FunctionOp implementation. May be
- // nil.
- Function FunctionOp
-
- // NonStrict specifies whether the Overload will tolerate arguments that
- // are types.Err or types.Unknown.
- NonStrict bool
-}
-
-// UnaryOp is a function that takes a single value and produces an output.
-type UnaryOp func(value ref.Val) ref.Val
-
-// BinaryOp is a function that takes two values and produces an output.
-type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
-
-// FunctionOp is a function with accepts zero or more arguments and produces
-// an value (as interface{}) or error as a result.
-type FunctionOp func(values ...ref.Val) ref.Val
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/functions/standard.go b/etcd/vendor/github.com/google/cel-go/interpreter/functions/standard.go
deleted file mode 100644
index 73e936114f..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/functions/standard.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package functions
-
-import (
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-// StandardOverloads returns the definitions of the built-in overloads.
-func StandardOverloads() []*Overload {
- return []*Overload{
- // Logical not (!a)
- {
- Operator: operators.LogicalNot,
- OperandTrait: traits.NegatorType,
- Unary: func(value ref.Val) ref.Val {
- if !types.IsBool(value) {
- return types.ValOrErr(value, "no such overload")
- }
- return value.(traits.Negater).Negate()
- }},
- // Not strictly false: IsBool(a) ? a : true
- {
- Operator: operators.NotStrictlyFalse,
- Unary: notStrictlyFalse},
- // Deprecated: not strictly false, may be overridden in the environment.
- {
- Operator: operators.OldNotStrictlyFalse,
- Unary: notStrictlyFalse},
-
- // Less than operator
- {Operator: operators.Less,
- OperandTrait: traits.ComparerType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- cmp := lhs.(traits.Comparer).Compare(rhs)
- if cmp == types.IntNegOne {
- return types.True
- }
- if cmp == types.IntOne || cmp == types.IntZero {
- return types.False
- }
- return cmp
- }},
-
- // Less than or equal operator
- {Operator: operators.LessEquals,
- OperandTrait: traits.ComparerType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- cmp := lhs.(traits.Comparer).Compare(rhs)
- if cmp == types.IntNegOne || cmp == types.IntZero {
- return types.True
- }
- if cmp == types.IntOne {
- return types.False
- }
- return cmp
- }},
-
- // Greater than operator
- {Operator: operators.Greater,
- OperandTrait: traits.ComparerType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- cmp := lhs.(traits.Comparer).Compare(rhs)
- if cmp == types.IntOne {
- return types.True
- }
- if cmp == types.IntNegOne || cmp == types.IntZero {
- return types.False
- }
- return cmp
- }},
-
- // Greater than equal operators
- {Operator: operators.GreaterEquals,
- OperandTrait: traits.ComparerType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- cmp := lhs.(traits.Comparer).Compare(rhs)
- if cmp == types.IntOne || cmp == types.IntZero {
- return types.True
- }
- if cmp == types.IntNegOne {
- return types.False
- }
- return cmp
- }},
-
- // Add operator
- {Operator: operators.Add,
- OperandTrait: traits.AdderType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Adder).Add(rhs)
- }},
-
- // Subtract operators
- {Operator: operators.Subtract,
- OperandTrait: traits.SubtractorType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Subtractor).Subtract(rhs)
- }},
-
- // Multiply operator
- {Operator: operators.Multiply,
- OperandTrait: traits.MultiplierType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Multiplier).Multiply(rhs)
- }},
-
- // Divide operator
- {Operator: operators.Divide,
- OperandTrait: traits.DividerType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Divider).Divide(rhs)
- }},
-
- // Modulo operator
- {Operator: operators.Modulo,
- OperandTrait: traits.ModderType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Modder).Modulo(rhs)
- }},
-
- // Negate operator
- {Operator: operators.Negate,
- OperandTrait: traits.NegatorType,
- Unary: func(value ref.Val) ref.Val {
- if types.IsBool(value) {
- return types.ValOrErr(value, "no such overload")
- }
- return value.(traits.Negater).Negate()
- }},
-
- // Index operator
- {Operator: operators.Index,
- OperandTrait: traits.IndexerType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Indexer).Get(rhs)
- }},
-
- // Size function
- {Operator: overloads.Size,
- OperandTrait: traits.SizerType,
- Unary: func(value ref.Val) ref.Val {
- return value.(traits.Sizer).Size()
- }},
-
- // In operator
- {Operator: operators.In, Binary: inAggregate},
- // Deprecated: in operator, may be overridden in the environment.
- {Operator: operators.OldIn, Binary: inAggregate},
-
- // Matches function
- {Operator: overloads.Matches,
- OperandTrait: traits.MatcherType,
- Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
- return lhs.(traits.Matcher).Match(rhs)
- }},
-
- // Type conversion functions
- // TODO: verify type conversion safety of numeric values.
-
- // Int conversions.
- {Operator: overloads.TypeConvertInt,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.IntType)
- }},
-
- // Uint conversions.
- {Operator: overloads.TypeConvertUint,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.UintType)
- }},
-
- // Double conversions.
- {Operator: overloads.TypeConvertDouble,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.DoubleType)
- }},
-
- // Bool conversions.
- {Operator: overloads.TypeConvertBool,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.BoolType)
- }},
-
- // Bytes conversions.
- {Operator: overloads.TypeConvertBytes,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.BytesType)
- }},
-
- // String conversions.
- {Operator: overloads.TypeConvertString,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.StringType)
- }},
-
- // Timestamp conversions.
- {Operator: overloads.TypeConvertTimestamp,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.TimestampType)
- }},
-
- // Duration conversions.
- {Operator: overloads.TypeConvertDuration,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.DurationType)
- }},
-
- // Type operations.
- {Operator: overloads.TypeConvertType,
- Unary: func(value ref.Val) ref.Val {
- return value.ConvertToType(types.TypeType)
- }},
-
- // Dyn conversion (identity function).
- {Operator: overloads.TypeConvertDyn,
- Unary: func(value ref.Val) ref.Val {
- return value
- }},
-
- {Operator: overloads.Iterator,
- OperandTrait: traits.IterableType,
- Unary: func(value ref.Val) ref.Val {
- return value.(traits.Iterable).Iterator()
- }},
-
- {Operator: overloads.HasNext,
- OperandTrait: traits.IteratorType,
- Unary: func(value ref.Val) ref.Val {
- return value.(traits.Iterator).HasNext()
- }},
-
- {Operator: overloads.Next,
- OperandTrait: traits.IteratorType,
- Unary: func(value ref.Val) ref.Val {
- return value.(traits.Iterator).Next()
- }},
- }
-
-}
-
-func notStrictlyFalse(value ref.Val) ref.Val {
- if types.IsBool(value) {
- return value
- }
- return types.True
-}
-
-func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val {
- if rhs.Type().HasTrait(traits.ContainerType) {
- return rhs.(traits.Container).Contains(lhs)
- }
- return types.ValOrErr(rhs, "no such overload")
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/interpretable.go b/etcd/vendor/github.com/google/cel-go/interpreter/interpretable.go
deleted file mode 100644
index 4fdd12028b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/interpretable.go
+++ /dev/null
@@ -1,1230 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "math"
-
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
- "github.com/google/cel-go/interpreter/functions"
-)
-
-// Interpretable can accept a given Activation and produce a value along with
-// an accompanying EvalState which can be used to inspect whether additional
-// data might be necessary to complete the evaluation.
-type Interpretable interface {
- // ID value corresponding to the expression node.
- ID() int64
-
- // Eval an Activation to produce an output.
- Eval(activation Activation) ref.Val
-}
-
-// InterpretableConst interface for tracking whether the Interpretable is a constant value.
-type InterpretableConst interface {
- Interpretable
-
- // Value returns the constant value of the instruction.
- Value() ref.Val
-}
-
-// InterpretableAttribute interface for tracking whether the Interpretable is an attribute.
-type InterpretableAttribute interface {
- Interpretable
-
- // Attr returns the Attribute value.
- Attr() Attribute
-
- // Adapter returns the type adapter to be used for adapting resolved Attribute values.
- Adapter() ref.TypeAdapter
-
- // AddQualifier proxies the Attribute.AddQualifier method.
- //
- // Note, this method may mutate the current attribute state. If the desire is to clone the
- // Attribute, the Attribute should first be copied before adding the qualifier. Attributes
- // are not copyable by default, so this is a capable that would need to be added to the
- // AttributeFactory or specifically to the underlying Attribute implementation.
- AddQualifier(Qualifier) (Attribute, error)
-
- // Qualify replicates the Attribute.Qualify method to permit extension and interception
- // of object qualification.
- Qualify(vars Activation, obj interface{}) (interface{}, error)
-
- // Resolve returns the value of the Attribute given the current Activation.
- Resolve(Activation) (interface{}, error)
-}
-
-// InterpretableCall interface for inspecting Interpretable instructions related to function calls.
-type InterpretableCall interface {
- Interpretable
-
- // Function returns the function name as it appears in text or mangled operator name as it
- // appears in the operators.go file.
- Function() string
-
- // OverloadID returns the overload id associated with the function specialization.
- // Overload ids are stable across language boundaries and can be treated as synonymous with a
- // unique function signature.
- OverloadID() string
-
- // Args returns the normalized arguments to the function overload.
- // For receiver-style functions, the receiver target is arg 0.
- Args() []Interpretable
-}
-
-// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map
-// or struct.
-type InterpretableConstructor interface {
- Interpretable
-
- // InitVals returns all the list elements, map key and values or struct field values.
- InitVals() []Interpretable
-
- // Type returns the type constructed.
- Type() ref.Type
-}
-
-// Core Interpretable implementations used during the program planning phase.
-
-type evalTestOnly struct {
- id int64
- op Interpretable
- field types.String
- fieldType *ref.FieldType
-}
-
-// ID implements the Interpretable interface method.
-func (test *evalTestOnly) ID() int64 {
- return test.id
-}
-
-// Eval implements the Interpretable interface method.
-func (test *evalTestOnly) Eval(ctx Activation) ref.Val {
- // Handle field selection on a proto in the most efficient way possible.
- if test.fieldType != nil {
- opAttr, ok := test.op.(InterpretableAttribute)
- if ok {
- opVal, err := opAttr.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- refVal, ok := opVal.(ref.Val)
- if ok {
- opVal = refVal.Value()
- }
- if test.fieldType.IsSet(opVal) {
- return types.True
- }
- return types.False
- }
- }
-
- obj := test.op.Eval(ctx)
- tester, ok := obj.(traits.FieldTester)
- if ok {
- return tester.IsSet(test.field)
- }
- container, ok := obj.(traits.Container)
- if ok {
- return container.Contains(test.field)
- }
- return types.ValOrErr(obj, "invalid type for field selection.")
-}
-
-// Cost provides the heuristic cost of a `has(field)` macro. The cost has at least 1 for determining
-// if the field exists, apart from the cost of accessing the field.
-func (test *evalTestOnly) Cost() (min, max int64) {
- min, max = estimateCost(test.op)
- min++
- max++
- return
-}
-
-// NewConstValue creates a new constant valued Interpretable.
-func NewConstValue(id int64, val ref.Val) InterpretableConst {
- return &evalConst{
- id: id,
- val: val,
- }
-}
-
-type evalConst struct {
- id int64
- val ref.Val
-}
-
-// ID implements the Interpretable interface method.
-func (cons *evalConst) ID() int64 {
- return cons.id
-}
-
-// Eval implements the Interpretable interface method.
-func (cons *evalConst) Eval(ctx Activation) ref.Val {
- return cons.val
-}
-
-// Cost returns zero for a constant valued Interpretable.
-func (cons *evalConst) Cost() (min, max int64) {
- return 0, 0
-}
-
-// Value implements the InterpretableConst interface method.
-func (cons *evalConst) Value() ref.Val {
- return cons.val
-}
-
-type evalOr struct {
- id int64
- lhs Interpretable
- rhs Interpretable
-}
-
-// ID implements the Interpretable interface method.
-func (or *evalOr) ID() int64 {
- return or.id
-}
-
-// Eval implements the Interpretable interface method.
-func (or *evalOr) Eval(ctx Activation) ref.Val {
- // short-circuit lhs.
- lVal := or.lhs.Eval(ctx)
- lBool, lok := lVal.(types.Bool)
- if lok && lBool == types.True {
- return types.True
- }
- // short-circuit on rhs.
- rVal := or.rhs.Eval(ctx)
- rBool, rok := rVal.(types.Bool)
- if rok && rBool == types.True {
- return types.True
- }
- // return if both sides are bool false.
- if lok && rok {
- return types.False
- }
- // TODO: return both values as a set if both are unknown or error.
- // prefer left unknown to right unknown.
- if types.IsUnknown(lVal) {
- return lVal
- }
- if types.IsUnknown(rVal) {
- return rVal
- }
- // If the left-hand side is non-boolean return it as the error.
- if types.IsError(lVal) {
- return lVal
- }
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand
-// side expr is sufficient in determining the evaluation result.
-func (or *evalOr) Cost() (min, max int64) {
- return calShortCircuitBinaryOpsCost(or.lhs, or.rhs)
-}
-
-type evalAnd struct {
- id int64
- lhs Interpretable
- rhs Interpretable
-}
-
-// ID implements the Interpretable interface method.
-func (and *evalAnd) ID() int64 {
- return and.id
-}
-
-// Eval implements the Interpretable interface method.
-func (and *evalAnd) Eval(ctx Activation) ref.Val {
- // short-circuit lhs.
- lVal := and.lhs.Eval(ctx)
- lBool, lok := lVal.(types.Bool)
- if lok && lBool == types.False {
- return types.False
- }
- // short-circuit on rhs.
- rVal := and.rhs.Eval(ctx)
- rBool, rok := rVal.(types.Bool)
- if rok && rBool == types.False {
- return types.False
- }
- // return if both sides are bool true.
- if lok && rok {
- return types.True
- }
- // TODO: return both values as a set if both are unknown or error.
- // prefer left unknown to right unknown.
- if types.IsUnknown(lVal) {
- return lVal
- }
- if types.IsUnknown(rVal) {
- return rVal
- }
- // If the left-hand side is non-boolean return it as the error.
- if types.IsError(lVal) {
- return lVal
- }
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand
-// side expr is sufficient in determining the evaluation result.
-func (and *evalAnd) Cost() (min, max int64) {
- return calShortCircuitBinaryOpsCost(and.lhs, and.rhs)
-}
-
-func calShortCircuitBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) {
- lMin, lMax := estimateCost(lhs)
- _, rMax := estimateCost(rhs)
- return lMin, lMax + rMax + 1
-}
-
-type evalEq struct {
- id int64
- lhs Interpretable
- rhs Interpretable
-}
-
-// ID implements the Interpretable interface method.
-func (eq *evalEq) ID() int64 {
- return eq.id
-}
-
-// Eval implements the Interpretable interface method.
-func (eq *evalEq) Eval(ctx Activation) ref.Val {
- lVal := eq.lhs.Eval(ctx)
- rVal := eq.rhs.Eval(ctx)
- if types.IsUnknownOrError(lVal) {
- return lVal
- }
- if types.IsUnknownOrError(rVal) {
- return rVal
- }
- return types.Equal(lVal, rVal)
-}
-
-// Cost implements the Coster interface method.
-func (eq *evalEq) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(eq.lhs, eq.rhs)
-}
-
-// Function implements the InterpretableCall interface method.
-func (*evalEq) Function() string {
- return operators.Equals
-}
-
-// OverloadID implements the InterpretableCall interface method.
-func (*evalEq) OverloadID() string {
- return overloads.Equals
-}
-
-// Args implements the InterpretableCall interface method.
-func (eq *evalEq) Args() []Interpretable {
- return []Interpretable{eq.lhs, eq.rhs}
-}
-
-type evalNe struct {
- id int64
- lhs Interpretable
- rhs Interpretable
-}
-
-// ID implements the Interpretable interface method.
-func (ne *evalNe) ID() int64 {
- return ne.id
-}
-
-// Eval implements the Interpretable interface method.
-func (ne *evalNe) Eval(ctx Activation) ref.Val {
- lVal := ne.lhs.Eval(ctx)
- rVal := ne.rhs.Eval(ctx)
- if types.IsUnknownOrError(lVal) {
- return lVal
- }
- if types.IsUnknownOrError(rVal) {
- return rVal
- }
- return types.Bool(types.Equal(lVal, rVal) != types.True)
-}
-
-// Cost implements the Coster interface method.
-func (ne *evalNe) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(ne.lhs, ne.rhs)
-}
-
-// Function implements the InterpretableCall interface method.
-func (*evalNe) Function() string {
- return operators.NotEquals
-}
-
-// OverloadID implements the InterpretableCall interface method.
-func (*evalNe) OverloadID() string {
- return overloads.NotEquals
-}
-
-// Args implements the InterpretableCall interface method.
-func (ne *evalNe) Args() []Interpretable {
- return []Interpretable{ne.lhs, ne.rhs}
-}
-
-type evalZeroArity struct {
- id int64
- function string
- overload string
- impl functions.FunctionOp
-}
-
-// ID implements the Interpretable interface method.
-func (zero *evalZeroArity) ID() int64 {
- return zero.id
-}
-
-// Eval implements the Interpretable interface method.
-func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
- return zero.impl()
-}
-
-// Cost returns 1 representing the heuristic cost of the function.
-func (zero *evalZeroArity) Cost() (min, max int64) {
- return 1, 1
-}
-
-// Function implements the InterpretableCall interface method.
-func (zero *evalZeroArity) Function() string {
- return zero.function
-}
-
-// OverloadID implements the InterpretableCall interface method.
-func (zero *evalZeroArity) OverloadID() string {
- return zero.overload
-}
-
-// Args returns the argument to the unary function.
-func (zero *evalZeroArity) Args() []Interpretable {
- return []Interpretable{}
-}
-
-type evalUnary struct {
- id int64
- function string
- overload string
- arg Interpretable
- trait int
- impl functions.UnaryOp
- nonStrict bool
-}
-
-// ID implements the Interpretable interface method.
-func (un *evalUnary) ID() int64 {
- return un.id
-}
-
-// Eval implements the Interpretable interface method.
-func (un *evalUnary) Eval(ctx Activation) ref.Val {
- argVal := un.arg.Eval(ctx)
- // Early return if the argument to the function is unknown or error.
- strict := !un.nonStrict
- if strict && types.IsUnknownOrError(argVal) {
- return argVal
- }
- // If the implementation is bound and the argument value has the right traits required to
- // invoke it, then call the implementation.
- if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) {
- return un.impl(argVal)
- }
- // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
- // operand (arg0).
- if argVal.Type().HasTrait(traits.ReceiverType) {
- return argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{})
- }
- return types.NewErr("no such overload: %s", un.function)
-}
-
-// Cost implements the Coster interface method.
-func (un *evalUnary) Cost() (min, max int64) {
- min, max = estimateCost(un.arg)
- min++ // add cost for function
- max++
- return
-}
-
-// Function implements the InterpretableCall interface method.
-func (un *evalUnary) Function() string {
- return un.function
-}
-
-// OverloadID implements the InterpretableCall interface method.
-func (un *evalUnary) OverloadID() string {
- return un.overload
-}
-
-// Args returns the argument to the unary function.
-func (un *evalUnary) Args() []Interpretable {
- return []Interpretable{un.arg}
-}
-
-type evalBinary struct {
- id int64
- function string
- overload string
- lhs Interpretable
- rhs Interpretable
- trait int
- impl functions.BinaryOp
- nonStrict bool
-}
-
-// ID implements the Interpretable interface method.
-func (bin *evalBinary) ID() int64 {
- return bin.id
-}
-
-// Eval implements the Interpretable interface method.
-func (bin *evalBinary) Eval(ctx Activation) ref.Val {
- lVal := bin.lhs.Eval(ctx)
- rVal := bin.rhs.Eval(ctx)
- // Early return if any argument to the function is unknown or error.
- strict := !bin.nonStrict
- if strict {
- if types.IsUnknownOrError(lVal) {
- return lVal
- }
- if types.IsUnknownOrError(rVal) {
- return rVal
- }
- }
- // If the implementation is bound and the argument value has the right traits required to
- // invoke it, then call the implementation.
- if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) {
- return bin.impl(lVal, rVal)
- }
- // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
- // operand (arg0).
- if lVal.Type().HasTrait(traits.ReceiverType) {
- return lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal})
- }
- return types.NewErr("no such overload: %s", bin.function)
-}
-
-// Cost implements the Coster interface method.
-func (bin *evalBinary) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(bin.lhs, bin.rhs)
-}
-
-// Function implements the InterpretableCall interface method.
-func (bin *evalBinary) Function() string {
- return bin.function
-}
-
-// OverloadID implements the InterpretableCall interface method.
-func (bin *evalBinary) OverloadID() string {
- return bin.overload
-}
-
-// Args returns the argument to the unary function.
-func (bin *evalBinary) Args() []Interpretable {
- return []Interpretable{bin.lhs, bin.rhs}
-}
-
-type evalVarArgs struct {
- id int64
- function string
- overload string
- args []Interpretable
- trait int
- impl functions.FunctionOp
- nonStrict bool
-}
-
-// NewCall creates a new call Interpretable.
-func NewCall(id int64, function, overload string, args []Interpretable, impl functions.FunctionOp) InterpretableCall {
- return &evalVarArgs{
- id: id,
- function: function,
- overload: overload,
- args: args,
- impl: impl,
- }
-}
-
-// ID implements the Interpretable interface method.
-func (fn *evalVarArgs) ID() int64 {
- return fn.id
-}
-
-// Eval implements the Interpretable interface method.
-func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
- argVals := make([]ref.Val, len(fn.args))
- // Early return if any argument to the function is unknown or error.
- strict := !fn.nonStrict
- for i, arg := range fn.args {
- argVals[i] = arg.Eval(ctx)
- if strict && types.IsUnknownOrError(argVals[i]) {
- return argVals[i]
- }
- }
- // If the implementation is bound and the argument value has the right traits required to
- // invoke it, then call the implementation.
- arg0 := argVals[0]
- if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) {
- return fn.impl(argVals...)
- }
- // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
- // operand (arg0).
- if arg0.Type().HasTrait(traits.ReceiverType) {
- return arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:])
- }
- return types.NewErr("no such overload: %s", fn.function)
-}
-
-// Cost implements the Coster interface method.
-func (fn *evalVarArgs) Cost() (min, max int64) {
- min, max = sumOfCost(fn.args)
- min++ // add cost for function
- max++
- return
-}
-
-// Function implements the InterpretableCall interface method.
-func (fn *evalVarArgs) Function() string {
- return fn.function
-}
-
-// OverloadID implements the InterpretableCall interface method.
-func (fn *evalVarArgs) OverloadID() string {
- return fn.overload
-}
-
-// Args returns the argument to the unary function.
-func (fn *evalVarArgs) Args() []Interpretable {
- return fn.args
-}
-
-type evalList struct {
- id int64
- elems []Interpretable
- adapter ref.TypeAdapter
-}
-
-// ID implements the Interpretable interface method.
-func (l *evalList) ID() int64 {
- return l.id
-}
-
-// Eval implements the Interpretable interface method.
-func (l *evalList) Eval(ctx Activation) ref.Val {
- elemVals := make([]ref.Val, len(l.elems))
- // If any argument is unknown or error early terminate.
- for i, elem := range l.elems {
- elemVal := elem.Eval(ctx)
- if types.IsUnknownOrError(elemVal) {
- return elemVal
- }
- elemVals[i] = elemVal
- }
- return l.adapter.NativeToValue(elemVals)
-}
-
-func (l *evalList) InitVals() []Interpretable {
- return l.elems
-}
-
-func (l *evalList) Type() ref.Type {
- return types.ListType
-}
-
-// Cost implements the Coster interface method.
-func (l *evalList) Cost() (min, max int64) {
- return sumOfCost(l.elems)
-}
-
-type evalMap struct {
- id int64
- keys []Interpretable
- vals []Interpretable
- adapter ref.TypeAdapter
-}
-
-// ID implements the Interpretable interface method.
-func (m *evalMap) ID() int64 {
- return m.id
-}
-
-// Eval implements the Interpretable interface method.
-func (m *evalMap) Eval(ctx Activation) ref.Val {
- entries := make(map[ref.Val]ref.Val)
- // If any argument is unknown or error early terminate.
- for i, key := range m.keys {
- keyVal := key.Eval(ctx)
- if types.IsUnknownOrError(keyVal) {
- return keyVal
- }
- valVal := m.vals[i].Eval(ctx)
- if types.IsUnknownOrError(valVal) {
- return valVal
- }
- entries[keyVal] = valVal
- }
- return m.adapter.NativeToValue(entries)
-}
-
-func (m *evalMap) InitVals() []Interpretable {
- if len(m.keys) != len(m.vals) {
- return nil
- }
- result := make([]Interpretable, len(m.keys)+len(m.vals))
- idx := 0
- for i, k := range m.keys {
- v := m.vals[i]
- result[idx] = k
- idx++
- result[idx] = v
- idx++
- }
- return result
-}
-
-func (m *evalMap) Type() ref.Type {
- return types.MapType
-}
-
-// Cost implements the Coster interface method.
-func (m *evalMap) Cost() (min, max int64) {
- kMin, kMax := sumOfCost(m.keys)
- vMin, vMax := sumOfCost(m.vals)
- return kMin + vMin, kMax + vMax
-}
-
-type evalObj struct {
- id int64
- typeName string
- fields []string
- vals []Interpretable
- provider ref.TypeProvider
-}
-
-// ID implements the Interpretable interface method.
-func (o *evalObj) ID() int64 {
- return o.id
-}
-
-// Eval implements the Interpretable interface method.
-func (o *evalObj) Eval(ctx Activation) ref.Val {
- fieldVals := make(map[string]ref.Val)
- // If any argument is unknown or error early terminate.
- for i, field := range o.fields {
- val := o.vals[i].Eval(ctx)
- if types.IsUnknownOrError(val) {
- return val
- }
- fieldVals[field] = val
- }
- return o.provider.NewValue(o.typeName, fieldVals)
-}
-
-func (o *evalObj) InitVals() []Interpretable {
- return o.vals
-}
-
-func (o *evalObj) Type() ref.Type {
- return types.NewObjectTypeValue(o.typeName)
-}
-
-// Cost implements the Coster interface method.
-func (o *evalObj) Cost() (min, max int64) {
- return sumOfCost(o.vals)
-}
-
-func sumOfCost(interps []Interpretable) (min, max int64) {
- min, max = 0, 0
- for _, in := range interps {
- minT, maxT := estimateCost(in)
- min += minT
- max += maxT
- }
- return
-}
-
-type evalFold struct {
- id int64
- accuVar string
- iterVar string
- iterRange Interpretable
- accu Interpretable
- cond Interpretable
- step Interpretable
- result Interpretable
- adapter ref.TypeAdapter
- exhaustive bool
- interruptable bool
-}
-
-// ID implements the Interpretable interface method.
-func (fold *evalFold) ID() int64 {
- return fold.id
-}
-
-// Eval implements the Interpretable interface method.
-func (fold *evalFold) Eval(ctx Activation) ref.Val {
- foldRange := fold.iterRange.Eval(ctx)
- if !foldRange.Type().HasTrait(traits.IterableType) {
- return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
- }
- // Configure the fold activation with the accumulator initial value.
- accuCtx := varActivationPool.Get().(*varActivation)
- accuCtx.parent = ctx
- accuCtx.name = fold.accuVar
- accuCtx.val = fold.accu.Eval(ctx)
- // If the accumulator starts as an empty list, then the comprehension will build a list
- // so create a mutable list to optimize the cost of the inner loop.
- l, ok := accuCtx.val.(traits.Lister)
- buildingList := false
- if !fold.exhaustive && ok && l.Size() == types.IntZero {
- buildingList = true
- accuCtx.val = types.NewMutableList(fold.adapter)
- }
- iterCtx := varActivationPool.Get().(*varActivation)
- iterCtx.parent = accuCtx
- iterCtx.name = fold.iterVar
-
- interrupted := false
- it := foldRange.(traits.Iterable).Iterator()
- for it.HasNext() == types.True {
- // Modify the iter var in the fold activation.
- iterCtx.val = it.Next()
-
- // Evaluate the condition, terminate the loop if false.
- cond := fold.cond.Eval(iterCtx)
- condBool, ok := cond.(types.Bool)
- if !fold.exhaustive && ok && condBool != types.True {
- break
- }
- // Evaluate the evaluation step into accu var.
- accuCtx.val = fold.step.Eval(iterCtx)
- if fold.interruptable {
- if stop, found := ctx.ResolveName("#interrupted"); found && stop == true {
- interrupted = true
- break
- }
- }
- }
- varActivationPool.Put(iterCtx)
- if interrupted {
- varActivationPool.Put(accuCtx)
- return types.NewErr("operation interrupted")
- }
-
- // Compute the result.
- res := fold.result.Eval(accuCtx)
- varActivationPool.Put(accuCtx)
- // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result.
- if !types.IsUnknownOrError(res) && buildingList {
- if _, ok := res.(traits.MutableLister); ok {
- res = res.(traits.MutableLister).ToImmutableList()
- }
- }
- return res
-}
-
-// Cost implements the Coster interface method.
-func (fold *evalFold) Cost() (min, max int64) {
- // Compute the cost for evaluating iterRange.
- iMin, iMax := estimateCost(fold.iterRange)
-
- // Compute the size of iterRange. If the size depends on the input, return the maximum possible
- // cost range.
- foldRange := fold.iterRange.Eval(EmptyActivation())
- if !foldRange.Type().HasTrait(traits.IterableType) {
- return 0, math.MaxInt64
- }
- var rangeCnt int64
- it := foldRange.(traits.Iterable).Iterator()
- for it.HasNext() == types.True {
- it.Next()
- rangeCnt++
- }
- aMin, aMax := estimateCost(fold.accu)
- cMin, cMax := estimateCost(fold.cond)
- sMin, sMax := estimateCost(fold.step)
- rMin, rMax := estimateCost(fold.result)
- if fold.exhaustive {
- cMin = cMin * rangeCnt
- sMin = sMin * rangeCnt
- }
-
- // The cond and step costs are multiplied by size(iterRange). The minimum possible cost incurs
- // when the evaluation result can be determined by the first iteration.
- return iMin + aMin + cMin + sMin + rMin,
- iMax + aMax + cMax*rangeCnt + sMax*rangeCnt + rMax
-}
-
-// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
-// plan via decorators.
-
-// evalSetMembership is an Interpretable implementation which tests whether an input value
-// exists within the set of map keys used to model a set.
-type evalSetMembership struct {
- inst Interpretable
- arg Interpretable
- valueSet map[ref.Val]ref.Val
-}
-
-// ID implements the Interpretable interface method.
-func (e *evalSetMembership) ID() int64 {
- return e.inst.ID()
-}
-
-// Eval implements the Interpretable interface method.
-func (e *evalSetMembership) Eval(ctx Activation) ref.Val {
- val := e.arg.Eval(ctx)
- if ret, found := e.valueSet[val]; found {
- return ret
- }
- return types.False
-}
-
-// Cost implements the Coster interface method.
-func (e *evalSetMembership) Cost() (min, max int64) {
- return estimateCost(e.arg)
-}
-
-// evalWatch is an Interpretable implementation that wraps the execution of a given
-// expression so that it may observe the computed value and send it to an observer.
-type evalWatch struct {
- Interpretable
- observer EvalObserver
-}
-
-// Eval implements the Interpretable interface method.
-func (e *evalWatch) Eval(ctx Activation) ref.Val {
- val := e.Interpretable.Eval(ctx)
- e.observer(e.ID(), e.Interpretable, val)
- return val
-}
-
-// Cost implements the Coster interface method.
-func (e *evalWatch) Cost() (min, max int64) {
- return estimateCost(e.Interpretable)
-}
-
-// evalWatchAttr describes a watcher of an instAttr Interpretable.
-//
-// Since the watcher may be selected against at a later stage in program planning, the watcher
-// must implement the instAttr interface by proxy.
-type evalWatchAttr struct {
- InterpretableAttribute
- observer EvalObserver
-}
-
-// AddQualifier creates a wrapper over the incoming qualifier which observes the qualification
-// result.
-func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) {
- cq, isConst := q.(ConstantQualifier)
- if isConst {
- q = &evalWatchConstQual{
- ConstantQualifier: cq,
- observer: e.observer,
- adapter: e.InterpretableAttribute.Adapter(),
- }
- } else {
- q = &evalWatchQual{
- Qualifier: q,
- observer: e.observer,
- adapter: e.InterpretableAttribute.Adapter(),
- }
- }
- _, err := e.InterpretableAttribute.AddQualifier(q)
- return e, err
-}
-
-// Cost implements the Coster interface method.
-func (e *evalWatchAttr) Cost() (min, max int64) {
- return estimateCost(e.InterpretableAttribute)
-}
-
-// Eval implements the Interpretable interface method.
-func (e *evalWatchAttr) Eval(vars Activation) ref.Val {
- val := e.InterpretableAttribute.Eval(vars)
- e.observer(e.ID(), e.InterpretableAttribute, val)
- return val
-}
-
-// evalWatchConstQual observes the qualification of an object using a constant boolean, int,
-// string, or uint.
-type evalWatchConstQual struct {
- ConstantQualifier
- observer EvalObserver
- adapter ref.TypeAdapter
-}
-
-// Cost implements the Coster interface method.
-func (e *evalWatchConstQual) Cost() (min, max int64) {
- return estimateCost(e.ConstantQualifier)
-}
-
-// Qualify observes the qualification of a object via a constant boolean, int, string, or uint.
-func (e *evalWatchConstQual) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- out, err := e.ConstantQualifier.Qualify(vars, obj)
- var val ref.Val
- if err != nil {
- val = types.NewErr(err.Error())
- } else {
- val = e.adapter.NativeToValue(out)
- }
- e.observer(e.ID(), e.ConstantQualifier, val)
- return out, err
-}
-
-// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant.
-func (e *evalWatchConstQual) QualifierValueEquals(value interface{}) bool {
- qve, ok := e.ConstantQualifier.(qualifierValueEquator)
- return ok && qve.QualifierValueEquals(value)
-}
-
-// evalWatchQual observes the qualification of an object by a value computed at runtime.
-type evalWatchQual struct {
- Qualifier
- observer EvalObserver
- adapter ref.TypeAdapter
-}
-
-// Cost implements the Coster interface method.
-func (e *evalWatchQual) Cost() (min, max int64) {
- return estimateCost(e.Qualifier)
-}
-
-// Qualify observes the qualification of a object via a value computed at runtime.
-func (e *evalWatchQual) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- out, err := e.Qualifier.Qualify(vars, obj)
- var val ref.Val
- if err != nil {
- val = types.NewErr(err.Error())
- } else {
- val = e.adapter.NativeToValue(out)
- }
- e.observer(e.ID(), e.Qualifier, val)
- return out, err
-}
-
-// evalWatchConst describes a watcher of an instConst Interpretable.
-type evalWatchConst struct {
- InterpretableConst
- observer EvalObserver
-}
-
-// Eval implements the Interpretable interface method.
-func (e *evalWatchConst) Eval(vars Activation) ref.Val {
- val := e.Value()
- e.observer(e.ID(), e.InterpretableConst, val)
- return val
-}
-
-// Cost implements the Coster interface method.
-func (e *evalWatchConst) Cost() (min, max int64) {
- return estimateCost(e.InterpretableConst)
-}
-
-// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
-type evalExhaustiveOr struct {
- id int64
- lhs Interpretable
- rhs Interpretable
-}
-
-// ID implements the Interpretable interface method.
-func (or *evalExhaustiveOr) ID() int64 {
- return or.id
-}
-
-// Eval implements the Interpretable interface method.
-func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
- lVal := or.lhs.Eval(ctx)
- rVal := or.rhs.Eval(ctx)
- lBool, lok := lVal.(types.Bool)
- if lok && lBool == types.True {
- return types.True
- }
- rBool, rok := rVal.(types.Bool)
- if rok && rBool == types.True {
- return types.True
- }
- if lok && rok {
- return types.False
- }
- if types.IsUnknown(lVal) {
- return lVal
- }
- if types.IsUnknown(rVal) {
- return rVal
- }
- // TODO: Combine the errors into a set in the future.
- // If the left-hand side is non-boolean return it as the error.
- if types.IsError(lVal) {
- return lVal
- }
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method.
-func (or *evalExhaustiveOr) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(or.lhs, or.rhs)
-}
-
-// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
-type evalExhaustiveAnd struct {
- id int64
- lhs Interpretable
- rhs Interpretable
-}
-
-// ID implements the Interpretable interface method.
-func (and *evalExhaustiveAnd) ID() int64 {
- return and.id
-}
-
-// Eval implements the Interpretable interface method.
-func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
- lVal := and.lhs.Eval(ctx)
- rVal := and.rhs.Eval(ctx)
- lBool, lok := lVal.(types.Bool)
- if lok && lBool == types.False {
- return types.False
- }
- rBool, rok := rVal.(types.Bool)
- if rok && rBool == types.False {
- return types.False
- }
- if lok && rok {
- return types.True
- }
- if types.IsUnknown(lVal) {
- return lVal
- }
- if types.IsUnknown(rVal) {
- return rVal
- }
- // TODO: Combine the errors into a set in the future.
- // If the left-hand side is non-boolean return it as the error.
- if types.IsError(lVal) {
- return lVal
- }
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method.
-func (and *evalExhaustiveAnd) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(and.lhs, and.rhs)
-}
-
-func calExhaustiveBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) {
- lMin, lMax := estimateCost(lhs)
- rMin, rMax := estimateCost(rhs)
- return lMin + rMin + 1, lMax + rMax + 1
-}
-
-// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
-// evaluation.
-type evalExhaustiveConditional struct {
- id int64
- adapter ref.TypeAdapter
- attr *conditionalAttribute
-}
-
-// ID implements the Interpretable interface method.
-func (cond *evalExhaustiveConditional) ID() int64 {
- return cond.id
-}
-
-// Eval implements the Interpretable interface method.
-func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
- cVal := cond.attr.expr.Eval(ctx)
- tVal, err := cond.attr.truthy.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- fVal, err := cond.attr.falsy.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- cBool, ok := cVal.(types.Bool)
- if !ok {
- return types.ValOrErr(cVal, "no such overload")
- }
- if cBool {
- return cond.adapter.NativeToValue(tVal)
- }
- return cond.adapter.NativeToValue(fVal)
-}
-
-// Cost implements the Coster interface method.
-func (cond *evalExhaustiveConditional) Cost() (min, max int64) {
- return cond.attr.Cost()
-}
-
-// evalAttr evaluates an Attribute value.
-type evalAttr struct {
- adapter ref.TypeAdapter
- attr Attribute
-}
-
-// ID of the attribute instruction.
-func (a *evalAttr) ID() int64 {
- return a.attr.ID()
-}
-
-// AddQualifier implements the instAttr interface method.
-func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
- attr, err := a.attr.AddQualifier(qual)
- a.attr = attr
- return attr, err
-}
-
-// Attr implements the instAttr interface method.
-func (a *evalAttr) Attr() Attribute {
- return a.attr
-}
-
-// Adapter implements the instAttr interface method.
-func (a *evalAttr) Adapter() ref.TypeAdapter {
- return a.adapter
-}
-
-// Cost implements the Coster interface method.
-func (a *evalAttr) Cost() (min, max int64) {
- return estimateCost(a.attr)
-}
-
-// Eval implements the Interpretable interface method.
-func (a *evalAttr) Eval(ctx Activation) ref.Val {
- v, err := a.attr.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return a.adapter.NativeToValue(v)
-}
-
-// Qualify proxies to the Attribute's Qualify method.
-func (a *evalAttr) Qualify(ctx Activation, obj interface{}) (interface{}, error) {
- return a.attr.Qualify(ctx, obj)
-}
-
-// Resolve proxies to the Attribute's Resolve method.
-func (a *evalAttr) Resolve(ctx Activation) (interface{}, error) {
- return a.attr.Resolve(ctx)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/interpreter.go b/etcd/vendor/github.com/google/cel-go/interpreter/interpreter.go
deleted file mode 100644
index b3fd14f8b3..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/interpreter.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package interpreter provides functions to evaluate parsed expressions with
-// the option to augment the evaluation with inputs and functions supplied at
-// evaluation time.
-package interpreter
-
-import (
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/interpreter/functions"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Interpreter generates a new Interpretable from a checked or unchecked expression.
-type Interpreter interface {
- // NewInterpretable creates an Interpretable from a checked expression and an
- // optional list of InterpretableDecorator values.
- NewInterpretable(checked *exprpb.CheckedExpr,
- decorators ...InterpretableDecorator) (Interpretable, error)
-
- // NewUncheckedInterpretable returns an Interpretable from a parsed expression
- // and an optional list of InterpretableDecorator values.
- NewUncheckedInterpretable(expr *exprpb.Expr,
- decorators ...InterpretableDecorator) (Interpretable, error)
-}
-
-// EvalObserver is a functional interface that accepts an expression id and an observed value.
-// The id identifies the expression that was evaluated, the programStep is the Interpretable or Qualifier that
-// was evaluated and value is the result of the evaluation.
-type EvalObserver func(id int64, programStep interface{}, value ref.Val)
-
-// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable
-// or Qualifier during program evaluation.
-func Observe(observers ...EvalObserver) InterpretableDecorator {
- if len(observers) == 1 {
- return decObserveEval(observers[0])
- }
- observeFn := func(id int64, programStep interface{}, val ref.Val) {
- for _, observer := range observers {
- observer(id, programStep, val)
- }
- }
- return decObserveEval(observeFn)
-}
-
-// EvalCancelledError represents a cancelled program evaluation operation.
-type EvalCancelledError struct {
- Message string
- // Type identifies the cause of the cancellation.
- Cause CancellationCause
-}
-
-func (e EvalCancelledError) Error() string {
- return e.Message
-}
-
-// CancellationCause enumerates the ways a program evaluation operation can be cancelled.
-type CancellationCause int
-
-const (
- // ContextCancelled indicates that the operation was cancelled in response to a Golang context cancellation.
- ContextCancelled CancellationCause = iota
-
- // CostLimitExceeded indicates that the operation was cancelled in response to the actual cost limit being
- // exceeded.
- CostLimitExceeded
-)
-
-// TODO: Replace all usages of TrackState with EvalStateObserver
-
-// TrackState decorates each expression node with an observer which records the value
-// associated with the given expression id. EvalState must be provided to the decorator.
-// This decorator is not thread-safe, and the EvalState must be reset between Eval()
-// calls.
-// DEPRECATED: Please use EvalStateObserver instead. It composes gracefully with additional observers.
-func TrackState(state EvalState) InterpretableDecorator {
- return Observe(EvalStateObserver(state))
-}
-
-// EvalStateObserver provides an observer which records the value
-// associated with the given expression id. EvalState must be provided to the observer.
-// This decorator is not thread-safe, and the EvalState must be reset between Eval()
-// calls.
-func EvalStateObserver(state EvalState) EvalObserver {
- return func(id int64, programStep interface{}, val ref.Val) {
- state.SetValue(id, val)
- }
-}
-
-// ExhaustiveEval replaces operations that short-circuit with versions that evaluate
-// expressions and couples this behavior with the TrackState() decorator to provide
-// insight into the evaluation state of the entire expression. EvalState must be
-// provided to the decorator. This decorator is not thread-safe, and the EvalState
-// must be reset between Eval() calls.
-func ExhaustiveEval() InterpretableDecorator {
- ex := decDisableShortcircuits()
- return func(i Interpretable) (Interpretable, error) {
- return ex(i)
- }
-}
-
-// InterruptableEval annotates comprehension loops with information that indicates they
-// should check the `#interrupted` state within a custom Activation.
-//
-// The custom activation is currently managed higher up in the stack within the 'cel' package
-// and should not require any custom support on behalf of callers.
-func InterruptableEval() InterpretableDecorator {
- return decInterruptFolds()
-}
-
-// Optimize will pre-compute operations such as list and map construction and optimize
-// call arguments to set membership tests. The set of optimizations will increase over time.
-func Optimize() InterpretableDecorator {
- return decOptimize()
-}
-
-// RegexOptimization provides a way to replace an InterpretableCall for a regex function when the
-// RegexIndex argument is a string constant. Typically, the Factory would compile the regex pattern at
-// RegexIndex and report any errors (at program creation time) and then use the compiled regex for
-// all regex function invocations.
-type RegexOptimization struct {
- // Function is the name of the function to optimize.
- Function string
- // OverloadID is the ID of the overload to optimize.
- OverloadID string
- // RegexIndex is the index position of the regex pattern argument. Only calls to the function where this argument is
- // a string constant will be delegated to this optimizer.
- RegexIndex int
- // Factory constructs a replacement InterpretableCall node that optimizes the regex function call. Factory is
- // provided with the unoptimized regex call and the string constant at the RegexIndex argument.
- // The Factory may compile the regex for use across all invocations of the call, return any errors and
- // return an interpreter.NewCall with the desired regex optimized function impl.
- Factory func(call InterpretableCall, regexPattern string) (InterpretableCall, error)
-}
-
-// CompileRegexConstants compiles regex pattern string constants at program creation time and reports any regex pattern
-// compile errors.
-func CompileRegexConstants(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
- return decRegexOptimizer(regexOptimizations...)
-}
-
-type exprInterpreter struct {
- dispatcher Dispatcher
- container *containers.Container
- provider ref.TypeProvider
- adapter ref.TypeAdapter
- attrFactory AttributeFactory
-}
-
-// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used
-// throughout the Eval of all Interpretable instances generated from it.
-func NewInterpreter(dispatcher Dispatcher,
- container *containers.Container,
- provider ref.TypeProvider,
- adapter ref.TypeAdapter,
- attrFactory AttributeFactory) Interpreter {
- return &exprInterpreter{
- dispatcher: dispatcher,
- container: container,
- provider: provider,
- adapter: adapter,
- attrFactory: attrFactory}
-}
-
-// NewStandardInterpreter builds a Dispatcher and TypeProvider with support for all of the CEL
-// builtins defined in the language definition.
-func NewStandardInterpreter(container *containers.Container,
- provider ref.TypeProvider,
- adapter ref.TypeAdapter,
- resolver AttributeFactory) Interpreter {
- dispatcher := NewDispatcher()
- dispatcher.Add(functions.StandardOverloads()...)
- return NewInterpreter(dispatcher, container, provider, adapter, resolver)
-}
-
-// NewIntepretable implements the Interpreter interface method.
-func (i *exprInterpreter) NewInterpretable(
- checked *exprpb.CheckedExpr,
- decorators ...InterpretableDecorator) (Interpretable, error) {
- p := newPlanner(
- i.dispatcher,
- i.provider,
- i.adapter,
- i.attrFactory,
- i.container,
- checked,
- decorators...)
- return p.Plan(checked.GetExpr())
-}
-
-// NewUncheckedIntepretable implements the Interpreter interface method.
-func (i *exprInterpreter) NewUncheckedInterpretable(
- expr *exprpb.Expr,
- decorators ...InterpretableDecorator) (Interpretable, error) {
- p := newUncheckedPlanner(
- i.dispatcher,
- i.provider,
- i.adapter,
- i.attrFactory,
- i.container,
- decorators...)
- return p.Plan(expr)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/optimizations.go b/etcd/vendor/github.com/google/cel-go/interpreter/optimizations.go
deleted file mode 100644
index 2fc87e693b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/optimizations.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "regexp"
-
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
-)
-
-// MatchesRegexOptimization optimizes the 'matches' standard library function by compiling the regex pattern and
-// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
-// call invocations.
-var MatchesRegexOptimization = &RegexOptimization{
- Function: "matches",
- RegexIndex: 1,
- Factory: func(call InterpretableCall, regexPattern string) (InterpretableCall, error) {
- compiledRegex, err := regexp.Compile(regexPattern)
- if err != nil {
- return nil, err
- }
- return NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(values ...ref.Val) ref.Val {
- if len(values) != 2 {
- return types.NoSuchOverloadErr()
- }
- in, ok := values[0].Value().(string)
- if !ok {
- return types.NoSuchOverloadErr()
- }
- return types.Bool(compiledRegex.MatchString(in))
- }), nil
- },
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/planner.go b/etcd/vendor/github.com/google/cel-go/interpreter/planner.go
deleted file mode 100644
index 882e0419a5..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/planner.go
+++ /dev/null
@@ -1,794 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "fmt"
- "strings"
-
- "github.com/google/cel-go/common/containers"
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/interpreter/functions"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value.
-type interpretablePlanner interface {
- // Plan generates an Interpretable value (or error) from the input proto Expr.
- Plan(expr *exprpb.Expr) (Interpretable, error)
-}
-
-// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
-// TypeAdapter, Container, and CheckedExpr value. These pieces of data are used to resolve
-// functions, types, and namespaced identifiers at plan time rather than at runtime since
-// it only needs to be done once and may be semi-expensive to compute.
-func newPlanner(disp Dispatcher,
- provider ref.TypeProvider,
- adapter ref.TypeAdapter,
- attrFactory AttributeFactory,
- cont *containers.Container,
- checked *exprpb.CheckedExpr,
- decorators ...InterpretableDecorator) interpretablePlanner {
- return &planner{
- disp: disp,
- provider: provider,
- adapter: adapter,
- attrFactory: attrFactory,
- container: cont,
- refMap: checked.GetReferenceMap(),
- typeMap: checked.GetTypeMap(),
- decorators: decorators,
- }
-}
-
-// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
-// TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in
-// Select expressions are resolved lazily at evaluation time.
-func newUncheckedPlanner(disp Dispatcher,
- provider ref.TypeProvider,
- adapter ref.TypeAdapter,
- attrFactory AttributeFactory,
- cont *containers.Container,
- decorators ...InterpretableDecorator) interpretablePlanner {
- return &planner{
- disp: disp,
- provider: provider,
- adapter: adapter,
- attrFactory: attrFactory,
- container: cont,
- refMap: make(map[int64]*exprpb.Reference),
- typeMap: make(map[int64]*exprpb.Type),
- decorators: decorators,
- }
-}
-
-// planner is an implementation of the interpretablePlanner interface.
-type planner struct {
- disp Dispatcher
- provider ref.TypeProvider
- adapter ref.TypeAdapter
- attrFactory AttributeFactory
- container *containers.Container
- refMap map[int64]*exprpb.Reference
- typeMap map[int64]*exprpb.Type
- decorators []InterpretableDecorator
-}
-
-// Plan implements the interpretablePlanner interface. This implementation of the Plan method also
-// applies decorators to each Interpretable generated as part of the overall plan. Decorators are
-// useful for layering functionality into the evaluation that is not natively understood by CEL,
-// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of
-// repeated expressions.
-func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) {
- switch expr.GetExprKind().(type) {
- case *exprpb.Expr_CallExpr:
- return p.decorate(p.planCall(expr))
- case *exprpb.Expr_IdentExpr:
- return p.decorate(p.planIdent(expr))
- case *exprpb.Expr_SelectExpr:
- return p.decorate(p.planSelect(expr))
- case *exprpb.Expr_ListExpr:
- return p.decorate(p.planCreateList(expr))
- case *exprpb.Expr_StructExpr:
- return p.decorate(p.planCreateStruct(expr))
- case *exprpb.Expr_ComprehensionExpr:
- return p.decorate(p.planComprehension(expr))
- case *exprpb.Expr_ConstExpr:
- return p.decorate(p.planConst(expr))
- }
- return nil, fmt.Errorf("unsupported expr: %v", expr)
-}
-
-// decorate applies the InterpretableDecorator functions to the given Interpretable.
-// Both the Interpretable and error generated by a Plan step are accepted as arguments
-// for convenience.
-func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) {
- if err != nil {
- return nil, err
- }
- for _, dec := range p.decorators {
- i, err = dec(i)
- if err != nil {
- return nil, err
- }
- }
- return i, nil
-}
-
-// planIdent creates an Interpretable that resolves an identifier from an Activation.
-func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) {
- // Establish whether the identifier is in the reference map.
- if identRef, found := p.refMap[expr.GetId()]; found {
- return p.planCheckedIdent(expr.GetId(), identRef)
- }
- // Create the possible attribute list for the unresolved reference.
- ident := expr.GetIdentExpr()
- return &evalAttr{
- adapter: p.adapter,
- attr: p.attrFactory.MaybeAttribute(expr.GetId(), ident.Name),
- }, nil
-}
-
-func (p *planner) planCheckedIdent(id int64, identRef *exprpb.Reference) (Interpretable, error) {
- // Plan a constant reference if this is the case for this simple identifier.
- if identRef.GetValue() != nil {
- return p.Plan(&exprpb.Expr{Id: id,
- ExprKind: &exprpb.Expr_ConstExpr{
- ConstExpr: identRef.GetValue(),
- }})
- }
-
- // Check to see whether the type map indicates this is a type name. All types should be
- // registered with the provider.
- cType := p.typeMap[id]
- if cType.GetType() != nil {
- cVal, found := p.provider.FindIdent(identRef.GetName())
- if !found {
- return nil, fmt.Errorf("reference to undefined type: %s", identRef.GetName())
- }
- return NewConstValue(id, cVal), nil
- }
-
- // Otherwise, return the attribute for the resolved identifier name.
- return &evalAttr{
- adapter: p.adapter,
- attr: p.attrFactory.AbsoluteAttribute(id, identRef.GetName()),
- }, nil
-}
-
-// planSelect creates an Interpretable with either:
-//
-// a) selects a field from a map or proto.
-// b) creates a field presence test for a select within a has() macro.
-// c) resolves the select expression to a namespaced identifier.
-func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
- // If the Select id appears in the reference map from the CheckedExpr proto then it is either
- // a namespaced identifier or enum value.
- if identRef, found := p.refMap[expr.GetId()]; found {
- return p.planCheckedIdent(expr.GetId(), identRef)
- }
-
- sel := expr.GetSelectExpr()
- // Plan the operand evaluation.
- op, err := p.Plan(sel.GetOperand())
- if err != nil {
- return nil, err
- }
-
- // Determine the field type if this is a proto message type.
- var fieldType *ref.FieldType
- opType := p.typeMap[sel.GetOperand().GetId()]
- if opType.GetMessageType() != "" {
- ft, found := p.provider.FindFieldType(opType.GetMessageType(), sel.GetField())
- if found && ft.IsSet != nil && ft.GetFrom != nil {
- fieldType = ft
- }
- }
-
- // If the Select was marked TestOnly, this is a presence test.
- //
- // Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json)
- // as follows:
- // - True if the object field has a non-default value, e.g. obj.str != ""
- // - True if the dynamic value has the field defined, e.g. key in map
- //
- // However, presence tests are not defined for qualified identifier names with primitive types.
- // If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
- // it is not clear whether has should error or follow the convention defined for structured
- // values.
- if sel.TestOnly {
- // Return the test only eval expression.
- return &evalTestOnly{
- id: expr.GetId(),
- field: types.String(sel.GetField()),
- fieldType: fieldType,
- op: op,
- }, nil
- }
- // Build a qualifier.
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), sel.GetField())
- if err != nil {
- return nil, err
- }
- // Lastly, create a field selection Interpretable.
- attr, isAttr := op.(InterpretableAttribute)
- if isAttr {
- _, err = attr.AddQualifier(qual)
- return attr, err
- }
-
- relAttr, err := p.relativeAttr(op.ID(), op)
- if err != nil {
- return nil, err
- }
- _, err = relAttr.AddQualifier(qual)
- if err != nil {
- return nil, err
- }
- return relAttr, nil
-}
-
-// planCall creates a callable Interpretable while specializing for common functions and invocation
-// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in
-// optimized Interpretable values.
-func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) {
- call := expr.GetCallExpr()
- target, fnName, oName := p.resolveFunction(expr)
- argCount := len(call.GetArgs())
- var offset int
- if target != nil {
- argCount++
- offset++
- }
-
- args := make([]Interpretable, argCount)
- if target != nil {
- arg, err := p.Plan(target)
- if err != nil {
- return nil, err
- }
- args[0] = arg
- }
- for i, argExpr := range call.GetArgs() {
- arg, err := p.Plan(argExpr)
- if err != nil {
- return nil, err
- }
- args[i+offset] = arg
- }
-
- // Generate specialized Interpretable operators by function name if possible.
- switch fnName {
- case operators.LogicalAnd:
- return p.planCallLogicalAnd(expr, args)
- case operators.LogicalOr:
- return p.planCallLogicalOr(expr, args)
- case operators.Conditional:
- return p.planCallConditional(expr, args)
- case operators.Equals:
- return p.planCallEqual(expr, args)
- case operators.NotEquals:
- return p.planCallNotEqual(expr, args)
- case operators.Index:
- return p.planCallIndex(expr, args)
- }
-
- // Otherwise, generate Interpretable calls specialized by argument count.
- // Try to find the specific function by overload id.
- var fnDef *functions.Overload
- if oName != "" {
- fnDef, _ = p.disp.FindOverload(oName)
- }
- // If the overload id couldn't resolve the function, try the simple function name.
- if fnDef == nil {
- fnDef, _ = p.disp.FindOverload(fnName)
- }
- switch argCount {
- case 0:
- return p.planCallZero(expr, fnName, oName, fnDef)
- case 1:
- // If the FunctionOp has been used, then use it as it may exist for the purposes
- // of dynamic dispatch within a singleton function implementation.
- if fnDef != nil && fnDef.Unary == nil && fnDef.Function != nil {
- return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
- }
- return p.planCallUnary(expr, fnName, oName, fnDef, args)
- case 2:
- // If the FunctionOp has been used, then use it as it may exist for the purposes
- // of dynamic dispatch within a singleton function implementation.
- if fnDef != nil && fnDef.Binary == nil && fnDef.Function != nil {
- return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
- }
- return p.planCallBinary(expr, fnName, oName, fnDef, args)
- default:
- return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
- }
-}
-
-// planCallZero generates a zero-arity callable Interpretable.
-func (p *planner) planCallZero(expr *exprpb.Expr,
- function string,
- overload string,
- impl *functions.Overload) (Interpretable, error) {
- if impl == nil || impl.Function == nil {
- return nil, fmt.Errorf("no such overload: %s()", function)
- }
- return &evalZeroArity{
- id: expr.GetId(),
- function: function,
- overload: overload,
- impl: impl.Function,
- }, nil
-}
-
-// planCallUnary generates a unary callable Interpretable.
-func (p *planner) planCallUnary(expr *exprpb.Expr,
- function string,
- overload string,
- impl *functions.Overload,
- args []Interpretable) (Interpretable, error) {
- var fn functions.UnaryOp
- var trait int
- var nonStrict bool
- if impl != nil {
- if impl.Unary == nil {
- return nil, fmt.Errorf("no such overload: %s(arg)", function)
- }
- fn = impl.Unary
- trait = impl.OperandTrait
- nonStrict = impl.NonStrict
- }
- return &evalUnary{
- id: expr.GetId(),
- function: function,
- overload: overload,
- arg: args[0],
- trait: trait,
- impl: fn,
- nonStrict: nonStrict,
- }, nil
-}
-
-// planCallBinary generates a binary callable Interpretable.
-func (p *planner) planCallBinary(expr *exprpb.Expr,
- function string,
- overload string,
- impl *functions.Overload,
- args []Interpretable) (Interpretable, error) {
- var fn functions.BinaryOp
- var trait int
- var nonStrict bool
- if impl != nil {
- if impl.Binary == nil {
- return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function)
- }
- fn = impl.Binary
- trait = impl.OperandTrait
- nonStrict = impl.NonStrict
- }
- return &evalBinary{
- id: expr.GetId(),
- function: function,
- overload: overload,
- lhs: args[0],
- rhs: args[1],
- trait: trait,
- impl: fn,
- nonStrict: nonStrict,
- }, nil
-}
-
-// planCallVarArgs generates a variable argument callable Interpretable.
-func (p *planner) planCallVarArgs(expr *exprpb.Expr,
- function string,
- overload string,
- impl *functions.Overload,
- args []Interpretable) (Interpretable, error) {
- var fn functions.FunctionOp
- var trait int
- var nonStrict bool
- if impl != nil {
- if impl.Function == nil {
- return nil, fmt.Errorf("no such overload: %s(...)", function)
- }
- fn = impl.Function
- trait = impl.OperandTrait
- nonStrict = impl.NonStrict
- }
- return &evalVarArgs{
- id: expr.GetId(),
- function: function,
- overload: overload,
- args: args,
- trait: trait,
- impl: fn,
- nonStrict: nonStrict,
- }, nil
-}
-
-// planCallEqual generates an equals (==) Interpretable.
-func (p *planner) planCallEqual(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
- return &evalEq{
- id: expr.GetId(),
- lhs: args[0],
- rhs: args[1],
- }, nil
-}
-
-// planCallNotEqual generates a not equals (!=) Interpretable.
-func (p *planner) planCallNotEqual(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
- return &evalNe{
- id: expr.GetId(),
- lhs: args[0],
- rhs: args[1],
- }, nil
-}
-
-// planCallLogicalAnd generates a logical and (&&) Interpretable.
-func (p *planner) planCallLogicalAnd(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
- return &evalAnd{
- id: expr.GetId(),
- lhs: args[0],
- rhs: args[1],
- }, nil
-}
-
-// planCallLogicalOr generates a logical or (||) Interpretable.
-func (p *planner) planCallLogicalOr(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
- return &evalOr{
- id: expr.GetId(),
- lhs: args[0],
- rhs: args[1],
- }, nil
-}
-
-// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
-func (p *planner) planCallConditional(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
- cond := args[0]
-
- t := args[1]
- var tAttr Attribute
- truthyAttr, isTruthyAttr := t.(InterpretableAttribute)
- if isTruthyAttr {
- tAttr = truthyAttr.Attr()
- } else {
- tAttr = p.attrFactory.RelativeAttribute(t.ID(), t)
- }
-
- f := args[2]
- var fAttr Attribute
- falsyAttr, isFalsyAttr := f.(InterpretableAttribute)
- if isFalsyAttr {
- fAttr = falsyAttr.Attr()
- } else {
- fAttr = p.attrFactory.RelativeAttribute(f.ID(), f)
- }
-
- return &evalAttr{
- adapter: p.adapter,
- attr: p.attrFactory.ConditionalAttribute(expr.GetId(), cond, tAttr, fAttr),
- }, nil
-}
-
-// planCallIndex either extends an attribute with the argument to the index operation, or creates
-// a relative attribute based on the return of a function call or operation.
-func (p *planner) planCallIndex(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
- op := args[0]
- ind := args[1]
- opAttr, err := p.relativeAttr(op.ID(), op)
- if err != nil {
- return nil, err
- }
- opType := p.typeMap[expr.GetCallExpr().GetTarget().GetId()]
- indConst, isIndConst := ind.(InterpretableConst)
- if isIndConst {
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), indConst.Value())
- if err != nil {
- return nil, err
- }
- _, err = opAttr.AddQualifier(qual)
- return opAttr, err
- }
- indAttr, isIndAttr := ind.(InterpretableAttribute)
- if isIndAttr {
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), indAttr)
- if err != nil {
- return nil, err
- }
- _, err = opAttr.AddQualifier(qual)
- return opAttr, err
- }
- indQual, err := p.relativeAttr(expr.GetId(), ind)
- if err != nil {
- return nil, err
- }
- _, err = opAttr.AddQualifier(indQual)
- return opAttr, err
-}
-
-// planCreateList generates a list construction Interpretable.
-func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
- list := expr.GetListExpr()
- elems := make([]Interpretable, len(list.GetElements()))
- for i, elem := range list.GetElements() {
- elemVal, err := p.Plan(elem)
- if err != nil {
- return nil, err
- }
- elems[i] = elemVal
- }
- return &evalList{
- id: expr.GetId(),
- elems: elems,
- adapter: p.adapter,
- }, nil
-}
-
-// planCreateStruct generates a map or object construction Interpretable.
-func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
- str := expr.GetStructExpr()
- if len(str.MessageName) != 0 {
- return p.planCreateObj(expr)
- }
- entries := str.GetEntries()
- keys := make([]Interpretable, len(entries))
- vals := make([]Interpretable, len(entries))
- for i, entry := range entries {
- keyVal, err := p.Plan(entry.GetMapKey())
- if err != nil {
- return nil, err
- }
- keys[i] = keyVal
-
- valVal, err := p.Plan(entry.GetValue())
- if err != nil {
- return nil, err
- }
- vals[i] = valVal
- }
- return &evalMap{
- id: expr.GetId(),
- keys: keys,
- vals: vals,
- adapter: p.adapter,
- }, nil
-}
-
-// planCreateObj generates an object construction Interpretable.
-func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
- obj := expr.GetStructExpr()
- typeName, defined := p.resolveTypeName(obj.MessageName)
- if !defined {
- return nil, fmt.Errorf("unknown type: %s", typeName)
- }
- entries := obj.GetEntries()
- fields := make([]string, len(entries))
- vals := make([]Interpretable, len(entries))
- for i, entry := range entries {
- fields[i] = entry.GetFieldKey()
- val, err := p.Plan(entry.GetValue())
- if err != nil {
- return nil, err
- }
- vals[i] = val
- }
- return &evalObj{
- id: expr.GetId(),
- typeName: typeName,
- fields: fields,
- vals: vals,
- provider: p.provider,
- }, nil
-}
-
-// planComprehension generates an Interpretable fold operation.
-func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) {
- fold := expr.GetComprehensionExpr()
- accu, err := p.Plan(fold.GetAccuInit())
- if err != nil {
- return nil, err
- }
- iterRange, err := p.Plan(fold.GetIterRange())
- if err != nil {
- return nil, err
- }
- cond, err := p.Plan(fold.GetLoopCondition())
- if err != nil {
- return nil, err
- }
- step, err := p.Plan(fold.GetLoopStep())
- if err != nil {
- return nil, err
- }
- result, err := p.Plan(fold.GetResult())
- if err != nil {
- return nil, err
- }
- return &evalFold{
- id: expr.GetId(),
- accuVar: fold.AccuVar,
- accu: accu,
- iterVar: fold.IterVar,
- iterRange: iterRange,
- cond: cond,
- step: step,
- result: result,
- adapter: p.adapter,
- }, nil
-}
-
-// planConst generates a constant valued Interpretable.
-func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) {
- val, err := p.constValue(expr.GetConstExpr())
- if err != nil {
- return nil, err
- }
- return NewConstValue(expr.GetId(), val), nil
-}
-
-// constValue converts a proto Constant value to a ref.Val.
-func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) {
- switch c.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- return p.adapter.NativeToValue(c.GetBoolValue()), nil
- case *exprpb.Constant_BytesValue:
- return p.adapter.NativeToValue(c.GetBytesValue()), nil
- case *exprpb.Constant_DoubleValue:
- return p.adapter.NativeToValue(c.GetDoubleValue()), nil
- case *exprpb.Constant_DurationValue:
- return p.adapter.NativeToValue(c.GetDurationValue().AsDuration()), nil
- case *exprpb.Constant_Int64Value:
- return p.adapter.NativeToValue(c.GetInt64Value()), nil
- case *exprpb.Constant_NullValue:
- return p.adapter.NativeToValue(c.GetNullValue()), nil
- case *exprpb.Constant_StringValue:
- return p.adapter.NativeToValue(c.GetStringValue()), nil
- case *exprpb.Constant_TimestampValue:
- return p.adapter.NativeToValue(c.GetTimestampValue().AsTime()), nil
- case *exprpb.Constant_Uint64Value:
- return p.adapter.NativeToValue(c.GetUint64Value()), nil
- }
- return nil, fmt.Errorf("unknown constant type: %v", c)
-}
-
-// resolveTypeName takes a qualified string constructed at parse time, applies the proto
-// namespace resolution rules to it in a scan over possible matching types in the TypeProvider.
-func (p *planner) resolveTypeName(typeName string) (string, bool) {
- for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) {
- if _, found := p.provider.FindType(qualifiedTypeName); found {
- return qualifiedTypeName, true
- }
- }
- return "", false
-}
-
-// resolveFunction determines the call target, function name, and overload name from a given Expr
-// value.
-//
-// The resolveFunction resolves ambiguities where a function may either be a receiver-style
-// invocation or a qualified global function name.
-// - The target expression may only consist of ident and select expressions.
-// - The function is declared in the environment using its fully-qualified name.
-// - The fully-qualified function name matches the string serialized target value.
-func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, string) {
- // Note: similar logic exists within the `checker/checker.go`. If making changes here
- // please consider the impact on checker.go and consolidate implementations or mirror code
- // as appropriate.
- call := expr.GetCallExpr()
- target := call.GetTarget()
- fnName := call.GetFunction()
-
- // Checked expressions always have a reference map entry, and _should_ have the fully qualified
- // function name as the fnName value.
- oRef, hasOverload := p.refMap[expr.GetId()]
- if hasOverload {
- if len(oRef.GetOverloadId()) == 1 {
- return target, fnName, oRef.GetOverloadId()[0]
- }
- // Note, this namespaced function name will not appear as a fully qualified name in ASTs
- // built and stored before cel-go v0.5.0; however, this functionality did not work at all
- // before the v0.5.0 release.
- return target, fnName, ""
- }
-
- // Parse-only expressions need to handle the same logic as is normally performed at check time,
- // but with potentially much less information. The only reliable source of information about
- // which functions are configured is the dispatcher.
- if target == nil {
- // If the user has a parse-only expression, then it should have been configured as such in
- // the interpreter dispatcher as it may have been omitted from the checker environment.
- for _, qualifiedName := range p.container.ResolveCandidateNames(fnName) {
- _, found := p.disp.FindOverload(qualifiedName)
- if found {
- return nil, qualifiedName, ""
- }
- }
- // It's possible that the overload was not found, but this situation is accounted for in
- // the planCall phase; however, the leading dot used for denoting fully-qualified
- // namespaced identifiers must be stripped, as all declarations already use fully-qualified
- // names. This stripping behavior is handled automatically by the ResolveCandidateNames
- // call.
- return target, stripLeadingDot(fnName), ""
- }
-
- // Handle the situation where the function target actually indicates a qualified function name.
- qualifiedPrefix, maybeQualified := p.toQualifiedName(target)
- if maybeQualified {
- maybeQualifiedName := qualifiedPrefix + "." + fnName
- for _, qualifiedName := range p.container.ResolveCandidateNames(maybeQualifiedName) {
- _, found := p.disp.FindOverload(qualifiedName)
- if found {
- // Clear the target to ensure the proper arity is used for finding the
- // implementation.
- return nil, qualifiedName, ""
- }
- }
- }
- // In the default case, the function is exactly as it was advertised: a receiver call on with
- // an expression-based target with the given simple function name.
- return target, fnName, ""
-}
-
-func (p *planner) relativeAttr(id int64, eval Interpretable) (InterpretableAttribute, error) {
- eAttr, ok := eval.(InterpretableAttribute)
- if !ok {
- eAttr = &evalAttr{
- adapter: p.adapter,
- attr: p.attrFactory.RelativeAttribute(id, eval),
- }
- }
- decAttr, err := p.decorate(eAttr, nil)
- if err != nil {
- return nil, err
- }
- eAttr, ok = decAttr.(InterpretableAttribute)
- if !ok {
- return nil, fmt.Errorf("invalid attribute decoration: %v(%T)", decAttr, decAttr)
- }
- return eAttr, nil
-}
-
-// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean
-// 'found' value that indicates if the conversion is successful.
-func (p *planner) toQualifiedName(operand *exprpb.Expr) (string, bool) {
- // If the checker identified the expression as an attribute by the type-checker, then it can't
- // possibly be part of qualified name in a namespace.
- _, isAttr := p.refMap[operand.GetId()]
- if isAttr {
- return "", false
- }
- // Since functions cannot be both namespaced and receiver functions, if the operand is not an
- // qualified variable name, return the (possibly) qualified name given the expressions.
- return containers.ToQualifiedName(operand)
-}
-
-func stripLeadingDot(name string) string {
- if strings.HasPrefix(name, ".") {
- return name[1:]
- }
- return name
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/prune.go b/etcd/vendor/github.com/google/cel-go/interpreter/prune.go
deleted file mode 100644
index eab46e0c06..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/prune.go
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-type astPruner struct {
- expr *exprpb.Expr
- state EvalState
- nextExprID int64
-}
-
-// TODO Consider having a separate walk of the AST that finds common
-// subexpressions. This can be called before or after constant folding to find
-// common subexpressions.
-
-// PruneAst prunes the given AST based on the given EvalState and generates a new AST.
-// Given AST is copied on write and a new AST is returned.
-// Couple of typical use cases this interface would be:
-//
-// A)
-// 1) Evaluate expr with some unknowns,
-// 2) If result is unknown:
-//
-// a) PruneAst
-// b) Goto 1
-//
-// Functional call results which are known would be effectively cached across
-// iterations.
-//
-// B)
-// 1) Compile the expression (maybe via a service and maybe after checking a
-//
-// compiled expression does not exists in local cache)
-//
-// 2) Prepare the environment and the interpreter. Activation might be empty.
-// 3) Eval the expression. This might return unknown or error or a concrete
-//
-// value.
-//
-// 4) PruneAst
-// 4) Maybe cache the expression
-// This is effectively constant folding the expression. How the environment is
-// prepared in step 2 is flexible. For example, If the caller caches the
-// compiled and constant folded expressions, but is not willing to constant
-// fold(and thus cache results of) some external calls, then they can prepare
-// the overloads accordingly.
-func PruneAst(expr *exprpb.Expr, state EvalState) *exprpb.Expr {
- pruner := &astPruner{
- expr: expr,
- state: state,
- nextExprID: 1}
- newExpr, _ := pruner.prune(expr)
- return newExpr
-}
-
-func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr {
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_ConstExpr{
- ConstExpr: val,
- },
- }
-}
-
-func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) {
- switch val.Type() {
- case types.BoolType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: val.Value().(bool)}}), true
- case types.IntType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: val.Value().(int64)}}), true
- case types.UintType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: val.Value().(uint64)}}), true
- case types.StringType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: val.Value().(string)}}), true
- case types.DoubleType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: val.Value().(float64)}}), true
- case types.BytesType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: val.Value().([]byte)}}), true
- case types.NullType:
- return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: val.Value().(structpb.NullValue)}}), true
- }
-
- // Attempt to build a list literal.
- if list, isList := val.(traits.Lister); isList {
- sz := list.Size().(types.Int)
- elemExprs := make([]*exprpb.Expr, sz)
- for i := types.Int(0); i < sz; i++ {
- elem := list.Get(i)
- if types.IsUnknownOrError(elem) {
- return nil, false
- }
- elemExpr, ok := p.maybeCreateLiteral(p.nextID(), elem)
- if !ok {
- return nil, false
- }
- elemExprs[i] = elemExpr
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_ListExpr{
- ListExpr: &exprpb.Expr_CreateList{
- Elements: elemExprs,
- },
- },
- }, true
- }
-
- // Create a map literal if possible.
- if mp, isMap := val.(traits.Mapper); isMap {
- it := mp.Iterator()
- entries := make([]*exprpb.Expr_CreateStruct_Entry, mp.Size().(types.Int))
- i := 0
- for it.HasNext() != types.False {
- key := it.Next()
- val := mp.Get(key)
- if types.IsUnknownOrError(key) || types.IsUnknownOrError(val) {
- return nil, false
- }
- keyExpr, ok := p.maybeCreateLiteral(p.nextID(), key)
- if !ok {
- return nil, false
- }
- valExpr, ok := p.maybeCreateLiteral(p.nextID(), val)
- if !ok {
- return nil, false
- }
- entry := &exprpb.Expr_CreateStruct_Entry{
- Id: p.nextID(),
- KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
- MapKey: keyExpr,
- },
- Value: valExpr,
- }
- entries[i] = entry
- i++
- }
- return &exprpb.Expr{
- Id: id,
- ExprKind: &exprpb.Expr_StructExpr{
- StructExpr: &exprpb.Expr_CreateStruct{
- Entries: entries,
- },
- },
- }, true
- }
-
- // TODO(issues/377) To construct message literals, the type provider will need to support
- // the enumeration the fields for a given message.
- return nil, false
-}
-
-func (p *astPruner) maybePruneAndOr(node *exprpb.Expr) (*exprpb.Expr, bool) {
- if !p.existsWithUnknownValue(node.GetId()) {
- return nil, false
- }
-
- call := node.GetCallExpr()
- // We know result is unknown, so we have at least one unknown arg
- // and if one side is a known value, we know we can ignore it.
- if p.existsWithKnownValue(call.Args[0].GetId()) {
- return call.Args[1], true
- }
- if p.existsWithKnownValue(call.Args[1].GetId()) {
- return call.Args[0], true
- }
- return nil, false
-}
-
-func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) {
- if !p.existsWithUnknownValue(node.GetId()) {
- return nil, false
- }
-
- call := node.GetCallExpr()
- condVal, condValueExists := p.value(call.Args[0].GetId())
- if !condValueExists || types.IsUnknownOrError(condVal) {
- return nil, false
- }
-
- if condVal.Value().(bool) {
- return call.Args[1], true
- }
- return call.Args[2], true
-}
-
-func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) {
- call := node.GetCallExpr()
- if call.Function == operators.LogicalOr || call.Function == operators.LogicalAnd {
- return p.maybePruneAndOr(node)
- }
- if call.Function == operators.Conditional {
- return p.maybePruneConditional(node)
- }
-
- return nil, false
-}
-
-func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
- if node == nil {
- return node, false
- }
- val, valueExists := p.value(node.GetId())
- if valueExists && !types.IsUnknownOrError(val) {
- if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok {
- return newNode, true
- }
- }
-
- // We have either an unknown/error value, or something we don't want to
- // transform, or expression was not evaluated. If possible, drill down
- // more.
-
- switch node.GetExprKind().(type) {
- case *exprpb.Expr_SelectExpr:
- if operand, pruned := p.prune(node.GetSelectExpr().GetOperand()); pruned {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_SelectExpr{
- SelectExpr: &exprpb.Expr_Select{
- Operand: operand,
- Field: node.GetSelectExpr().GetField(),
- TestOnly: node.GetSelectExpr().GetTestOnly(),
- },
- },
- }, true
- }
- case *exprpb.Expr_CallExpr:
- if newExpr, pruned := p.maybePruneFunction(node); pruned {
- newExpr, _ = p.prune(newExpr)
- return newExpr, true
- }
- var prunedCall bool
- call := node.GetCallExpr()
- args := call.GetArgs()
- newArgs := make([]*exprpb.Expr, len(args))
- newCall := &exprpb.Expr_Call{
- Function: call.GetFunction(),
- Target: call.GetTarget(),
- Args: newArgs,
- }
- for i, arg := range args {
- newArgs[i] = arg
- if newArg, prunedArg := p.prune(arg); prunedArg {
- prunedCall = true
- newArgs[i] = newArg
- }
- }
- if newTarget, prunedTarget := p.prune(call.GetTarget()); prunedTarget {
- prunedCall = true
- newCall.Target = newTarget
- }
- if prunedCall {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_CallExpr{
- CallExpr: newCall,
- },
- }, true
- }
- case *exprpb.Expr_ListExpr:
- elems := node.GetListExpr().GetElements()
- newElems := make([]*exprpb.Expr, len(elems))
- var prunedList bool
- for i, elem := range elems {
- newElems[i] = elem
- if newElem, prunedElem := p.prune(elem); prunedElem {
- newElems[i] = newElem
- prunedList = true
- }
- }
- if prunedList {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_ListExpr{
- ListExpr: &exprpb.Expr_CreateList{
- Elements: newElems,
- },
- },
- }, true
- }
- case *exprpb.Expr_StructExpr:
- var prunedStruct bool
- entries := node.GetStructExpr().GetEntries()
- messageType := node.GetStructExpr().GetMessageName()
- newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
- for i, entry := range entries {
- newEntries[i] = entry
- newKey, prunedKey := p.prune(entry.GetMapKey())
- newValue, prunedValue := p.prune(entry.GetValue())
- if !prunedKey && !prunedValue {
- continue
- }
- prunedStruct = true
- newEntry := &exprpb.Expr_CreateStruct_Entry{
- Value: newValue,
- }
- if messageType != "" {
- newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{
- FieldKey: entry.GetFieldKey(),
- }
- } else {
- newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{
- MapKey: newKey,
- }
- }
- newEntries[i] = newEntry
- }
- if prunedStruct {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_StructExpr{
- StructExpr: &exprpb.Expr_CreateStruct{
- MessageName: messageType,
- Entries: newEntries,
- },
- },
- }, true
- }
- case *exprpb.Expr_ComprehensionExpr:
- compre := node.GetComprehensionExpr()
- // Only the range of the comprehension is pruned since the state tracking only records
- // the last iteration of the comprehension and not each step in the evaluation which
- // means that the any residuals computed in between might be inaccurate.
- if newRange, pruned := p.prune(compre.GetIterRange()); pruned {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_ComprehensionExpr{
- ComprehensionExpr: &exprpb.Expr_Comprehension{
- IterVar: compre.GetIterVar(),
- IterRange: newRange,
- AccuVar: compre.GetAccuVar(),
- AccuInit: compre.GetAccuInit(),
- LoopCondition: compre.GetLoopCondition(),
- LoopStep: compre.GetLoopStep(),
- Result: compre.GetResult(),
- },
- },
- }, true
- }
- }
- return node, false
-}
-
-func (p *astPruner) value(id int64) (ref.Val, bool) {
- val, found := p.state.Value(id)
- return val, (found && val != nil)
-}
-
-func (p *astPruner) existsWithUnknownValue(id int64) bool {
- val, valueExists := p.value(id)
- return valueExists && types.IsUnknown(val)
-}
-
-func (p *astPruner) existsWithKnownValue(id int64) bool {
- val, valueExists := p.value(id)
- return valueExists && !types.IsUnknown(val)
-}
-
-func (p *astPruner) nextID() int64 {
- for {
- _, found := p.state.Value(p.nextExprID)
- if !found {
- next := p.nextExprID
- p.nextExprID++
- return next
- }
- p.nextExprID++
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/etcd/vendor/github.com/google/cel-go/interpreter/runtimecost.go
deleted file mode 100644
index 06b6b27ef1..0000000000
--- a/etcd/vendor/github.com/google/cel-go/interpreter/runtimecost.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package interpreter
-
-import (
- "math"
-
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/overloads"
- "github.com/google/cel-go/common/types"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
-)
-
-// WARNING: Any changes to cost calculations in this file require a corresponding change in checker/cost.go
-
-// ActualCostEstimator provides function call cost estimations at runtime
-// CallCost returns an estimated cost for the function overload invocation with the given args, or nil if it has no
-// estimate to provide. CEL attempts to provide reasonable estimates for its standard function library, so CallCost
-// should typically not need to provide an estimate for CELs standard function.
-type ActualCostEstimator interface {
- CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64
-}
-
-// CostObserver provides an observer that tracks runtime cost.
-func CostObserver(tracker *CostTracker) EvalObserver {
- observer := func(id int64, programStep interface{}, val ref.Val) {
- switch t := programStep.(type) {
- case ConstantQualifier:
- // TODO: Push identifiers on to the stack before observing constant qualifiers that apply to them
- // and enable the below pop. Once enabled this can case can be collapsed into the Qualifier case.
- tracker.cost++
- case InterpretableConst:
- // zero cost
- case InterpretableAttribute:
- switch a := t.Attr().(type) {
- case *conditionalAttribute:
- // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
- tracker.stack.drop(a.falsy.ID(), a.truthy.ID(), a.expr.ID())
- default:
- tracker.stack.drop(t.Attr().ID())
- tracker.cost += common.SelectAndIdentCost
- }
- case *evalExhaustiveConditional:
- // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
- tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID())
-
- // While the field names are identical, the boolean operation eval structs do not share an interface and so
- // must be handled individually.
- case *evalOr:
- tracker.stack.drop(t.rhs.ID(), t.lhs.ID())
- case *evalAnd:
- tracker.stack.drop(t.rhs.ID(), t.lhs.ID())
- case *evalExhaustiveOr:
- tracker.stack.drop(t.rhs.ID(), t.lhs.ID())
- case *evalExhaustiveAnd:
- tracker.stack.drop(t.rhs.ID(), t.lhs.ID())
- case *evalFold:
- tracker.stack.drop(t.iterRange.ID())
- case Qualifier:
- tracker.cost++
- case InterpretableCall:
- if argVals, ok := tracker.stack.dropArgs(t.Args()); ok {
- tracker.cost += tracker.costCall(t, argVals, val)
- }
- case InterpretableConstructor:
- tracker.stack.dropArgs(t.InitVals())
- switch t.Type() {
- case types.ListType:
- tracker.cost += common.ListCreateBaseCost
- case types.MapType:
- tracker.cost += common.MapCreateBaseCost
- default:
- tracker.cost += common.StructCreateBaseCost
- }
- }
- tracker.stack.push(val, id)
-
- if tracker.Limit != nil && tracker.cost > *tracker.Limit {
- panic(EvalCancelledError{Cause: CostLimitExceeded, Message: "operation cancelled: actual cost limit exceeded"})
- }
- }
- return observer
-}
-
-// CostTracker represents the information needed for tacking runtime cost
-type CostTracker struct {
- Estimator ActualCostEstimator
- Limit *uint64
-
- cost uint64
- stack refValStack
-}
-
-// ActualCost returns the runtime cost
-func (c CostTracker) ActualCost() uint64 {
- return c.cost
-}
-
-func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 {
- var cost uint64
- if c.Estimator != nil {
- callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), argValues, result)
- if callCost != nil {
- cost += *callCost
- return cost
- }
- }
- // if user didn't specify, the default way of calculating runtime cost would be used.
- // if user has their own implementation of ActualCostEstimator, make sure to cover the mapping between overloadId and cost calculation
- switch call.OverloadID() {
- // O(n) functions
- case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString:
- cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor))
- case overloads.InList:
- // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
- // We just assume all list containment checks are O(n).
- cost += c.actualSize(argValues[1])
- // O(min(m, n)) functions
- case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
- overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
- overloads.Equals, overloads.NotEquals:
- // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.),
- // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost
- // of 1.
- lhsSize := c.actualSize(argValues[0])
- rhsSize := c.actualSize(argValues[1])
- minSize := lhsSize
- if rhsSize < minSize {
- minSize = rhsSize
- }
- cost += uint64(math.Ceil(float64(minSize) * common.StringTraversalCostFactor))
- // O(m+n) functions
- case overloads.AddString, overloads.AddBytes:
- // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over.
- cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])+c.actualSize(argValues[1])) * common.StringTraversalCostFactor))
- // O(nm) functions
- case overloads.MatchesString:
- // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
- // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
- // in case where string is empty but regex is still expensive.
- strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(argValues[0]))) * common.StringTraversalCostFactor))
- // We don't know how many expressions are in the regex, just the string length (a huge
- // improvement here would be to somehow get a count the number of expressions in the regex or
- // how many states are in the regex state machine and use that to measure regex cost).
- // For now, we're making a guess that each expression in a regex is typically at least 4 chars
- // in length.
- regexCost := uint64(math.Ceil(float64(c.actualSize(argValues[1])) * common.RegexStringLengthCostFactor))
- cost += strCost * regexCost
- case overloads.ContainsString:
- strCost := uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor))
- substrCost := uint64(math.Ceil(float64(c.actualSize(argValues[1])) * common.StringTraversalCostFactor))
- cost += strCost * substrCost
-
- default:
- // The following operations are assumed to have O(1) complexity.
- // - AddList due to the implementation. Index lookup can be O(c) the
- // number of concatenated lists, but we don't track that is cost calculations.
- // - Conversions, since none perform a traversal of a type of unbound length.
- // - Computing the size of strings, byte sequences, lists and maps.
- // - Logical operations and all operators on fixed width scalars (comparisons, equality)
- // - Any functions that don't have a declared cost either here or in provided ActualCostEstimator.
- cost++
-
- }
- return cost
-}
-
-// actualSize returns the size of value
-func (c CostTracker) actualSize(value ref.Val) uint64 {
- if sz, ok := value.(traits.Sizer); ok {
- return uint64(sz.Size().(types.Int))
- }
- return 1
-}
-
-type stackVal struct {
- Val ref.Val
- ID int64
-}
-
-// refValStack keeps track of values of the stack for cost calculation purposes
-type refValStack []stackVal
-
-func (s *refValStack) push(val ref.Val, id int64) {
- value := stackVal{Val: val, ID: id}
- *s = append(*s, value)
-}
-
-// TODO: Allowing drop and dropArgs to remove stack items above the IDs they are provided is a workaround. drop and dropArgs
-// should find and remove only the stack items matching the provided IDs once all attributes are properly pushed and popped from stack.
-
-// drop searches the stack for each ID and removes the ID and all stack items above it.
-// If none of the IDs are found, the stack is not modified.
-// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
-// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
-func (s *refValStack) drop(ids ...int64) {
- for _, id := range ids {
- for idx := len(*s) - 1; idx >= 0; idx-- {
- if (*s)[idx].ID == id {
- *s = (*s)[:idx]
- break
- }
- }
- }
-}
-
-// dropArgs searches the stack for all the args by their IDs, accumulates their associated ref.Vals and drops any
-// stack items above any of the arg IDs. If any of the IDs are not found the stack, false is returned.
-// Args are assumed to be found in the stack in reverse order, i.e. the last arg is expected to be found highest in
-// the stack.
-// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
-// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
-func (s *refValStack) dropArgs(args []Interpretable) ([]ref.Val, bool) {
- result := make([]ref.Val, len(args))
-argloop:
- for nIdx := len(args) - 1; nIdx >= 0; nIdx-- {
- for idx := len(*s) - 1; idx >= 0; idx-- {
- if (*s)[idx].ID == args[nIdx].ID() {
- el := (*s)[idx]
- *s = (*s)[:idx]
- result[nIdx] = el.Val
- continue argloop
- }
- }
- return nil, false
- }
- return result, true
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/parser/BUILD.bazel
deleted file mode 100644
index b76e6e4844..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/BUILD.bazel
+++ /dev/null
@@ -1,51 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "errors.go",
- "helper.go",
- "input.go",
- "macro.go",
- "options.go",
- "parser.go",
- "unescape.go",
- "unparser.go",
- ],
- importpath = "github.com/google/cel-go/parser",
- visibility = ["//visibility:public"],
- deps = [
- "//common:go_default_library",
- "//common/operators:go_default_library",
- "//common/runes:go_default_library",
- "//parser/gen:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_google_protobuf//types/known/structpb:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "parser_test.go",
- "unescape_test.go",
- "unparser_test.go",
- ],
- embed = [
- ":go_default_library",
- ],
- deps = [
- "//common/debug:go_default_library",
- "//parser/gen:go_default_library",
- "//test:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/parser/errors.go b/etcd/vendor/github.com/google/cel-go/parser/errors.go
deleted file mode 100644
index ce49bb87f8..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/errors.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "fmt"
-
- "github.com/google/cel-go/common"
-)
-
-// parseErrors is a specialization of Errors.
-type parseErrors struct {
- *common.Errors
-}
-
-func (e *parseErrors) syntaxError(l common.Location, message string) {
- e.ReportError(l, fmt.Sprintf("Syntax error: %s", message))
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/etcd/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
deleted file mode 100644
index 22711310ce..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
+++ /dev/null
@@ -1,26 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-package(
- default_visibility = ["//parser:__subpackages__"],
- licenses = ["notice"], # Apache 2.0
-)
-
-go_library(
- name = "go_default_library",
- srcs = [
- "cel_base_listener.go",
- "cel_base_visitor.go",
- "cel_lexer.go",
- "cel_listener.go",
- "cel_parser.go",
- "cel_visitor.go",
- ],
- data = [
- "CEL.tokens",
- "CELLexer.tokens",
- ],
- importpath = "github.com/google/cel-go/parser/gen",
- deps = [
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
- ],
-)
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.g4
deleted file mode 100644
index 11145ec374..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.g4
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-grammar CEL;
-
-// Grammar Rules
-// =============
-
-start
- : e=expr EOF
- ;
-
-expr
- : e=conditionalOr (op='?' e1=conditionalOr ':' e2=expr)?
- ;
-
-conditionalOr
- : e=conditionalAnd (ops+='||' e1+=conditionalAnd)*
- ;
-
-conditionalAnd
- : e=relation (ops+='&&' e1+=relation)*
- ;
-
-relation
- : calc
- | relation op=('<'|'<='|'>='|'>'|'=='|'!='|'in') relation
- ;
-
-calc
- : unary
- | calc op=('*'|'/'|'%') calc
- | calc op=('+'|'-') calc
- ;
-
-unary
- : member # MemberExpr
- | (ops+='!')+ member # LogicalNot
- | (ops+='-')+ member # Negate
- ;
-
-member
- : primary # PrimaryExpr
- | member op='.' id=IDENTIFIER (open='(' args=exprList? ')')? # SelectOrCall
- | member op='[' index=expr ']' # Index
- | member op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
- ;
-
-primary
- : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall
- | '(' e=expr ')' # Nested
- | op='[' elems=exprList? ','? ']' # CreateList
- | op='{' entries=mapInitializerList? ','? '}' # CreateStruct
- | literal # ConstantLiteral
- ;
-
-exprList
- : e+=expr (',' e+=expr)*
- ;
-
-fieldInitializerList
- : fields+=IDENTIFIER cols+=':' values+=expr (',' fields+=IDENTIFIER cols+=':' values+=expr)*
- ;
-
-mapInitializerList
- : keys+=expr cols+=':' values+=expr (',' keys+=expr cols+=':' values+=expr)*
- ;
-
-literal
- : sign=MINUS? tok=NUM_INT # Int
- | tok=NUM_UINT # Uint
- | sign=MINUS? tok=NUM_FLOAT # Double
- | tok=STRING # String
- | tok=BYTES # Bytes
- | tok=CEL_TRUE # BoolTrue
- | tok=CEL_FALSE # BoolFalse
- | tok=NUL # Null
- ;
-
-// Lexer Rules
-// ===========
-
-EQUALS : '==';
-NOT_EQUALS : '!=';
-IN: 'in';
-LESS : '<';
-LESS_EQUALS : '<=';
-GREATER_EQUALS : '>=';
-GREATER : '>';
-LOGICAL_AND : '&&';
-LOGICAL_OR : '||';
-
-LBRACKET : '[';
-RPRACKET : ']';
-LBRACE : '{';
-RBRACE : '}';
-LPAREN : '(';
-RPAREN : ')';
-DOT : '.';
-COMMA : ',';
-MINUS : '-';
-EXCLAM : '!';
-QUESTIONMARK : '?';
-COLON : ':';
-PLUS : '+';
-STAR : '*';
-SLASH : '/';
-PERCENT : '%';
-CEL_TRUE : 'true';
-CEL_FALSE : 'false';
-NUL : 'null';
-
-fragment BACKSLASH : '\\';
-fragment LETTER : 'A'..'Z' | 'a'..'z' ;
-fragment DIGIT : '0'..'9' ;
-fragment EXPONENT : ('e' | 'E') ( '+' | '-' )? DIGIT+ ;
-fragment HEXDIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ;
-fragment RAW : 'r' | 'R';
-
-fragment ESC_SEQ
- : ESC_CHAR_SEQ
- | ESC_BYTE_SEQ
- | ESC_UNI_SEQ
- | ESC_OCT_SEQ
- ;
-
-fragment ESC_CHAR_SEQ
- : BACKSLASH ('a'|'b'|'f'|'n'|'r'|'t'|'v'|'"'|'\''|'\\'|'?'|'`')
- ;
-
-fragment ESC_OCT_SEQ
- : BACKSLASH ('0'..'3') ('0'..'7') ('0'..'7')
- ;
-
-fragment ESC_BYTE_SEQ
- : BACKSLASH ( 'x' | 'X' ) HEXDIGIT HEXDIGIT
- ;
-
-fragment ESC_UNI_SEQ
- : BACKSLASH 'u' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT
- | BACKSLASH 'U' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT
- ;
-
-WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ -> channel(HIDDEN) ;
-COMMENT : '//' (~'\n')* -> channel(HIDDEN) ;
-
-NUM_FLOAT
- : ( DIGIT+ ('.' DIGIT+) EXPONENT?
- | DIGIT+ EXPONENT
- | '.' DIGIT+ EXPONENT?
- )
- ;
-
-NUM_INT
- : ( DIGIT+ | '0x' HEXDIGIT+ );
-
-NUM_UINT
- : DIGIT+ ( 'u' | 'U' )
- | '0x' HEXDIGIT+ ( 'u' | 'U' )
- ;
-
-STRING
- : '"' (ESC_SEQ | ~('\\'|'"'|'\n'|'\r'))* '"'
- | '\'' (ESC_SEQ | ~('\\'|'\''|'\n'|'\r'))* '\''
- | '"""' (ESC_SEQ | ~('\\'))*? '"""'
- | '\'\'\'' (ESC_SEQ | ~('\\'))*? '\'\'\''
- | RAW '"' ~('"'|'\n'|'\r')* '"'
- | RAW '\'' ~('\''|'\n'|'\r')* '\''
- | RAW '"""' .*? '"""'
- | RAW '\'\'\'' .*? '\'\'\''
- ;
-
-BYTES : ('b' | 'B') STRING;
-
-IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*;
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.interp
deleted file mode 100644
index 13e3a10d17..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.interp
+++ /dev/null
@@ -1,96 +0,0 @@
-token literal names:
-null
-'=='
-'!='
-'in'
-'<'
-'<='
-'>='
-'>'
-'&&'
-'||'
-'['
-']'
-'{'
-'}'
-'('
-')'
-'.'
-','
-'-'
-'!'
-'?'
-':'
-'+'
-'*'
-'/'
-'%'
-'true'
-'false'
-'null'
-null
-null
-null
-null
-null
-null
-null
-null
-
-token symbolic names:
-null
-EQUALS
-NOT_EQUALS
-IN
-LESS
-LESS_EQUALS
-GREATER_EQUALS
-GREATER
-LOGICAL_AND
-LOGICAL_OR
-LBRACKET
-RPRACKET
-LBRACE
-RBRACE
-LPAREN
-RPAREN
-DOT
-COMMA
-MINUS
-EXCLAM
-QUESTIONMARK
-COLON
-PLUS
-STAR
-SLASH
-PERCENT
-CEL_TRUE
-CEL_FALSE
-NUL
-WHITESPACE
-COMMENT
-NUM_FLOAT
-NUM_INT
-NUM_UINT
-STRING
-BYTES
-IDENTIFIER
-
-rule names:
-start
-expr
-conditionalOr
-conditionalAnd
-relation
-calc
-unary
-member
-primary
-exprList
-fieldInitializerList
-mapInitializerList
-literal
-
-
-atn:
-[4, 1, 36, 209, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 41, 8, 2, 10, 2, 12, 2, 44, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 60, 8, 4, 10, 4, 12, 4, 63, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 74, 8, 5, 10, 5, 12, 5, 77, 9, 5, 1, 6, 1, 6, 4, 6, 81, 8, 6, 11, 6, 12, 6, 82, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 3, 6, 92, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 102, 8, 7, 1, 7, 3, 7, 105, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 115, 8, 7, 1, 7, 3, 7, 118, 8, 7, 1, 7, 5, 7, 121, 8, 7, 10, 7, 12, 7, 124, 9, 7, 1, 8, 3, 8, 127, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 132, 8, 8, 1, 8, 3, 8, 135, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 143, 8, 8, 1, 8, 3, 8, 146, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8, 8, 1, 8, 1, 8, 3, 8, 158, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 163, 8, 9, 10, 9, 12, 9, 166, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 175, 8, 10, 10, 10, 12, 10, 178, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 188, 8, 11, 10, 11, 12, 11, 191, 9, 11, 1, 12, 3, 12, 194, 8, 12, 1, 12, 1, 12, 1, 12, 3, 12, 199, 8, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 3, 12, 207, 8, 12, 1, 12, 0, 3, 8, 10, 14, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 235, 0, 26, 1, 0, 0, 0, 2, 29, 1, 0, 0, 0, 4, 37, 1, 0, 0, 0, 6, 45, 1, 0, 0, 0, 8, 53, 1, 0, 0, 0, 10, 64, 1, 0, 0, 0, 12, 91, 1, 0, 0, 0, 14, 93, 1, 0, 0, 0, 16, 157, 1, 0, 0, 0, 18, 159, 1, 0, 0, 0, 20, 167, 1, 0, 0, 0, 22, 179, 1, 0, 0, 0, 24, 206, 1, 0, 0, 0, 26, 27, 3, 2, 1, 0, 27, 28, 5, 0, 0, 1, 28, 1, 1, 0, 0, 0, 29, 35, 3, 4, 2, 0, 30, 31, 5, 20, 0, 0, 
31, 32, 3, 4, 2, 0, 32, 33, 5, 21, 0, 0, 33, 34, 3, 2, 1, 0, 34, 36, 1, 0, 0, 0, 35, 30, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 3, 1, 0, 0, 0, 37, 42, 3, 6, 3, 0, 38, 39, 5, 9, 0, 0, 39, 41, 3, 6, 3, 0, 40, 38, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 5, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 50, 3, 8, 4, 0, 46, 47, 5, 8, 0, 0, 47, 49, 3, 8, 4, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 7, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 6, 4, -1, 0, 54, 55, 3, 10, 5, 0, 55, 61, 1, 0, 0, 0, 56, 57, 10, 1, 0, 0, 57, 58, 7, 0, 0, 0, 58, 60, 3, 8, 4, 2, 59, 56, 1, 0, 0, 0, 60, 63, 1, 0, 0, 0, 61, 59, 1, 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 9, 1, 0, 0, 0, 63, 61, 1, 0, 0, 0, 64, 65, 6, 5, -1, 0, 65, 66, 3, 12, 6, 0, 66, 75, 1, 0, 0, 0, 67, 68, 10, 2, 0, 0, 68, 69, 7, 1, 0, 0, 69, 74, 3, 10, 5, 3, 70, 71, 10, 1, 0, 0, 71, 72, 7, 2, 0, 0, 72, 74, 3, 10, 5, 2, 73, 67, 1, 0, 0, 0, 73, 70, 1, 0, 0, 0, 74, 77, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 11, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 78, 92, 3, 14, 7, 0, 79, 81, 5, 19, 0, 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83, 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 92, 3, 14, 7, 0, 85, 87, 5, 18, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 92, 3, 14, 7, 0, 91, 78, 1, 0, 0, 0, 91, 80, 1, 0, 0, 0, 91, 86, 1, 0, 0, 0, 92, 13, 1, 0, 0, 0, 93, 94, 6, 7, -1, 0, 94, 95, 3, 16, 8, 0, 95, 122, 1, 0, 0, 0, 96, 97, 10, 3, 0, 0, 97, 98, 5, 16, 0, 0, 98, 104, 5, 36, 0, 0, 99, 101, 5, 14, 0, 0, 100, 102, 3, 18, 9, 0, 101, 100, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 103, 1, 0, 0, 0, 103, 105, 5, 15, 0, 0, 104, 99, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0, 105, 121, 1, 0, 0, 0, 106, 107, 10, 2, 0, 0, 107, 108, 5, 10, 0, 0, 108, 109, 3, 2, 1, 0, 109, 110, 5, 11, 0, 0, 110, 121, 1, 0, 0, 0, 111, 112, 10, 1, 0, 0, 112, 114, 5, 12, 0, 0, 113, 115, 3, 20, 10, 0, 114, 113, 1, 0, 0, 0, 114, 115, 1, 0, 
0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 5, 17, 0, 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, 121, 5, 13, 0, 0, 120, 96, 1, 0, 0, 0, 120, 106, 1, 0, 0, 0, 120, 111, 1, 0, 0, 0, 121, 124, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 122, 123, 1, 0, 0, 0, 123, 15, 1, 0, 0, 0, 124, 122, 1, 0, 0, 0, 125, 127, 5, 16, 0, 0, 126, 125, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 134, 5, 36, 0, 0, 129, 131, 5, 14, 0, 0, 130, 132, 3, 18, 9, 0, 131, 130, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 135, 5, 15, 0, 0, 134, 129, 1, 0, 0, 0, 134, 135, 1, 0, 0, 0, 135, 158, 1, 0, 0, 0, 136, 137, 5, 14, 0, 0, 137, 138, 3, 2, 1, 0, 138, 139, 5, 15, 0, 0, 139, 158, 1, 0, 0, 0, 140, 142, 5, 10, 0, 0, 141, 143, 3, 18, 9, 0, 142, 141, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 145, 1, 0, 0, 0, 144, 146, 5, 17, 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 158, 5, 11, 0, 0, 148, 150, 5, 12, 0, 0, 149, 151, 3, 22, 11, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154, 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 158, 5, 13, 0, 0, 156, 158, 3, 24, 12, 0, 157, 126, 1, 0, 0, 0, 157, 136, 1, 0, 0, 0, 157, 140, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157, 156, 1, 0, 0, 0, 158, 17, 1, 0, 0, 0, 159, 164, 3, 2, 1, 0, 160, 161, 5, 17, 0, 0, 161, 163, 3, 2, 1, 0, 162, 160, 1, 0, 0, 0, 163, 166, 1, 0, 0, 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 19, 1, 0, 0, 0, 166, 164, 1, 0, 0, 0, 167, 168, 5, 36, 0, 0, 168, 169, 5, 21, 0, 0, 169, 176, 3, 2, 1, 0, 170, 171, 5, 17, 0, 0, 171, 172, 5, 36, 0, 0, 172, 173, 5, 21, 0, 0, 173, 175, 3, 2, 1, 0, 174, 170, 1, 0, 0, 0, 175, 178, 1, 0, 0, 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 21, 1, 0, 0, 0, 178, 176, 1, 0, 0, 0, 179, 180, 3, 2, 1, 0, 180, 181, 5, 21, 0, 0, 181, 189, 3, 2, 1, 0, 182, 183, 5, 17, 0, 0, 183, 184, 3, 2, 1, 0, 184, 185, 5, 21, 0, 0, 185, 186, 3, 2, 1, 0, 186, 188, 1, 0, 0, 0, 187, 182, 
1, 0, 0, 0, 188, 191, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190, 23, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 192, 194, 5, 18, 0, 0, 193, 192, 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 195, 1, 0, 0, 0, 195, 207, 5, 32, 0, 0, 196, 207, 5, 33, 0, 0, 197, 199, 5, 18, 0, 0, 198, 197, 1, 0, 0, 0, 198, 199, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 207, 5, 31, 0, 0, 201, 207, 5, 34, 0, 0, 202, 207, 5, 35, 0, 0, 203, 207, 5, 26, 0, 0, 204, 207, 5, 27, 0, 0, 205, 207, 5, 28, 0, 0, 206, 193, 1, 0, 0, 0, 206, 196, 1, 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 206, 202, 1, 0, 0, 0, 206, 203, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207, 25, 1, 0, 0, 0, 29, 35, 42, 50, 61, 73, 75, 82, 88, 91, 101, 104, 114, 117, 120, 122, 126, 131, 134, 142, 145, 150, 153, 157, 164, 176, 189, 193, 198, 206]
\ No newline at end of file
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
deleted file mode 100644
index b305bdad32..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
+++ /dev/null
@@ -1,64 +0,0 @@
-EQUALS=1
-NOT_EQUALS=2
-IN=3
-LESS=4
-LESS_EQUALS=5
-GREATER_EQUALS=6
-GREATER=7
-LOGICAL_AND=8
-LOGICAL_OR=9
-LBRACKET=10
-RPRACKET=11
-LBRACE=12
-RBRACE=13
-LPAREN=14
-RPAREN=15
-DOT=16
-COMMA=17
-MINUS=18
-EXCLAM=19
-QUESTIONMARK=20
-COLON=21
-PLUS=22
-STAR=23
-SLASH=24
-PERCENT=25
-CEL_TRUE=26
-CEL_FALSE=27
-NUL=28
-WHITESPACE=29
-COMMENT=30
-NUM_FLOAT=31
-NUM_INT=32
-NUM_UINT=33
-STRING=34
-BYTES=35
-IDENTIFIER=36
-'=='=1
-'!='=2
-'in'=3
-'<'=4
-'<='=5
-'>='=6
-'>'=7
-'&&'=8
-'||'=9
-'['=10
-']'=11
-'{'=12
-'}'=13
-'('=14
-')'=15
-'.'=16
-','=17
-'-'=18
-'!'=19
-'?'=20
-':'=21
-'+'=22
-'*'=23
-'/'=24
-'%'=25
-'true'=26
-'false'=27
-'null'=28
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/etcd/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
deleted file mode 100644
index 26e7f471e8..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
+++ /dev/null
@@ -1,136 +0,0 @@
-token literal names:
-null
-'=='
-'!='
-'in'
-'<'
-'<='
-'>='
-'>'
-'&&'
-'||'
-'['
-']'
-'{'
-'}'
-'('
-')'
-'.'
-','
-'-'
-'!'
-'?'
-':'
-'+'
-'*'
-'/'
-'%'
-'true'
-'false'
-'null'
-null
-null
-null
-null
-null
-null
-null
-null
-
-token symbolic names:
-null
-EQUALS
-NOT_EQUALS
-IN
-LESS
-LESS_EQUALS
-GREATER_EQUALS
-GREATER
-LOGICAL_AND
-LOGICAL_OR
-LBRACKET
-RPRACKET
-LBRACE
-RBRACE
-LPAREN
-RPAREN
-DOT
-COMMA
-MINUS
-EXCLAM
-QUESTIONMARK
-COLON
-PLUS
-STAR
-SLASH
-PERCENT
-CEL_TRUE
-CEL_FALSE
-NUL
-WHITESPACE
-COMMENT
-NUM_FLOAT
-NUM_INT
-NUM_UINT
-STRING
-BYTES
-IDENTIFIER
-
-rule names:
-EQUALS
-NOT_EQUALS
-IN
-LESS
-LESS_EQUALS
-GREATER_EQUALS
-GREATER
-LOGICAL_AND
-LOGICAL_OR
-LBRACKET
-RPRACKET
-LBRACE
-RBRACE
-LPAREN
-RPAREN
-DOT
-COMMA
-MINUS
-EXCLAM
-QUESTIONMARK
-COLON
-PLUS
-STAR
-SLASH
-PERCENT
-CEL_TRUE
-CEL_FALSE
-NUL
-BACKSLASH
-LETTER
-DIGIT
-EXPONENT
-HEXDIGIT
-RAW
-ESC_SEQ
-ESC_CHAR_SEQ
-ESC_OCT_SEQ
-ESC_BYTE_SEQ
-ESC_UNI_SEQ
-WHITESPACE
-COMMENT
-NUM_FLOAT
-NUM_INT
-NUM_UINT
-STRING
-BYTES
-IDENTIFIER
-
-channel names:
-DEFAULT_TOKEN_CHANNEL
-HIDDEN
-
-mode names:
-DEFAULT_MODE
-
-atn:
-[4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, 41, 3, 41, 273, 8, 41, 3, 
41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 
1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 
107, 5, 60, 0, 0, 107, 108, 5, 61, 0, 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, 
1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 
0, 0, 271, 273, 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, 39, 0, 0, 344, 345, 5, 39, 0, 0, 
345, 350, 1, 0, 0, 0, 346, 349, 3, 69, 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 
418, 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, 413, 418, 420, 1, 0, 1, 0]
\ No newline at end of file
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/etcd/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
deleted file mode 100644
index b305bdad32..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
+++ /dev/null
@@ -1,64 +0,0 @@
-EQUALS=1
-NOT_EQUALS=2
-IN=3
-LESS=4
-LESS_EQUALS=5
-GREATER_EQUALS=6
-GREATER=7
-LOGICAL_AND=8
-LOGICAL_OR=9
-LBRACKET=10
-RPRACKET=11
-LBRACE=12
-RBRACE=13
-LPAREN=14
-RPAREN=15
-DOT=16
-COMMA=17
-MINUS=18
-EXCLAM=19
-QUESTIONMARK=20
-COLON=21
-PLUS=22
-STAR=23
-SLASH=24
-PERCENT=25
-CEL_TRUE=26
-CEL_FALSE=27
-NUL=28
-WHITESPACE=29
-COMMENT=30
-NUM_FLOAT=31
-NUM_INT=32
-NUM_UINT=33
-STRING=34
-BYTES=35
-IDENTIFIER=36
-'=='=1
-'!='=2
-'in'=3
-'<'=4
-'<='=5
-'>='=6
-'>'=7
-'&&'=8
-'||'=9
-'['=10
-']'=11
-'{'=12
-'}'=13
-'('=14
-')'=15
-'.'=16
-','=17
-'-'=18
-'!'=19
-'?'=20
-':'=21
-'+'=22
-'*'=23
-'/'=24
-'%'=25
-'true'=26
-'false'=27
-'null'=28
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/etcd/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
deleted file mode 100644
index 969a598618..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
-
-package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
-
-// BaseCELListener is a complete listener for a parse tree produced by CELParser.
-type BaseCELListener struct{}
-
-var _ CELListener = &BaseCELListener{}
-
-// VisitTerminal is called when a terminal node is visited.
-func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {}
-
-// VisitErrorNode is called when an error node is visited.
-func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {}
-
-// EnterEveryRule is called when any rule is entered.
-func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}
-
-// ExitEveryRule is called when any rule is exited.
-func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}
-
-// EnterStart is called when production start is entered.
-func (s *BaseCELListener) EnterStart(ctx *StartContext) {}
-
-// ExitStart is called when production start is exited.
-func (s *BaseCELListener) ExitStart(ctx *StartContext) {}
-
-// EnterExpr is called when production expr is entered.
-func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {}
-
-// ExitExpr is called when production expr is exited.
-func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {}
-
-// EnterConditionalOr is called when production conditionalOr is entered.
-func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {}
-
-// ExitConditionalOr is called when production conditionalOr is exited.
-func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {}
-
-// EnterConditionalAnd is called when production conditionalAnd is entered.
-func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {}
-
-// ExitConditionalAnd is called when production conditionalAnd is exited.
-func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {}
-
-// EnterRelation is called when production relation is entered.
-func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {}
-
-// ExitRelation is called when production relation is exited.
-func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {}
-
-// EnterCalc is called when production calc is entered.
-func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {}
-
-// ExitCalc is called when production calc is exited.
-func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {}
-
-// EnterMemberExpr is called when production MemberExpr is entered.
-func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {}
-
-// ExitMemberExpr is called when production MemberExpr is exited.
-func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {}
-
-// EnterLogicalNot is called when production LogicalNot is entered.
-func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {}
-
-// ExitLogicalNot is called when production LogicalNot is exited.
-func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {}
-
-// EnterNegate is called when production Negate is entered.
-func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {}
-
-// ExitNegate is called when production Negate is exited.
-func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {}
-
-// EnterSelectOrCall is called when production SelectOrCall is entered.
-func (s *BaseCELListener) EnterSelectOrCall(ctx *SelectOrCallContext) {}
-
-// ExitSelectOrCall is called when production SelectOrCall is exited.
-func (s *BaseCELListener) ExitSelectOrCall(ctx *SelectOrCallContext) {}
-
-// EnterPrimaryExpr is called when production PrimaryExpr is entered.
-func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {}
-
-// ExitPrimaryExpr is called when production PrimaryExpr is exited.
-func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {}
-
-// EnterIndex is called when production Index is entered.
-func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {}
-
-// ExitIndex is called when production Index is exited.
-func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {}
-
-// EnterCreateMessage is called when production CreateMessage is entered.
-func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
-
-// ExitCreateMessage is called when production CreateMessage is exited.
-func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
-
-// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered.
-func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
-
-// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited.
-func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
-
-// EnterNested is called when production Nested is entered.
-func (s *BaseCELListener) EnterNested(ctx *NestedContext) {}
-
-// ExitNested is called when production Nested is exited.
-func (s *BaseCELListener) ExitNested(ctx *NestedContext) {}
-
-// EnterCreateList is called when production CreateList is entered.
-func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {}
-
-// ExitCreateList is called when production CreateList is exited.
-func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {}
-
-// EnterCreateStruct is called when production CreateStruct is entered.
-func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {}
-
-// ExitCreateStruct is called when production CreateStruct is exited.
-func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {}
-
-// EnterConstantLiteral is called when production ConstantLiteral is entered.
-func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {}
-
-// ExitConstantLiteral is called when production ConstantLiteral is exited.
-func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {}
-
-// EnterExprList is called when production exprList is entered.
-func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {}
-
-// ExitExprList is called when production exprList is exited.
-func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {}
-
-// EnterFieldInitializerList is called when production fieldInitializerList is entered.
-func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {}
-
-// ExitFieldInitializerList is called when production fieldInitializerList is exited.
-func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {}
-
-// EnterMapInitializerList is called when production mapInitializerList is entered.
-func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {}
-
-// ExitMapInitializerList is called when production mapInitializerList is exited.
-func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}
-
-// EnterInt is called when production Int is entered.
-func (s *BaseCELListener) EnterInt(ctx *IntContext) {}
-
-// ExitInt is called when production Int is exited.
-func (s *BaseCELListener) ExitInt(ctx *IntContext) {}
-
-// EnterUint is called when production Uint is entered.
-func (s *BaseCELListener) EnterUint(ctx *UintContext) {}
-
-// ExitUint is called when production Uint is exited.
-func (s *BaseCELListener) ExitUint(ctx *UintContext) {}
-
-// EnterDouble is called when production Double is entered.
-func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {}
-
-// ExitDouble is called when production Double is exited.
-func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {}
-
-// EnterString is called when production String is entered.
-func (s *BaseCELListener) EnterString(ctx *StringContext) {}
-
-// ExitString is called when production String is exited.
-func (s *BaseCELListener) ExitString(ctx *StringContext) {}
-
-// EnterBytes is called when production Bytes is entered.
-func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {}
-
-// ExitBytes is called when production Bytes is exited.
-func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {}
-
-// EnterBoolTrue is called when production BoolTrue is entered.
-func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {}
-
-// ExitBoolTrue is called when production BoolTrue is exited.
-func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {}
-
-// EnterBoolFalse is called when production BoolFalse is entered.
-func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {}
-
-// ExitBoolFalse is called when production BoolFalse is exited.
-func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {}
-
-// EnterNull is called when production Null is entered.
-func (s *BaseCELListener) EnterNull(ctx *NullContext) {}
-
-// ExitNull is called when production Null is exited.
-func (s *BaseCELListener) ExitNull(ctx *NullContext) {}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/etcd/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
deleted file mode 100644
index 8e84579ed1..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
-
-package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
-
-type BaseCELVisitor struct {
- *antlr.BaseParseTreeVisitor
-}
-
-func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitSelectOrCall(ctx *SelectOrCallContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitBoolTrue(ctx *BoolTrueContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} {
- return v.VisitChildren(ctx)
-}
-
-func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} {
- return v.VisitChildren(ctx)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/etcd/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
deleted file mode 100644
index 7b4cca62e6..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
-
-package gen
-
-import (
- "fmt"
- "sync"
- "unicode"
-
- "github.com/antlr/antlr4/runtime/Go/antlr"
-)
-
-// Suppress unused import error
-var _ = fmt.Printf
-var _ = sync.Once{}
-var _ = unicode.IsLetter
-
-type CELLexer struct {
- *antlr.BaseLexer
- channelNames []string
- modeNames []string
- // TODO: EOF string
-}
-
-var cellexerLexerStaticData struct {
- once sync.Once
- serializedATN []int32
- channelNames []string
- modeNames []string
- literalNames []string
- symbolicNames []string
- ruleNames []string
- predictionContextCache *antlr.PredictionContextCache
- atn *antlr.ATN
- decisionToDFA []*antlr.DFA
-}
-
-func cellexerLexerInit() {
- staticData := &cellexerLexerStaticData
- staticData.channelNames = []string{
- "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
- }
- staticData.modeNames = []string{
- "DEFAULT_MODE",
- }
- staticData.literalNames = []string{
- "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
- "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
- "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
- }
- staticData.symbolicNames = []string{
- "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
- "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
- "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
- "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
- "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
- "STRING", "BYTES", "IDENTIFIER",
- }
- staticData.ruleNames = []string{
- "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
- "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
- "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
- "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
- "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW",
- "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ",
- "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING",
- "BYTES", "IDENTIFIER",
- }
- staticData.predictionContextCache = antlr.NewPredictionContextCache()
- staticData.serializedATN = []int32{
- 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
- 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
- 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
- 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
- 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
- 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
- 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
- 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7,
- 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46,
- 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4,
- 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8,
- 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13,
- 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1,
- 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24,
- 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1,
- 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29,
- 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31,
- 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1,
- 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36,
- 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1,
- 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38,
- 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39,
- 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40,
- 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11,
- 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253,
- 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261,
- 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1,
- 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11,
- 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42,
- 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43,
- 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43,
- 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44,
- 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5,
- 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44,
- 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1,
- 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349,
- 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
- 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44,
- 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1,
- 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44,
- 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
- 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44,
- 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1,
- 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46,
- 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7,
- 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27,
- 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45,
- 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0,
- 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31,
- 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122,
- 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97,
- 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96,
- 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120,
- 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117,
- 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92,
- 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39,
- 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5,
- 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13,
- 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0,
- 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0,
- 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0,
- 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0,
- 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1,
- 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81,
- 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0,
- 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0,
- 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0,
- 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17,
- 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1,
- 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0,
- 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138,
- 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0,
- 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152,
- 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0,
- 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183,
- 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0,
- 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227,
- 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0,
- 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413,
- 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0,
- 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102,
- 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5,
- 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0,
- 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111,
- 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5,
- 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124,
- 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0,
- 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124,
- 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26,
- 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41,
- 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0,
- 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137,
- 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5,
- 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0,
- 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147,
- 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5,
- 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114,
- 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0,
- 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0,
- 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0,
- 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0,
- 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169,
- 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2,
- 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0,
- 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178,
- 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179,
- 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3,
- 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187,
- 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190,
- 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189,
- 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57,
- 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28,
- 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55,
- 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203,
- 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207,
- 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210,
- 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225,
- 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3,
- 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3,
- 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3,
- 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0,
- 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0,
- 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229,
- 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80,
- 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1,
- 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0,
- 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241,
- 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246,
- 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1,
- 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0,
- 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
- 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255,
- 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275,
- 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1,
- 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0,
- 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0,
- 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269,
- 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273,
- 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1,
- 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0,
- 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278,
- 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290,
- 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1,
- 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0,
- 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0,
- 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293,
- 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1,
- 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0,
- 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0,
- 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303,
- 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306,
- 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0,
- 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0,
- 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313,
- 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316,
- 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34,
- 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0,
- 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324,
- 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324,
- 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5,
- 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69,
- 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0,
- 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337,
- 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341,
- 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5,
- 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69,
- 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0,
- 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351,
- 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355,
- 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5,
- 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0,
- 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0,
- 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366,
- 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368,
- 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0,
- 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0,
- 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378,
- 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383,
- 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0,
- 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0,
- 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390,
- 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394,
- 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9,
- 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0,
- 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402,
- 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407,
- 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0,
- 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0,
- 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409,
- 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3,
- 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0,
- 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30,
- 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418,
- 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421,
- 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181,
- 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294,
- 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406,
- 413, 418, 420, 1, 0, 1, 0,
- }
- deserializer := antlr.NewATNDeserializer(nil)
- staticData.atn = deserializer.Deserialize(staticData.serializedATN)
- atn := staticData.atn
- staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
- decisionToDFA := staticData.decisionToDFA
- for index, state := range atn.DecisionToState {
- decisionToDFA[index] = antlr.NewDFA(state, index)
- }
-}
-
-// CELLexerInit initializes any static state used to implement CELLexer. By default the
-// static state used to implement the lexer is lazily initialized during the first call to
-// NewCELLexer(). You can call this function if you wish to initialize the static state ahead
-// of time.
-func CELLexerInit() {
- staticData := &cellexerLexerStaticData
- staticData.once.Do(cellexerLexerInit)
-}
-
-// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream.
-func NewCELLexer(input antlr.CharStream) *CELLexer {
- CELLexerInit()
- l := new(CELLexer)
- l.BaseLexer = antlr.NewBaseLexer(input)
- staticData := &cellexerLexerStaticData
- l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
- l.channelNames = staticData.channelNames
- l.modeNames = staticData.modeNames
- l.RuleNames = staticData.ruleNames
- l.LiteralNames = staticData.literalNames
- l.SymbolicNames = staticData.symbolicNames
- l.GrammarFileName = "CEL.g4"
- // TODO: l.EOF = antlr.TokenEOF
-
- return l
-}
-
-// CELLexer tokens.
-const (
- CELLexerEQUALS = 1
- CELLexerNOT_EQUALS = 2
- CELLexerIN = 3
- CELLexerLESS = 4
- CELLexerLESS_EQUALS = 5
- CELLexerGREATER_EQUALS = 6
- CELLexerGREATER = 7
- CELLexerLOGICAL_AND = 8
- CELLexerLOGICAL_OR = 9
- CELLexerLBRACKET = 10
- CELLexerRPRACKET = 11
- CELLexerLBRACE = 12
- CELLexerRBRACE = 13
- CELLexerLPAREN = 14
- CELLexerRPAREN = 15
- CELLexerDOT = 16
- CELLexerCOMMA = 17
- CELLexerMINUS = 18
- CELLexerEXCLAM = 19
- CELLexerQUESTIONMARK = 20
- CELLexerCOLON = 21
- CELLexerPLUS = 22
- CELLexerSTAR = 23
- CELLexerSLASH = 24
- CELLexerPERCENT = 25
- CELLexerCEL_TRUE = 26
- CELLexerCEL_FALSE = 27
- CELLexerNUL = 28
- CELLexerWHITESPACE = 29
- CELLexerCOMMENT = 30
- CELLexerNUM_FLOAT = 31
- CELLexerNUM_INT = 32
- CELLexerNUM_UINT = 33
- CELLexerSTRING = 34
- CELLexerBYTES = 35
- CELLexerIDENTIFIER = 36
-)
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/etcd/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
deleted file mode 100644
index 1b631b6e1b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
-
-package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
-
-// CELListener is a complete listener for a parse tree produced by CELParser.
-type CELListener interface {
- antlr.ParseTreeListener
-
- // EnterStart is called when entering the start production.
- EnterStart(c *StartContext)
-
- // EnterExpr is called when entering the expr production.
- EnterExpr(c *ExprContext)
-
- // EnterConditionalOr is called when entering the conditionalOr production.
- EnterConditionalOr(c *ConditionalOrContext)
-
- // EnterConditionalAnd is called when entering the conditionalAnd production.
- EnterConditionalAnd(c *ConditionalAndContext)
-
- // EnterRelation is called when entering the relation production.
- EnterRelation(c *RelationContext)
-
- // EnterCalc is called when entering the calc production.
- EnterCalc(c *CalcContext)
-
- // EnterMemberExpr is called when entering the MemberExpr production.
- EnterMemberExpr(c *MemberExprContext)
-
- // EnterLogicalNot is called when entering the LogicalNot production.
- EnterLogicalNot(c *LogicalNotContext)
-
- // EnterNegate is called when entering the Negate production.
- EnterNegate(c *NegateContext)
-
- // EnterSelectOrCall is called when entering the SelectOrCall production.
- EnterSelectOrCall(c *SelectOrCallContext)
-
- // EnterPrimaryExpr is called when entering the PrimaryExpr production.
- EnterPrimaryExpr(c *PrimaryExprContext)
-
- // EnterIndex is called when entering the Index production.
- EnterIndex(c *IndexContext)
-
- // EnterCreateMessage is called when entering the CreateMessage production.
- EnterCreateMessage(c *CreateMessageContext)
-
- // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
- EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
-
- // EnterNested is called when entering the Nested production.
- EnterNested(c *NestedContext)
-
- // EnterCreateList is called when entering the CreateList production.
- EnterCreateList(c *CreateListContext)
-
- // EnterCreateStruct is called when entering the CreateStruct production.
- EnterCreateStruct(c *CreateStructContext)
-
- // EnterConstantLiteral is called when entering the ConstantLiteral production.
- EnterConstantLiteral(c *ConstantLiteralContext)
-
- // EnterExprList is called when entering the exprList production.
- EnterExprList(c *ExprListContext)
-
- // EnterFieldInitializerList is called when entering the fieldInitializerList production.
- EnterFieldInitializerList(c *FieldInitializerListContext)
-
- // EnterMapInitializerList is called when entering the mapInitializerList production.
- EnterMapInitializerList(c *MapInitializerListContext)
-
- // EnterInt is called when entering the Int production.
- EnterInt(c *IntContext)
-
- // EnterUint is called when entering the Uint production.
- EnterUint(c *UintContext)
-
- // EnterDouble is called when entering the Double production.
- EnterDouble(c *DoubleContext)
-
- // EnterString is called when entering the String production.
- EnterString(c *StringContext)
-
- // EnterBytes is called when entering the Bytes production.
- EnterBytes(c *BytesContext)
-
- // EnterBoolTrue is called when entering the BoolTrue production.
- EnterBoolTrue(c *BoolTrueContext)
-
- // EnterBoolFalse is called when entering the BoolFalse production.
- EnterBoolFalse(c *BoolFalseContext)
-
- // EnterNull is called when entering the Null production.
- EnterNull(c *NullContext)
-
- // ExitStart is called when exiting the start production.
- ExitStart(c *StartContext)
-
- // ExitExpr is called when exiting the expr production.
- ExitExpr(c *ExprContext)
-
- // ExitConditionalOr is called when exiting the conditionalOr production.
- ExitConditionalOr(c *ConditionalOrContext)
-
- // ExitConditionalAnd is called when exiting the conditionalAnd production.
- ExitConditionalAnd(c *ConditionalAndContext)
-
- // ExitRelation is called when exiting the relation production.
- ExitRelation(c *RelationContext)
-
- // ExitCalc is called when exiting the calc production.
- ExitCalc(c *CalcContext)
-
- // ExitMemberExpr is called when exiting the MemberExpr production.
- ExitMemberExpr(c *MemberExprContext)
-
- // ExitLogicalNot is called when exiting the LogicalNot production.
- ExitLogicalNot(c *LogicalNotContext)
-
- // ExitNegate is called when exiting the Negate production.
- ExitNegate(c *NegateContext)
-
- // ExitSelectOrCall is called when exiting the SelectOrCall production.
- ExitSelectOrCall(c *SelectOrCallContext)
-
- // ExitPrimaryExpr is called when exiting the PrimaryExpr production.
- ExitPrimaryExpr(c *PrimaryExprContext)
-
- // ExitIndex is called when exiting the Index production.
- ExitIndex(c *IndexContext)
-
- // ExitCreateMessage is called when exiting the CreateMessage production.
- ExitCreateMessage(c *CreateMessageContext)
-
- // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
- ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
-
- // ExitNested is called when exiting the Nested production.
- ExitNested(c *NestedContext)
-
- // ExitCreateList is called when exiting the CreateList production.
- ExitCreateList(c *CreateListContext)
-
- // ExitCreateStruct is called when exiting the CreateStruct production.
- ExitCreateStruct(c *CreateStructContext)
-
- // ExitConstantLiteral is called when exiting the ConstantLiteral production.
- ExitConstantLiteral(c *ConstantLiteralContext)
-
- // ExitExprList is called when exiting the exprList production.
- ExitExprList(c *ExprListContext)
-
- // ExitFieldInitializerList is called when exiting the fieldInitializerList production.
- ExitFieldInitializerList(c *FieldInitializerListContext)
-
- // ExitMapInitializerList is called when exiting the mapInitializerList production.
- ExitMapInitializerList(c *MapInitializerListContext)
-
- // ExitInt is called when exiting the Int production.
- ExitInt(c *IntContext)
-
- // ExitUint is called when exiting the Uint production.
- ExitUint(c *UintContext)
-
- // ExitDouble is called when exiting the Double production.
- ExitDouble(c *DoubleContext)
-
- // ExitString is called when exiting the String production.
- ExitString(c *StringContext)
-
- // ExitBytes is called when exiting the Bytes production.
- ExitBytes(c *BytesContext)
-
- // ExitBoolTrue is called when exiting the BoolTrue production.
- ExitBoolTrue(c *BoolTrueContext)
-
- // ExitBoolFalse is called when exiting the BoolFalse production.
- ExitBoolFalse(c *BoolFalseContext)
-
- // ExitNull is called when exiting the Null production.
- ExitNull(c *NullContext)
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/etcd/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
deleted file mode 100644
index afb3fe0d1c..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
+++ /dev/null
@@ -1,4650 +0,0 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
-
-package gen // CEL
-import (
- "fmt"
- "strconv"
- "sync"
-
- "github.com/antlr/antlr4/runtime/Go/antlr"
-)
-
-// Suppress unused import errors
-var _ = fmt.Printf
-var _ = strconv.Itoa
-var _ = sync.Once{}
-
-type CELParser struct {
- *antlr.BaseParser
-}
-
-var celParserStaticData struct {
- once sync.Once
- serializedATN []int32
- literalNames []string
- symbolicNames []string
- ruleNames []string
- predictionContextCache *antlr.PredictionContextCache
- atn *antlr.ATN
- decisionToDFA []*antlr.DFA
-}
-
-func celParserInit() {
- staticData := &celParserStaticData
- staticData.literalNames = []string{
- "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
- "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
- "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
- }
- staticData.symbolicNames = []string{
- "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
- "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
- "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
- "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
- "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
- "STRING", "BYTES", "IDENTIFIER",
- }
- staticData.ruleNames = []string{
- "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
- "unary", "member", "primary", "exprList", "fieldInitializerList", "mapInitializerList",
- "literal",
- }
- staticData.predictionContextCache = antlr.NewPredictionContextCache()
- staticData.serializedATN = []int32{
- 4, 1, 36, 209, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
- 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
- 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 41, 8, 2, 10, 2,
- 12, 2, 44, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9,
- 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 60, 8, 4, 10, 4, 12, 4, 63,
- 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 74, 8,
- 5, 10, 5, 12, 5, 77, 9, 5, 1, 6, 1, 6, 4, 6, 81, 8, 6, 11, 6, 12, 6, 82,
- 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 3, 6, 92, 8, 6, 1,
- 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 102, 8, 7, 1, 7, 3,
- 7, 105, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 115,
- 8, 7, 1, 7, 3, 7, 118, 8, 7, 1, 7, 5, 7, 121, 8, 7, 10, 7, 12, 7, 124,
- 9, 7, 1, 8, 3, 8, 127, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 132, 8, 8, 1, 8, 3,
- 8, 135, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 143, 8, 8, 1, 8,
- 3, 8, 146, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8,
- 8, 1, 8, 1, 8, 3, 8, 158, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 163, 8, 9, 10,
- 9, 12, 9, 166, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5,
- 10, 175, 8, 10, 10, 10, 12, 10, 178, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11,
- 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 188, 8, 11, 10, 11, 12, 11, 191, 9,
- 11, 1, 12, 3, 12, 194, 8, 12, 1, 12, 1, 12, 1, 12, 3, 12, 199, 8, 12, 1,
- 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 3, 12, 207, 8, 12, 1, 12, 0, 3,
- 8, 10, 14, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 3, 1,
- 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 235, 0, 26, 1, 0, 0, 0, 2,
- 29, 1, 0, 0, 0, 4, 37, 1, 0, 0, 0, 6, 45, 1, 0, 0, 0, 8, 53, 1, 0, 0, 0,
- 10, 64, 1, 0, 0, 0, 12, 91, 1, 0, 0, 0, 14, 93, 1, 0, 0, 0, 16, 157, 1,
- 0, 0, 0, 18, 159, 1, 0, 0, 0, 20, 167, 1, 0, 0, 0, 22, 179, 1, 0, 0, 0,
- 24, 206, 1, 0, 0, 0, 26, 27, 3, 2, 1, 0, 27, 28, 5, 0, 0, 1, 28, 1, 1,
- 0, 0, 0, 29, 35, 3, 4, 2, 0, 30, 31, 5, 20, 0, 0, 31, 32, 3, 4, 2, 0, 32,
- 33, 5, 21, 0, 0, 33, 34, 3, 2, 1, 0, 34, 36, 1, 0, 0, 0, 35, 30, 1, 0,
- 0, 0, 35, 36, 1, 0, 0, 0, 36, 3, 1, 0, 0, 0, 37, 42, 3, 6, 3, 0, 38, 39,
- 5, 9, 0, 0, 39, 41, 3, 6, 3, 0, 40, 38, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0,
- 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 5, 1, 0, 0, 0, 44, 42, 1, 0,
- 0, 0, 45, 50, 3, 8, 4, 0, 46, 47, 5, 8, 0, 0, 47, 49, 3, 8, 4, 0, 48, 46,
- 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0,
- 51, 7, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 6, 4, -1, 0, 54, 55, 3,
- 10, 5, 0, 55, 61, 1, 0, 0, 0, 56, 57, 10, 1, 0, 0, 57, 58, 7, 0, 0, 0,
- 58, 60, 3, 8, 4, 2, 59, 56, 1, 0, 0, 0, 60, 63, 1, 0, 0, 0, 61, 59, 1,
- 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 9, 1, 0, 0, 0, 63, 61, 1, 0, 0, 0, 64,
- 65, 6, 5, -1, 0, 65, 66, 3, 12, 6, 0, 66, 75, 1, 0, 0, 0, 67, 68, 10, 2,
- 0, 0, 68, 69, 7, 1, 0, 0, 69, 74, 3, 10, 5, 3, 70, 71, 10, 1, 0, 0, 71,
- 72, 7, 2, 0, 0, 72, 74, 3, 10, 5, 2, 73, 67, 1, 0, 0, 0, 73, 70, 1, 0,
- 0, 0, 74, 77, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 11,
- 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 78, 92, 3, 14, 7, 0, 79, 81, 5, 19, 0,
- 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83,
- 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 92, 3, 14, 7, 0, 85, 87, 5, 18, 0,
- 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89,
- 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 92, 3, 14, 7, 0, 91, 78, 1, 0, 0, 0,
- 91, 80, 1, 0, 0, 0, 91, 86, 1, 0, 0, 0, 92, 13, 1, 0, 0, 0, 93, 94, 6,
- 7, -1, 0, 94, 95, 3, 16, 8, 0, 95, 122, 1, 0, 0, 0, 96, 97, 10, 3, 0, 0,
- 97, 98, 5, 16, 0, 0, 98, 104, 5, 36, 0, 0, 99, 101, 5, 14, 0, 0, 100, 102,
- 3, 18, 9, 0, 101, 100, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 103, 1, 0,
- 0, 0, 103, 105, 5, 15, 0, 0, 104, 99, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0,
- 105, 121, 1, 0, 0, 0, 106, 107, 10, 2, 0, 0, 107, 108, 5, 10, 0, 0, 108,
- 109, 3, 2, 1, 0, 109, 110, 5, 11, 0, 0, 110, 121, 1, 0, 0, 0, 111, 112,
- 10, 1, 0, 0, 112, 114, 5, 12, 0, 0, 113, 115, 3, 20, 10, 0, 114, 113, 1,
- 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 5, 17, 0,
- 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119,
- 121, 5, 13, 0, 0, 120, 96, 1, 0, 0, 0, 120, 106, 1, 0, 0, 0, 120, 111,
- 1, 0, 0, 0, 121, 124, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 122, 123, 1, 0,
- 0, 0, 123, 15, 1, 0, 0, 0, 124, 122, 1, 0, 0, 0, 125, 127, 5, 16, 0, 0,
- 126, 125, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128,
- 134, 5, 36, 0, 0, 129, 131, 5, 14, 0, 0, 130, 132, 3, 18, 9, 0, 131, 130,
- 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 135, 5, 15,
- 0, 0, 134, 129, 1, 0, 0, 0, 134, 135, 1, 0, 0, 0, 135, 158, 1, 0, 0, 0,
- 136, 137, 5, 14, 0, 0, 137, 138, 3, 2, 1, 0, 138, 139, 5, 15, 0, 0, 139,
- 158, 1, 0, 0, 0, 140, 142, 5, 10, 0, 0, 141, 143, 3, 18, 9, 0, 142, 141,
- 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 145, 1, 0, 0, 0, 144, 146, 5, 17,
- 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0,
- 147, 158, 5, 11, 0, 0, 148, 150, 5, 12, 0, 0, 149, 151, 3, 22, 11, 0, 150,
- 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154,
- 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0,
- 0, 0, 155, 158, 5, 13, 0, 0, 156, 158, 3, 24, 12, 0, 157, 126, 1, 0, 0,
- 0, 157, 136, 1, 0, 0, 0, 157, 140, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157,
- 156, 1, 0, 0, 0, 158, 17, 1, 0, 0, 0, 159, 164, 3, 2, 1, 0, 160, 161, 5,
- 17, 0, 0, 161, 163, 3, 2, 1, 0, 162, 160, 1, 0, 0, 0, 163, 166, 1, 0, 0,
- 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 19, 1, 0, 0, 0, 166,
- 164, 1, 0, 0, 0, 167, 168, 5, 36, 0, 0, 168, 169, 5, 21, 0, 0, 169, 176,
- 3, 2, 1, 0, 170, 171, 5, 17, 0, 0, 171, 172, 5, 36, 0, 0, 172, 173, 5,
- 21, 0, 0, 173, 175, 3, 2, 1, 0, 174, 170, 1, 0, 0, 0, 175, 178, 1, 0, 0,
- 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 21, 1, 0, 0, 0, 178,
- 176, 1, 0, 0, 0, 179, 180, 3, 2, 1, 0, 180, 181, 5, 21, 0, 0, 181, 189,
- 3, 2, 1, 0, 182, 183, 5, 17, 0, 0, 183, 184, 3, 2, 1, 0, 184, 185, 5, 21,
- 0, 0, 185, 186, 3, 2, 1, 0, 186, 188, 1, 0, 0, 0, 187, 182, 1, 0, 0, 0,
- 188, 191, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190,
- 23, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 192, 194, 5, 18, 0, 0, 193, 192,
- 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 195, 1, 0, 0, 0, 195, 207, 5, 32,
- 0, 0, 196, 207, 5, 33, 0, 0, 197, 199, 5, 18, 0, 0, 198, 197, 1, 0, 0,
- 0, 198, 199, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 207, 5, 31, 0, 0, 201,
- 207, 5, 34, 0, 0, 202, 207, 5, 35, 0, 0, 203, 207, 5, 26, 0, 0, 204, 207,
- 5, 27, 0, 0, 205, 207, 5, 28, 0, 0, 206, 193, 1, 0, 0, 0, 206, 196, 1,
- 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 206, 202, 1, 0, 0,
- 0, 206, 203, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207,
- 25, 1, 0, 0, 0, 29, 35, 42, 50, 61, 73, 75, 82, 88, 91, 101, 104, 114,
- 117, 120, 122, 126, 131, 134, 142, 145, 150, 153, 157, 164, 176, 189, 193,
- 198, 206,
- }
- deserializer := antlr.NewATNDeserializer(nil)
- staticData.atn = deserializer.Deserialize(staticData.serializedATN)
- atn := staticData.atn
- staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
- decisionToDFA := staticData.decisionToDFA
- for index, state := range atn.DecisionToState {
- decisionToDFA[index] = antlr.NewDFA(state, index)
- }
-}
-
-// CELParserInit initializes any static state used to implement CELParser. By default the
-// static state used to implement the parser is lazily initialized during the first call to
-// NewCELParser(). You can call this function if you wish to initialize the static state ahead
-// of time.
-func CELParserInit() {
- staticData := &celParserStaticData
- staticData.once.Do(celParserInit)
-}
-
-// NewCELParser produces a new parser instance for the optional input antlr.TokenStream.
-func NewCELParser(input antlr.TokenStream) *CELParser {
- CELParserInit()
- this := new(CELParser)
- this.BaseParser = antlr.NewBaseParser(input)
- staticData := &celParserStaticData
- this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
- this.RuleNames = staticData.ruleNames
- this.LiteralNames = staticData.literalNames
- this.SymbolicNames = staticData.symbolicNames
- this.GrammarFileName = "CEL.g4"
-
- return this
-}
-
-// CELParser tokens.
-const (
- CELParserEOF = antlr.TokenEOF
- CELParserEQUALS = 1
- CELParserNOT_EQUALS = 2
- CELParserIN = 3
- CELParserLESS = 4
- CELParserLESS_EQUALS = 5
- CELParserGREATER_EQUALS = 6
- CELParserGREATER = 7
- CELParserLOGICAL_AND = 8
- CELParserLOGICAL_OR = 9
- CELParserLBRACKET = 10
- CELParserRPRACKET = 11
- CELParserLBRACE = 12
- CELParserRBRACE = 13
- CELParserLPAREN = 14
- CELParserRPAREN = 15
- CELParserDOT = 16
- CELParserCOMMA = 17
- CELParserMINUS = 18
- CELParserEXCLAM = 19
- CELParserQUESTIONMARK = 20
- CELParserCOLON = 21
- CELParserPLUS = 22
- CELParserSTAR = 23
- CELParserSLASH = 24
- CELParserPERCENT = 25
- CELParserCEL_TRUE = 26
- CELParserCEL_FALSE = 27
- CELParserNUL = 28
- CELParserWHITESPACE = 29
- CELParserCOMMENT = 30
- CELParserNUM_FLOAT = 31
- CELParserNUM_INT = 32
- CELParserNUM_UINT = 33
- CELParserSTRING = 34
- CELParserBYTES = 35
- CELParserIDENTIFIER = 36
-)
-
-// CELParser rules.
-const (
- CELParserRULE_start = 0
- CELParserRULE_expr = 1
- CELParserRULE_conditionalOr = 2
- CELParserRULE_conditionalAnd = 3
- CELParserRULE_relation = 4
- CELParserRULE_calc = 5
- CELParserRULE_unary = 6
- CELParserRULE_member = 7
- CELParserRULE_primary = 8
- CELParserRULE_exprList = 9
- CELParserRULE_fieldInitializerList = 10
- CELParserRULE_mapInitializerList = 11
- CELParserRULE_literal = 12
-)
-
-// IStartContext is an interface to support dynamic dispatch.
-type IStartContext interface {
- antlr.ParserRuleContext
-
- // GetParser returns the parser.
- GetParser() antlr.Parser
-
- // GetE returns the e rule contexts.
- GetE() IExprContext
-
- // SetE sets the e rule contexts.
- SetE(IExprContext)
-
- // IsStartContext differentiates from other interfaces.
- IsStartContext()
-}
-
-type StartContext struct {
- *antlr.BaseParserRuleContext
- parser antlr.Parser
- e IExprContext
-}
-
-func NewEmptyStartContext() *StartContext {
- var p = new(StartContext)
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
- p.RuleIndex = CELParserRULE_start
- return p
-}
-
-func (*StartContext) IsStartContext() {}
-
-func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext {
- var p = new(StartContext)
-
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
- p.parser = parser
- p.RuleIndex = CELParserRULE_start
-
- return p
-}
-
-func (s *StartContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *StartContext) GetE() IExprContext { return s.e }
-
-func (s *StartContext) SetE(v IExprContext) { s.e = v }
-
-func (s *StartContext) EOF() antlr.TerminalNode {
- return s.GetToken(CELParserEOF, 0)
-}
-
-func (s *StartContext) Expr() IExprContext {
- var t antlr.RuleContext
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext)
- break
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(IExprContext)
-}
-
-func (s *StartContext) GetRuleContext() antlr.RuleContext {
- return s
-}
-
-func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
- return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.EnterStart(s)
- }
-}
-
-func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.ExitStart(s)
- }
-}
-
-func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
- switch t := visitor.(type) {
- case CELVisitor:
- return t.VisitStart(s)
-
- default:
- return t.VisitChildren(s)
- }
-}
-
-func (p *CELParser) Start() (localctx IStartContext) {
- this := p
- _ = this
-
- localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState())
- p.EnterRule(localctx, 0, CELParserRULE_start)
-
- defer func() {
- p.ExitRule()
- }()
-
- defer func() {
- if err := recover(); err != nil {
- if v, ok := err.(antlr.RecognitionException); ok {
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- } else {
- panic(err)
- }
- }
- }()
-
- p.EnterOuterAlt(localctx, 1)
- {
- p.SetState(26)
-
- var _x = p.Expr()
-
- localctx.(*StartContext).e = _x
- }
- {
- p.SetState(27)
- p.Match(CELParserEOF)
- }
-
- return localctx
-}
-
-// IExprContext is an interface to support dynamic dispatch.
-type IExprContext interface {
- antlr.ParserRuleContext
-
- // GetParser returns the parser.
- GetParser() antlr.Parser
-
- // GetOp returns the op token.
- GetOp() antlr.Token
-
- // SetOp sets the op token.
- SetOp(antlr.Token)
-
- // GetE returns the e rule contexts.
- GetE() IConditionalOrContext
-
- // GetE1 returns the e1 rule contexts.
- GetE1() IConditionalOrContext
-
- // GetE2 returns the e2 rule contexts.
- GetE2() IExprContext
-
- // SetE sets the e rule contexts.
- SetE(IConditionalOrContext)
-
- // SetE1 sets the e1 rule contexts.
- SetE1(IConditionalOrContext)
-
- // SetE2 sets the e2 rule contexts.
- SetE2(IExprContext)
-
- // IsExprContext differentiates from other interfaces.
- IsExprContext()
-}
-
-type ExprContext struct {
- *antlr.BaseParserRuleContext
- parser antlr.Parser
- e IConditionalOrContext
- op antlr.Token
- e1 IConditionalOrContext
- e2 IExprContext
-}
-
-func NewEmptyExprContext() *ExprContext {
- var p = new(ExprContext)
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
- p.RuleIndex = CELParserRULE_expr
- return p
-}
-
-func (*ExprContext) IsExprContext() {}
-
-func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext {
- var p = new(ExprContext)
-
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
- p.parser = parser
- p.RuleIndex = CELParserRULE_expr
-
- return p
-}
-
-func (s *ExprContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *ExprContext) GetOp() antlr.Token { return s.op }
-
-func (s *ExprContext) SetOp(v antlr.Token) { s.op = v }
-
-func (s *ExprContext) GetE() IConditionalOrContext { return s.e }
-
-func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 }
-
-func (s *ExprContext) GetE2() IExprContext { return s.e2 }
-
-func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v }
-
-func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v }
-
-func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v }
-
-func (s *ExprContext) AllConditionalOr() []IConditionalOrContext {
- children := s.GetChildren()
- len := 0
- for _, ctx := range children {
- if _, ok := ctx.(IConditionalOrContext); ok {
- len++
- }
- }
-
- tst := make([]IConditionalOrContext, len)
- i := 0
- for _, ctx := range children {
- if t, ok := ctx.(IConditionalOrContext); ok {
- tst[i] = t.(IConditionalOrContext)
- i++
- }
- }
-
- return tst
-}
-
-func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext {
- var t antlr.RuleContext
- j := 0
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(IConditionalOrContext); ok {
- if j == i {
- t = ctx.(antlr.RuleContext)
- break
- }
- j++
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(IConditionalOrContext)
-}
-
-func (s *ExprContext) COLON() antlr.TerminalNode {
- return s.GetToken(CELParserCOLON, 0)
-}
-
-func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode {
- return s.GetToken(CELParserQUESTIONMARK, 0)
-}
-
-func (s *ExprContext) Expr() IExprContext {
- var t antlr.RuleContext
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(IExprContext); ok {
- t = ctx.(antlr.RuleContext)
- break
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(IExprContext)
-}
-
-func (s *ExprContext) GetRuleContext() antlr.RuleContext {
- return s
-}
-
-func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
- return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.EnterExpr(s)
- }
-}
-
-func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.ExitExpr(s)
- }
-}
-
-func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
- switch t := visitor.(type) {
- case CELVisitor:
- return t.VisitExpr(s)
-
- default:
- return t.VisitChildren(s)
- }
-}
-
-func (p *CELParser) Expr() (localctx IExprContext) {
- this := p
- _ = this
-
- localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState())
- p.EnterRule(localctx, 2, CELParserRULE_expr)
- var _la int
-
- defer func() {
- p.ExitRule()
- }()
-
- defer func() {
- if err := recover(); err != nil {
- if v, ok := err.(antlr.RecognitionException); ok {
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- } else {
- panic(err)
- }
- }
- }()
-
- p.EnterOuterAlt(localctx, 1)
- {
- p.SetState(29)
-
- var _x = p.ConditionalOr()
-
- localctx.(*ExprContext).e = _x
- }
- p.SetState(35)
- p.GetErrorHandler().Sync(p)
- _la = p.GetTokenStream().LA(1)
-
- if _la == CELParserQUESTIONMARK {
- {
- p.SetState(30)
-
- var _m = p.Match(CELParserQUESTIONMARK)
-
- localctx.(*ExprContext).op = _m
- }
- {
- p.SetState(31)
-
- var _x = p.ConditionalOr()
-
- localctx.(*ExprContext).e1 = _x
- }
- {
- p.SetState(32)
- p.Match(CELParserCOLON)
- }
- {
- p.SetState(33)
-
- var _x = p.Expr()
-
- localctx.(*ExprContext).e2 = _x
- }
-
- }
-
- return localctx
-}
-
-// IConditionalOrContext is an interface to support dynamic dispatch.
-type IConditionalOrContext interface {
- antlr.ParserRuleContext
-
- // GetParser returns the parser.
- GetParser() antlr.Parser
-
- // GetS9 returns the s9 token.
- GetS9() antlr.Token
-
- // SetS9 sets the s9 token.
- SetS9(antlr.Token)
-
- // GetOps returns the ops token list.
- GetOps() []antlr.Token
-
- // SetOps sets the ops token list.
- SetOps([]antlr.Token)
-
- // GetE returns the e rule contexts.
- GetE() IConditionalAndContext
-
- // Get_conditionalAnd returns the _conditionalAnd rule contexts.
- Get_conditionalAnd() IConditionalAndContext
-
- // SetE sets the e rule contexts.
- SetE(IConditionalAndContext)
-
- // Set_conditionalAnd sets the _conditionalAnd rule contexts.
- Set_conditionalAnd(IConditionalAndContext)
-
- // GetE1 returns the e1 rule context list.
- GetE1() []IConditionalAndContext
-
- // SetE1 sets the e1 rule context list.
- SetE1([]IConditionalAndContext)
-
- // IsConditionalOrContext differentiates from other interfaces.
- IsConditionalOrContext()
-}
-
-type ConditionalOrContext struct {
- *antlr.BaseParserRuleContext
- parser antlr.Parser
- e IConditionalAndContext
- s9 antlr.Token
- ops []antlr.Token
- _conditionalAnd IConditionalAndContext
- e1 []IConditionalAndContext
-}
-
-func NewEmptyConditionalOrContext() *ConditionalOrContext {
- var p = new(ConditionalOrContext)
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
- p.RuleIndex = CELParserRULE_conditionalOr
- return p
-}
-
-func (*ConditionalOrContext) IsConditionalOrContext() {}
-
-func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext {
- var p = new(ConditionalOrContext)
-
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
- p.parser = parser
- p.RuleIndex = CELParserRULE_conditionalOr
-
- return p
-}
-
-func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 }
-
-func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
-
-func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
-
-func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
-
-func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e }
-
-func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd }
-
-func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v }
-
-func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v }
-
-func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
-
-func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
-
-func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
- children := s.GetChildren()
- len := 0
- for _, ctx := range children {
- if _, ok := ctx.(IConditionalAndContext); ok {
- len++
- }
- }
-
- tst := make([]IConditionalAndContext, len)
- i := 0
- for _, ctx := range children {
- if t, ok := ctx.(IConditionalAndContext); ok {
- tst[i] = t.(IConditionalAndContext)
- i++
- }
- }
-
- return tst
-}
-
-func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext {
- var t antlr.RuleContext
- j := 0
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(IConditionalAndContext); ok {
- if j == i {
- t = ctx.(antlr.RuleContext)
- break
- }
- j++
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(IConditionalAndContext)
-}
-
-func (s *ConditionalOrContext) AllLOGICAL_OR() []antlr.TerminalNode {
- return s.GetTokens(CELParserLOGICAL_OR)
-}
-
-func (s *ConditionalOrContext) LOGICAL_OR(i int) antlr.TerminalNode {
- return s.GetToken(CELParserLOGICAL_OR, i)
-}
-
-func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext {
- return s
-}
-
-func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
- return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.EnterConditionalOr(s)
- }
-}
-
-func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.ExitConditionalOr(s)
- }
-}
-
-func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
- switch t := visitor.(type) {
- case CELVisitor:
- return t.VisitConditionalOr(s)
-
- default:
- return t.VisitChildren(s)
- }
-}
-
-func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
- this := p
- _ = this
-
- localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState())
- p.EnterRule(localctx, 4, CELParserRULE_conditionalOr)
- var _la int
-
- defer func() {
- p.ExitRule()
- }()
-
- defer func() {
- if err := recover(); err != nil {
- if v, ok := err.(antlr.RecognitionException); ok {
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- } else {
- panic(err)
- }
- }
- }()
-
- p.EnterOuterAlt(localctx, 1)
- {
- p.SetState(37)
-
- var _x = p.ConditionalAnd()
-
- localctx.(*ConditionalOrContext).e = _x
- }
- p.SetState(42)
- p.GetErrorHandler().Sync(p)
- _la = p.GetTokenStream().LA(1)
-
- for _la == CELParserLOGICAL_OR {
- {
- p.SetState(38)
-
- var _m = p.Match(CELParserLOGICAL_OR)
-
- localctx.(*ConditionalOrContext).s9 = _m
- }
- localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
- {
- p.SetState(39)
-
- var _x = p.ConditionalAnd()
-
- localctx.(*ConditionalOrContext)._conditionalAnd = _x
- }
- localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
-
- p.SetState(44)
- p.GetErrorHandler().Sync(p)
- _la = p.GetTokenStream().LA(1)
- }
-
- return localctx
-}
-
-// IConditionalAndContext is an interface to support dynamic dispatch.
-type IConditionalAndContext interface {
- antlr.ParserRuleContext
-
- // GetParser returns the parser.
- GetParser() antlr.Parser
-
- // GetS8 returns the s8 token.
- GetS8() antlr.Token
-
- // SetS8 sets the s8 token.
- SetS8(antlr.Token)
-
- // GetOps returns the ops token list.
- GetOps() []antlr.Token
-
- // SetOps sets the ops token list.
- SetOps([]antlr.Token)
-
- // GetE returns the e rule contexts.
- GetE() IRelationContext
-
- // Get_relation returns the _relation rule contexts.
- Get_relation() IRelationContext
-
- // SetE sets the e rule contexts.
- SetE(IRelationContext)
-
- // Set_relation sets the _relation rule contexts.
- Set_relation(IRelationContext)
-
- // GetE1 returns the e1 rule context list.
- GetE1() []IRelationContext
-
- // SetE1 sets the e1 rule context list.
- SetE1([]IRelationContext)
-
- // IsConditionalAndContext differentiates from other interfaces.
- IsConditionalAndContext()
-}
-
-type ConditionalAndContext struct {
- *antlr.BaseParserRuleContext
- parser antlr.Parser
- e IRelationContext
- s8 antlr.Token
- ops []antlr.Token
- _relation IRelationContext
- e1 []IRelationContext
-}
-
-func NewEmptyConditionalAndContext() *ConditionalAndContext {
- var p = new(ConditionalAndContext)
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
- p.RuleIndex = CELParserRULE_conditionalAnd
- return p
-}
-
-func (*ConditionalAndContext) IsConditionalAndContext() {}
-
-func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext {
- var p = new(ConditionalAndContext)
-
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
- p.parser = parser
- p.RuleIndex = CELParserRULE_conditionalAnd
-
- return p
-}
-
-func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 }
-
-func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }
-
-func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }
-
-func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
-
-func (s *ConditionalAndContext) GetE() IRelationContext { return s.e }
-
-func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation }
-
-func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v }
-
-func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v }
-
-func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
-
-func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
-
-func (s *ConditionalAndContext) AllRelation() []IRelationContext {
- children := s.GetChildren()
- len := 0
- for _, ctx := range children {
- if _, ok := ctx.(IRelationContext); ok {
- len++
- }
- }
-
- tst := make([]IRelationContext, len)
- i := 0
- for _, ctx := range children {
- if t, ok := ctx.(IRelationContext); ok {
- tst[i] = t.(IRelationContext)
- i++
- }
- }
-
- return tst
-}
-
-func (s *ConditionalAndContext) Relation(i int) IRelationContext {
- var t antlr.RuleContext
- j := 0
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(IRelationContext); ok {
- if j == i {
- t = ctx.(antlr.RuleContext)
- break
- }
- j++
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(IRelationContext)
-}
-
-func (s *ConditionalAndContext) AllLOGICAL_AND() []antlr.TerminalNode {
- return s.GetTokens(CELParserLOGICAL_AND)
-}
-
-func (s *ConditionalAndContext) LOGICAL_AND(i int) antlr.TerminalNode {
- return s.GetToken(CELParserLOGICAL_AND, i)
-}
-
-func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext {
- return s
-}
-
-func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
- return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.EnterConditionalAnd(s)
- }
-}
-
-func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.ExitConditionalAnd(s)
- }
-}
-
-func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
- switch t := visitor.(type) {
- case CELVisitor:
- return t.VisitConditionalAnd(s)
-
- default:
- return t.VisitChildren(s)
- }
-}
-
-func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
- this := p
- _ = this
-
- localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState())
- p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd)
- var _la int
-
- defer func() {
- p.ExitRule()
- }()
-
- defer func() {
- if err := recover(); err != nil {
- if v, ok := err.(antlr.RecognitionException); ok {
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- } else {
- panic(err)
- }
- }
- }()
-
- p.EnterOuterAlt(localctx, 1)
- {
- p.SetState(45)
-
- var _x = p.relation(0)
-
- localctx.(*ConditionalAndContext).e = _x
- }
- p.SetState(50)
- p.GetErrorHandler().Sync(p)
- _la = p.GetTokenStream().LA(1)
-
- for _la == CELParserLOGICAL_AND {
- {
- p.SetState(46)
-
- var _m = p.Match(CELParserLOGICAL_AND)
-
- localctx.(*ConditionalAndContext).s8 = _m
- }
- localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
- {
- p.SetState(47)
-
- var _x = p.relation(0)
-
- localctx.(*ConditionalAndContext)._relation = _x
- }
- localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
-
- p.SetState(52)
- p.GetErrorHandler().Sync(p)
- _la = p.GetTokenStream().LA(1)
- }
-
- return localctx
-}
-
-// IRelationContext is an interface to support dynamic dispatch.
-type IRelationContext interface {
- antlr.ParserRuleContext
-
- // GetParser returns the parser.
- GetParser() antlr.Parser
-
- // GetOp returns the op token.
- GetOp() antlr.Token
-
- // SetOp sets the op token.
- SetOp(antlr.Token)
-
- // IsRelationContext differentiates from other interfaces.
- IsRelationContext()
-}
-
-type RelationContext struct {
- *antlr.BaseParserRuleContext
- parser antlr.Parser
- op antlr.Token
-}
-
-func NewEmptyRelationContext() *RelationContext {
- var p = new(RelationContext)
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
- p.RuleIndex = CELParserRULE_relation
- return p
-}
-
-func (*RelationContext) IsRelationContext() {}
-
-func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext {
- var p = new(RelationContext)
-
- p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
- p.parser = parser
- p.RuleIndex = CELParserRULE_relation
-
- return p
-}
-
-func (s *RelationContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *RelationContext) GetOp() antlr.Token { return s.op }
-
-func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }
-
-func (s *RelationContext) Calc() ICalcContext {
- var t antlr.RuleContext
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(ICalcContext); ok {
- t = ctx.(antlr.RuleContext)
- break
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(ICalcContext)
-}
-
-func (s *RelationContext) AllRelation() []IRelationContext {
- children := s.GetChildren()
- len := 0
- for _, ctx := range children {
- if _, ok := ctx.(IRelationContext); ok {
- len++
- }
- }
-
- tst := make([]IRelationContext, len)
- i := 0
- for _, ctx := range children {
- if t, ok := ctx.(IRelationContext); ok {
- tst[i] = t.(IRelationContext)
- i++
- }
- }
-
- return tst
-}
-
-func (s *RelationContext) Relation(i int) IRelationContext {
- var t antlr.RuleContext
- j := 0
- for _, ctx := range s.GetChildren() {
- if _, ok := ctx.(IRelationContext); ok {
- if j == i {
- t = ctx.(antlr.RuleContext)
- break
- }
- j++
- }
- }
-
- if t == nil {
- return nil
- }
-
- return t.(IRelationContext)
-}
-
-func (s *RelationContext) LESS() antlr.TerminalNode {
- return s.GetToken(CELParserLESS, 0)
-}
-
-func (s *RelationContext) LESS_EQUALS() antlr.TerminalNode {
- return s.GetToken(CELParserLESS_EQUALS, 0)
-}
-
-func (s *RelationContext) GREATER_EQUALS() antlr.TerminalNode {
- return s.GetToken(CELParserGREATER_EQUALS, 0)
-}
-
-func (s *RelationContext) GREATER() antlr.TerminalNode {
- return s.GetToken(CELParserGREATER, 0)
-}
-
-func (s *RelationContext) EQUALS() antlr.TerminalNode {
- return s.GetToken(CELParserEQUALS, 0)
-}
-
-func (s *RelationContext) NOT_EQUALS() antlr.TerminalNode {
- return s.GetToken(CELParserNOT_EQUALS, 0)
-}
-
-func (s *RelationContext) IN() antlr.TerminalNode {
- return s.GetToken(CELParserIN, 0)
-}
-
-func (s *RelationContext) GetRuleContext() antlr.RuleContext {
- return s
-}
-
-func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
- return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.EnterRelation(s)
- }
-}
-
-func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) {
- if listenerT, ok := listener.(CELListener); ok {
- listenerT.ExitRelation(s)
- }
-}
-
-func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
- switch t := visitor.(type) {
- case CELVisitor:
- return t.VisitRelation(s)
-
- default:
- return t.VisitChildren(s)
- }
-}
-
-func (p *CELParser) Relation() (localctx IRelationContext) {
- return p.relation(0)
-}
-
-func (p *CELParser) relation(_p int) (localctx IRelationContext) {
- this := p
- _ = this
-
- var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
- _parentState := p.GetState()
- localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState)
- var _prevctx IRelationContext = localctx
- var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
- _startState := 8
- p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p)
- var _la int
-
- defer func() {
- p.UnrollRecursionContexts(_parentctx)
- }()
-
- defer func() {
- if err := recover(); err != nil {
- if v, ok := err.(antlr.RecognitionException); ok {
- localctx.SetException(v)
- p.GetErrorHandler().ReportError(p, v)
- p.GetErrorHandler().Recover(p, v)
- } else {
- panic(err)
- }
- }
- }()
-
- var _alt int
-
- p.EnterOuterAlt(localctx, 1)
- {
- p.SetState(54)
- p.calc(0)
- }
-
- p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
- p.SetState(61)
- p.GetErrorHandler().Sync(p)
- _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
-
- for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
- if _alt == 1 {
- if p.GetParseListeners() != nil {
- p.TriggerExitRuleEvent()
- }
- _prevctx = localctx
- localctx = NewRelationContext(p, _parentctx, _parentState)
- p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
- p.SetState(56)
-
- if !(p.Precpred(p.GetParserRuleContext(), 1)) {
- panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
- }
- {
- p.SetState(57)
-
- var _lt = p.GetTokenStream().LT(1)
-
- localctx.(*RelationContext).op = _lt
-
- _la = p.GetTokenStream().LA(1)
-
- if !(((_la)&-(0x1f+1)) == 0 && ((1<-complete.jar.
-# 3. Modify the script below to refer to the current ANTLR version.
-# 4. Execute the generation script from the gen directory.
-# 5. Delete the jar and commit the regenerated sources.
-
-#!/bin/sh
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-# Generate AntLR artifacts.
-java -Xmx500M -cp ${DIR}/antlr-4.10.1-complete.jar org.antlr.v4.Tool \
- -Dlanguage=Go \
- -package gen \
- -o ${DIR} \
- -visitor ${DIR}/CEL.g4
-
diff --git a/etcd/vendor/github.com/google/cel-go/parser/helper.go b/etcd/vendor/github.com/google/cel-go/parser/helper.go
deleted file mode 100644
index be41339e3f..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/helper.go
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "sync"
-
- "github.com/antlr/antlr4/runtime/Go/antlr"
- "github.com/google/cel-go/common"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-type parserHelper struct {
- source common.Source
- nextID int64
- positions map[int64]int32
- macroCalls map[int64]*exprpb.Expr
-}
-
-func newParserHelper(source common.Source) *parserHelper {
- return &parserHelper{
- source: source,
- nextID: 1,
- positions: make(map[int64]int32),
- macroCalls: make(map[int64]*exprpb.Expr),
- }
-}
-
-func (p *parserHelper) getSourceInfo() *exprpb.SourceInfo {
- return &exprpb.SourceInfo{
- Location: p.source.Description(),
- Positions: p.positions,
- LineOffsets: p.source.LineOffsets(),
- MacroCalls: p.macroCalls}
-}
-
-func (p *parserHelper) newLiteral(ctx interface{}, value *exprpb.Constant) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: value}
- return exprNode
-}
-
-func (p *parserHelper) newLiteralBool(ctx interface{}, value bool) *exprpb.Expr {
- return p.newLiteral(ctx,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: value}})
-}
-
-func (p *parserHelper) newLiteralString(ctx interface{}, value string) *exprpb.Expr {
- return p.newLiteral(ctx,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: value}})
-}
-
-func (p *parserHelper) newLiteralBytes(ctx interface{}, value []byte) *exprpb.Expr {
- return p.newLiteral(ctx,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: value}})
-}
-
-func (p *parserHelper) newLiteralInt(ctx interface{}, value int64) *exprpb.Expr {
- return p.newLiteral(ctx,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: value}})
-}
-
-func (p *parserHelper) newLiteralUint(ctx interface{}, value uint64) *exprpb.Expr {
- return p.newLiteral(ctx, &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: value}})
-}
-
-func (p *parserHelper) newLiteralDouble(ctx interface{}, value float64) *exprpb.Expr {
- return p.newLiteral(ctx,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: value}})
-}
-
-func (p *parserHelper) newIdent(ctx interface{}, name string) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}}
- return exprNode
-}
-
-func (p *parserHelper) newSelect(ctx interface{}, operand *exprpb.Expr, field string) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_SelectExpr{
- SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field}}
- return exprNode
-}
-
-func (p *parserHelper) newPresenceTest(ctx interface{}, operand *exprpb.Expr, field string) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_SelectExpr{
- SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field, TestOnly: true}}
- return exprNode
-}
-
-func (p *parserHelper) newGlobalCall(ctx interface{}, function string, args ...*exprpb.Expr) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_CallExpr{
- CallExpr: &exprpb.Expr_Call{Function: function, Args: args}}
- return exprNode
-}
-
-func (p *parserHelper) newReceiverCall(ctx interface{}, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_CallExpr{
- CallExpr: &exprpb.Expr_Call{Function: function, Target: target, Args: args}}
- return exprNode
-}
-
-func (p *parserHelper) newList(ctx interface{}, elements ...*exprpb.Expr) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_ListExpr{
- ListExpr: &exprpb.Expr_CreateList{Elements: elements}}
- return exprNode
-}
-
-func (p *parserHelper) newMap(ctx interface{}, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_StructExpr{
- StructExpr: &exprpb.Expr_CreateStruct{Entries: entries}}
- return exprNode
-}
-
-func (p *parserHelper) newMapEntry(entryID int64, key *exprpb.Expr, value *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry {
- return &exprpb.Expr_CreateStruct_Entry{
- Id: entryID,
- KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{MapKey: key},
- Value: value}
-}
-
-func (p *parserHelper) newObject(ctx interface{},
- typeName string,
- entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_StructExpr{
- StructExpr: &exprpb.Expr_CreateStruct{
- MessageName: typeName,
- Entries: entries}}
- return exprNode
-}
-
-func (p *parserHelper) newObjectField(fieldID int64, field string, value *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry {
- return &exprpb.Expr_CreateStruct_Entry{
- Id: fieldID,
- KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{FieldKey: field},
- Value: value}
-}
-
-func (p *parserHelper) newComprehension(ctx interface{}, iterVar string,
- iterRange *exprpb.Expr,
- accuVar string,
- accuInit *exprpb.Expr,
- condition *exprpb.Expr,
- step *exprpb.Expr,
- result *exprpb.Expr) *exprpb.Expr {
- exprNode := p.newExpr(ctx)
- exprNode.ExprKind = &exprpb.Expr_ComprehensionExpr{
- ComprehensionExpr: &exprpb.Expr_Comprehension{
- AccuVar: accuVar,
- AccuInit: accuInit,
- IterVar: iterVar,
- IterRange: iterRange,
- LoopCondition: condition,
- LoopStep: step,
- Result: result}}
- return exprNode
-}
-
-func (p *parserHelper) newExpr(ctx interface{}) *exprpb.Expr {
- id, isID := ctx.(int64)
- if isID {
- return &exprpb.Expr{Id: id}
- }
- return &exprpb.Expr{Id: p.id(ctx)}
-}
-
-func (p *parserHelper) id(ctx interface{}) int64 {
- var location common.Location
- switch ctx.(type) {
- case antlr.ParserRuleContext:
- token := (ctx.(antlr.ParserRuleContext)).GetStart()
- location = p.source.NewLocation(token.GetLine(), token.GetColumn())
- case antlr.Token:
- token := ctx.(antlr.Token)
- location = p.source.NewLocation(token.GetLine(), token.GetColumn())
- case common.Location:
- location = ctx.(common.Location)
- default:
- // This should only happen if the ctx is nil
- return -1
- }
- id := p.nextID
- p.positions[id], _ = p.source.LocationOffset(location)
- p.nextID++
- return id
-}
-
-func (p *parserHelper) getLocation(id int64) common.Location {
- characterOffset := p.positions[id]
- location, _ := p.source.OffsetLocation(characterOffset)
- return location
-}
-
-// buildMacroCallArg iterates the expression and returns a new expression
-// where all macros have been replaced by their IDs in MacroCalls
-func (p *parserHelper) buildMacroCallArg(expr *exprpb.Expr) *exprpb.Expr {
- if _, found := p.macroCalls[expr.GetId()]; found {
- return &exprpb.Expr{Id: expr.GetId()}
- }
-
- switch expr.GetExprKind().(type) {
- case *exprpb.Expr_CallExpr:
- // Iterate the AST from `expr` recursively looking for macros. Because we are at most
- // starting from the top level macro, this recursion is bounded by the size of the AST. This
- // means that the depth check on the AST during parsing will catch recursion overflows
- // before we get to here.
- macroTarget := expr.GetCallExpr().GetTarget()
- if macroTarget != nil {
- macroTarget = p.buildMacroCallArg(macroTarget)
- }
- macroArgs := make([]*exprpb.Expr, len(expr.GetCallExpr().GetArgs()))
- for index, arg := range expr.GetCallExpr().GetArgs() {
- macroArgs[index] = p.buildMacroCallArg(arg)
- }
- return &exprpb.Expr{
- Id: expr.GetId(),
- ExprKind: &exprpb.Expr_CallExpr{
- CallExpr: &exprpb.Expr_Call{
- Target: macroTarget,
- Function: expr.GetCallExpr().GetFunction(),
- Args: macroArgs,
- },
- },
- }
- case *exprpb.Expr_ListExpr:
- listExpr := expr.GetListExpr()
- macroListArgs := make([]*exprpb.Expr, len(listExpr.GetElements()))
- for i, elem := range listExpr.GetElements() {
- macroListArgs[i] = p.buildMacroCallArg(elem)
- }
- return &exprpb.Expr{
- Id: expr.GetId(),
- ExprKind: &exprpb.Expr_ListExpr{
- ListExpr: &exprpb.Expr_CreateList{
- Elements: macroListArgs,
- },
- },
- }
- }
-
- return expr
-}
-
-// addMacroCall adds the macro the the MacroCalls map in source info. If a macro has args/subargs/target
-// that are macros, their ID will be stored instead for later self-lookups.
-func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) {
- macroTarget := target
- if target != nil {
- if _, found := p.macroCalls[target.GetId()]; found {
- macroTarget = &exprpb.Expr{Id: target.GetId()}
- } else {
- macroTarget = p.buildMacroCallArg(target)
- }
- }
-
- macroArgs := make([]*exprpb.Expr, len(args))
- for index, arg := range args {
- macroArgs[index] = p.buildMacroCallArg(arg)
- }
-
- p.macroCalls[exprID] = &exprpb.Expr{
- ExprKind: &exprpb.Expr_CallExpr{
- CallExpr: &exprpb.Expr_Call{
- Target: macroTarget,
- Function: function,
- Args: macroArgs,
- },
- },
- }
-}
-
-// balancer performs tree balancing on operators whose arguments are of equal precedence.
-//
-// The purpose of the balancer is to ensure a compact serialization format for the logical &&, ||
-// operators which have a tendency to create long DAGs which are skewed in one direction. Since the
-// operators are commutative re-ordering the terms *must not* affect the evaluation result.
-//
-// Re-balancing the terms is a safe, if somewhat controversial choice. A better solution would be
-// to make these functions variadic and update both the checker and interpreter to understand this;
-// however, this is a more complex change.
-//
-// TODO: Consider replacing tree-balancing with variadic logical &&, || within the parser, checker,
-// and interpreter.
-type balancer struct {
- helper *parserHelper
- function string
- terms []*exprpb.Expr
- ops []int64
-}
-
-// newBalancer creates a balancer instance bound to a specific function and its first term.
-func newBalancer(h *parserHelper, function string, term *exprpb.Expr) *balancer {
- return &balancer{
- helper: h,
- function: function,
- terms: []*exprpb.Expr{term},
- ops: []int64{},
- }
-}
-
-// addTerm adds an operation identifier and term to the set of terms to be balanced.
-func (b *balancer) addTerm(op int64, term *exprpb.Expr) {
- b.terms = append(b.terms, term)
- b.ops = append(b.ops, op)
-}
-
-// balance creates a balanced tree from the sub-terms and returns the final Expr value.
-func (b *balancer) balance() *exprpb.Expr {
- if len(b.terms) == 1 {
- return b.terms[0]
- }
- return b.balancedTree(0, len(b.ops)-1)
-}
-
-// balancedTree recursively balances the terms provided to a commutative operator.
-func (b *balancer) balancedTree(lo, hi int) *exprpb.Expr {
- mid := (lo + hi + 1) / 2
-
- var left *exprpb.Expr
- if mid == lo {
- left = b.terms[mid]
- } else {
- left = b.balancedTree(lo, mid-1)
- }
-
- var right *exprpb.Expr
- if mid == hi {
- right = b.terms[mid+1]
- } else {
- right = b.balancedTree(mid+1, hi)
- }
- return b.helper.newGlobalCall(b.ops[mid], b.function, left, right)
-}
-
-type exprHelper struct {
- *parserHelper
- id int64
-}
-
-func (e *exprHelper) nextMacroID() int64 {
- return e.parserHelper.id(e.parserHelper.getLocation(e.id))
-}
-
-// LiteralBool implements the ExprHelper interface method.
-func (e *exprHelper) LiteralBool(value bool) *exprpb.Expr {
- return e.parserHelper.newLiteralBool(e.nextMacroID(), value)
-}
-
-// LiteralBytes implements the ExprHelper interface method.
-func (e *exprHelper) LiteralBytes(value []byte) *exprpb.Expr {
- return e.parserHelper.newLiteralBytes(e.nextMacroID(), value)
-}
-
-// LiteralDouble implements the ExprHelper interface method.
-func (e *exprHelper) LiteralDouble(value float64) *exprpb.Expr {
- return e.parserHelper.newLiteralDouble(e.nextMacroID(), value)
-}
-
-// LiteralInt implements the ExprHelper interface method.
-func (e *exprHelper) LiteralInt(value int64) *exprpb.Expr {
- return e.parserHelper.newLiteralInt(e.nextMacroID(), value)
-}
-
-// LiteralString implements the ExprHelper interface method.
-func (e *exprHelper) LiteralString(value string) *exprpb.Expr {
- return e.parserHelper.newLiteralString(e.nextMacroID(), value)
-}
-
-// LiteralUint implements the ExprHelper interface method.
-func (e *exprHelper) LiteralUint(value uint64) *exprpb.Expr {
- return e.parserHelper.newLiteralUint(e.nextMacroID(), value)
-}
-
-// NewList implements the ExprHelper interface method.
-func (e *exprHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
- return e.parserHelper.newList(e.nextMacroID(), elems...)
-}
-
-// NewMap implements the ExprHelper interface method.
-func (e *exprHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
- return e.parserHelper.newMap(e.nextMacroID(), entries...)
-}
-
-// NewMapEntry implements the ExprHelper interface method.
-func (e *exprHelper) NewMapEntry(key *exprpb.Expr,
- val *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry {
- return e.parserHelper.newMapEntry(e.nextMacroID(), key, val)
-}
-
-// NewObject implements the ExprHelper interface method.
-func (e *exprHelper) NewObject(typeName string,
- fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
- return e.parserHelper.newObject(e.nextMacroID(), typeName, fieldInits...)
-}
-
-// NewObjectFieldInit implements the ExprHelper interface method.
-func (e *exprHelper) NewObjectFieldInit(field string,
- init *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry {
- return e.parserHelper.newObjectField(e.nextMacroID(), field, init)
-}
-
-// Fold implements the ExprHelper interface method.
-func (e *exprHelper) Fold(iterVar string,
- iterRange *exprpb.Expr,
- accuVar string,
- accuInit *exprpb.Expr,
- condition *exprpb.Expr,
- step *exprpb.Expr,
- result *exprpb.Expr) *exprpb.Expr {
- return e.parserHelper.newComprehension(
- e.nextMacroID(), iterVar, iterRange, accuVar, accuInit, condition, step, result)
-}
-
-// Ident implements the ExprHelper interface method.
-func (e *exprHelper) Ident(name string) *exprpb.Expr {
- return e.parserHelper.newIdent(e.nextMacroID(), name)
-}
-
-// AccuIdent implements the ExprHelper interface method.
-func (e *exprHelper) AccuIdent() *exprpb.Expr {
- return e.parserHelper.newIdent(e.nextMacroID(), AccumulatorName)
-}
-
-// GlobalCall implements the ExprHelper interface method.
-func (e *exprHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
- return e.parserHelper.newGlobalCall(e.nextMacroID(), function, args...)
-}
-
-// ReceiverCall implements the ExprHelper interface method.
-func (e *exprHelper) ReceiverCall(function string,
- target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
- return e.parserHelper.newReceiverCall(e.nextMacroID(), function, target, args...)
-}
-
-// PresenceTest implements the ExprHelper interface method.
-func (e *exprHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
- return e.parserHelper.newPresenceTest(e.nextMacroID(), operand, field)
-}
-
-// Select implements the ExprHelper interface method.
-func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
- return e.parserHelper.newSelect(e.nextMacroID(), operand, field)
-}
-
-// OffsetLocation implements the ExprHelper interface method.
-func (e *exprHelper) OffsetLocation(exprID int64) common.Location {
- offset := e.parserHelper.positions[exprID]
- location, _ := e.parserHelper.source.OffsetLocation(offset)
- return location
-}
-
-var (
- // Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations.
- exprHelperPool = &sync.Pool{
- New: func() interface{} {
- return &exprHelper{}
- },
- }
-)
diff --git a/etcd/vendor/github.com/google/cel-go/parser/input.go b/etcd/vendor/github.com/google/cel-go/parser/input.go
deleted file mode 100644
index 6e4a4ba47b..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/input.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "github.com/antlr/antlr4/runtime/Go/antlr"
- "github.com/google/cel-go/common/runes"
-)
-
-type charStream struct {
- buf runes.Buffer
- pos int
- src string
-}
-
-// Consume implements (antlr.CharStream).Consume.
-func (c *charStream) Consume() {
- if c.pos >= c.buf.Len() {
- panic("cannot consume EOF")
- }
- c.pos++
-}
-
-// LA implements (antlr.CharStream).LA.
-func (c *charStream) LA(offset int) int {
- if offset == 0 {
- return 0
- }
- if offset < 0 {
- offset++
- }
- pos := c.pos + offset - 1
- if pos < 0 || pos >= c.buf.Len() {
- return antlr.TokenEOF
- }
- return int(c.buf.Get(pos))
-}
-
-// LT mimics (*antlr.InputStream).LT.
-func (c *charStream) LT(offset int) int {
- return c.LA(offset)
-}
-
-// Mark implements (antlr.CharStream).Mark.
-func (c *charStream) Mark() int {
- return -1
-}
-
-// Release implements (antlr.CharStream).Release.
-func (c *charStream) Release(marker int) {}
-
-// Index implements (antlr.CharStream).Index.
-func (c *charStream) Index() int {
- return c.pos
-}
-
-// Seek implements (antlr.CharStream).Seek.
-func (c *charStream) Seek(index int) {
- if index <= c.pos {
- c.pos = index
- return
- }
- if index < c.buf.Len() {
- c.pos = index
- } else {
- c.pos = c.buf.Len()
- }
-}
-
-// Size implements (antlr.CharStream).Size.
-func (c *charStream) Size() int {
- return c.buf.Len()
-}
-
-// GetSourceName implements (antlr.CharStream).GetSourceName.
-func (c *charStream) GetSourceName() string {
- return c.src
-}
-
-// GetText implements (antlr.CharStream).GetText.
-func (c *charStream) GetText(start, stop int) string {
- if stop >= c.buf.Len() {
- stop = c.buf.Len() - 1
- }
- if start >= c.buf.Len() {
- return ""
- }
- return c.buf.Slice(start, stop+1)
-}
-
-// GetTextFromTokens implements (antlr.CharStream).GetTextFromTokens.
-func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string {
- if start != nil && stop != nil {
- return c.GetText(start.GetTokenIndex(), stop.GetTokenIndex())
- }
- return ""
-}
-
-// GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval.
-func (c *charStream) GetTextFromInterval(i *antlr.Interval) string {
- return c.GetText(i.Start, i.Stop)
-}
-
-// String mimics (*antlr.InputStream).String.
-func (c *charStream) String() string {
- return c.buf.Slice(0, c.buf.Len())
-}
-
-var _ antlr.CharStream = &charStream{}
-
-func newCharStream(buf runes.Buffer, desc string) antlr.CharStream {
- return &charStream{
- buf: buf,
- src: desc,
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/macro.go b/etcd/vendor/github.com/google/cel-go/parser/macro.go
deleted file mode 100644
index 9150b064e5..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/macro.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "fmt"
-
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/operators"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// NewGlobalMacro creates a Macro for a global function with the specified arg count.
-func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
- return ¯o{
- function: function,
- argCount: argCount,
- expander: expander}
-}
-
-// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
-func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
- return ¯o{
- function: function,
- argCount: argCount,
- expander: expander,
- receiverStyle: true}
-}
-
-// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
-func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
- return ¯o{
- function: function,
- expander: expander,
- varArgStyle: true}
-}
-
-// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
-func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
- return ¯o{
- function: function,
- expander: expander,
- receiverStyle: true,
- varArgStyle: true}
-}
-
-// Macro interface for describing the function signature to match and the MacroExpander to apply.
-//
-// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
-// a Macro should be created per arg-count.
-type Macro interface {
- // Function name to match.
- Function() string
-
- // ArgCount for the function call.
- //
- // When the macro is a var-arg style macro, the return value will be zero, but the MacroKey
- // will contain a `*` where the arg count would have been.
- ArgCount() int
-
- // IsReceiverStyle returns true if the macro matches a receiver style call.
- IsReceiverStyle() bool
-
- // MacroKey returns the macro signatures accepted by this macro.
- //
- // Format: `::`.
- //
- // When the macros is a var-arg style macro, the `arg-count` value is represented as a `*`.
- MacroKey() string
-
- // Expander returns the MacroExpander to apply when the macro key matches the parsed call
- // signature.
- Expander() MacroExpander
-}
-
-// Macro type which declares the function name and arg count expected for the
-// macro, as well as a macro expansion function.
-type macro struct {
- function string
- receiverStyle bool
- varArgStyle bool
- argCount int
- expander MacroExpander
-}
-
-// Function returns the macro's function name (i.e. the function whose syntax it mimics).
-func (m *macro) Function() string {
- return m.function
-}
-
-// ArgCount returns the number of arguments the macro expects.
-func (m *macro) ArgCount() int {
- return m.argCount
-}
-
-// IsReceiverStyle returns whether the macro is receiver style.
-func (m *macro) IsReceiverStyle() bool {
- return m.receiverStyle
-}
-
-// Expander implements the Macro interface method.
-func (m *macro) Expander() MacroExpander {
- return m.expander
-}
-
-// MacroKey implements the Macro interface method.
-func (m *macro) MacroKey() string {
- if m.varArgStyle {
- return makeVarArgMacroKey(m.function, m.receiverStyle)
- }
- return makeMacroKey(m.function, m.argCount, m.receiverStyle)
-}
-
-func makeMacroKey(name string, args int, receiverStyle bool) string {
- return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle)
-}
-
-func makeVarArgMacroKey(name string, receiverStyle bool) string {
- return fmt.Sprintf("%s:*:%v", name, receiverStyle)
-}
-
-// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree, or an error
-// if the input arguments are not suitable for the expansion requirements for the macro in question.
-//
-// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
-// and produces as output an Expr ast node.
-//
-// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
-type MacroExpander func(eh ExprHelper,
- target *exprpb.Expr,
- args []*exprpb.Expr) (*exprpb.Expr, *common.Error)
-
-// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
-// consistent with the source position and expression id generation code leveraged by both
-// the parser and type-checker.
-type ExprHelper interface {
- // LiteralBool creates an Expr value for a bool literal.
- LiteralBool(value bool) *exprpb.Expr
-
- // LiteralBytes creates an Expr value for a byte literal.
- LiteralBytes(value []byte) *exprpb.Expr
-
- // LiteralDouble creates an Expr value for double literal.
- LiteralDouble(value float64) *exprpb.Expr
-
- // LiteralInt creates an Expr value for an int literal.
- LiteralInt(value int64) *exprpb.Expr
-
- // LiteralString creates am Expr value for a string literal.
- LiteralString(value string) *exprpb.Expr
-
- // LiteralUint creates an Expr value for a uint literal.
- LiteralUint(value uint64) *exprpb.Expr
-
- // NewList creates a CreateList instruction where the list is comprised of the optional set
- // of elements provided as arguments.
- NewList(elems ...*exprpb.Expr) *exprpb.Expr
-
- // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
- // optional set of key, value entries.
- NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
-
- // NewMapEntry creates a Map Entry for the key, value pair.
- NewMapEntry(key *exprpb.Expr, val *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry
-
- // NewObject creates a CreateStruct instruction for an object with a given type name and
- // optional set of field initializers.
- NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
-
- // NewObjectFieldInit creates a new Object field initializer from the field name and value.
- NewObjectFieldInit(field string, init *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry
-
- // Fold creates a fold comprehension instruction.
- //
- // - iterVar is the iteration variable name.
- // - iterRange represents the expression that resolves to a list or map where the elements or
- // keys (respectively) will be iterated over.
- // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
- // - accuInit is the initial expression whose value will be set for the accuVar prior to
- // folding.
- // - condition is the expression to test to determine whether to continue folding.
- // - step is the expression to evaluation at the conclusion of a single fold iteration.
- // - result is the computation to evaluate at the conclusion of the fold.
- //
- // The accuVar should not shadow variable names that you would like to reference within the
- // environment in the step and condition expressions. Presently, the name __result__ is commonly
- // used by built-in macros but this may change in the future.
- Fold(iterVar string,
- iterRange *exprpb.Expr,
- accuVar string,
- accuInit *exprpb.Expr,
- condition *exprpb.Expr,
- step *exprpb.Expr,
- result *exprpb.Expr) *exprpb.Expr
-
- // Ident creates an identifier Expr value.
- Ident(name string) *exprpb.Expr
-
- // AccuIdent returns an accumulator identifier for use with comprehension results.
- AccuIdent() *exprpb.Expr
-
- // GlobalCall creates a function call Expr value for a global (free) function.
- GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
-
- // ReceiverCall creates a function call Expr value for a receiver-style function.
- ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
-
- // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
- PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
-
- // Select create a field traversal Expr value.
- Select(operand *exprpb.Expr, field string) *exprpb.Expr
-
- // OffsetLocation returns the Location of the expression identifier.
- OffsetLocation(exprID int64) common.Location
-}
-
-var (
- // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
- // specify the field as a string.
- HasMacro = NewGlobalMacro(operators.Has, 1, MakeHas)
-
- // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
- // elements in the range satisfy the predicate.
- AllMacro = NewReceiverMacro(operators.All, 2, MakeAll)
-
- // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
- // some element in the range satisfies the predicate.
- ExistsMacro = NewReceiverMacro(operators.Exists, 2, MakeExists)
-
- // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
- // element in range the predicate holds.
- ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne)
-
- // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
- // to each element in the range to produce a new list.
- MapMacro = NewReceiverMacro(operators.Map, 2, MakeMap)
-
- // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
- // first filters the elements in the range by the predicate, then applies the transform function
- // to produce a new list.
- MapFilterMacro = NewReceiverMacro(operators.Map, 3, MakeMap)
-
- // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
- // elements in the range, producing a new list from the elements that satisfy the predicate.
- FilterMacro = NewReceiverMacro(operators.Filter, 2, MakeFilter)
-
- // AllMacros includes the list of all spec-supported macros.
- AllMacros = []Macro{
- HasMacro,
- AllMacro,
- ExistsMacro,
- ExistsOneMacro,
- MapMacro,
- MapFilterMacro,
- FilterMacro,
- }
-
- // NoMacros list.
- NoMacros = []Macro{}
-)
-
-// AccumulatorName is the traditional variable name assigned to the fold accumulator variable.
-const AccumulatorName = "__result__"
-
-type quantifierKind int
-
-const (
- quantifierAll quantifierKind = iota
- quantifierExists
- quantifierExistsOne
-)
-
-// MakeAll expands the input call arguments into a comprehension that returns true if all of the
-// elements in the range match the predicate expressions:
-// .all(, )
-func MakeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return makeQuantifier(quantifierAll, eh, target, args)
-}
-
-// MakeExists expands the input call arguments into a comprehension that returns true if any of the
-// elements in the range match the predicate expressions:
-// .exists(, )
-func MakeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return makeQuantifier(quantifierExists, eh, target, args)
-}
-
-// MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly
-// one of the elements in the range match the predicate expressions:
-// .exists_one(, )
-func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- return makeQuantifier(quantifierExistsOne, eh, target, args)
-}
-
-// MakeMap expands the input call arguments into a comprehension that transforms each element in the
-// input to produce an output list.
-//
-// There are two call patterns supported by map:
-// .map(, )
-// .map(, , )
-// In the second form only iterVar values which return true when provided to the predicate expression
-// are transformed.
-func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- v, found := extractIdent(args[0])
- if !found {
- return nil, &common.Error{Message: "argument is not an identifier"}
- }
-
- var fn *exprpb.Expr
- var filter *exprpb.Expr
-
- if len(args) == 3 {
- filter = args[1]
- fn = args[2]
- } else {
- filter = nil
- fn = args[1]
- }
-
- accuExpr := eh.Ident(AccumulatorName)
- init := eh.NewList()
- condition := eh.LiteralBool(true)
- step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn))
-
- if filter != nil {
- step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
- }
- return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
-}
-
-// MakeFilter expands the input call arguments into a comprehension which produces a list which contains
-// only elements which match the provided predicate expression:
-// .filter(, )
-func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- v, found := extractIdent(args[0])
- if !found {
- return nil, &common.Error{Message: "argument is not an identifier"}
- }
-
- filter := args[1]
- accuExpr := eh.Ident(AccumulatorName)
- init := eh.NewList()
- condition := eh.LiteralBool(true)
- step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0]))
- step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
- return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
-}
-
-// MakeHas expands the input call arguments into a presence test, e.g. has(.field)
-func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok {
- return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil
- }
- return nil, &common.Error{Message: "invalid argument to has() macro"}
-}
-
-func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
- v, found := extractIdent(args[0])
- if !found {
- location := eh.OffsetLocation(args[0].GetId())
- return nil, &common.Error{
- Message: "argument must be a simple name",
- Location: location,
- }
- }
-
- var init *exprpb.Expr
- var condition *exprpb.Expr
- var step *exprpb.Expr
- var result *exprpb.Expr
- switch kind {
- case quantifierAll:
- init = eh.LiteralBool(true)
- condition = eh.GlobalCall(operators.NotStrictlyFalse, eh.AccuIdent())
- step = eh.GlobalCall(operators.LogicalAnd, eh.AccuIdent(), args[1])
- result = eh.AccuIdent()
- case quantifierExists:
- init = eh.LiteralBool(false)
- condition = eh.GlobalCall(
- operators.NotStrictlyFalse,
- eh.GlobalCall(operators.LogicalNot, eh.AccuIdent()))
- step = eh.GlobalCall(operators.LogicalOr, eh.AccuIdent(), args[1])
- result = eh.AccuIdent()
- case quantifierExistsOne:
- zeroExpr := eh.LiteralInt(0)
- oneExpr := eh.LiteralInt(1)
- init = zeroExpr
- condition = eh.LiteralBool(true)
- step = eh.GlobalCall(operators.Conditional, args[1],
- eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent())
- result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr)
- default:
- return nil, &common.Error{Message: fmt.Sprintf("unrecognized quantifier '%v'", kind)}
- }
- return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil
-}
-
-func extractIdent(e *exprpb.Expr) (string, bool) {
- switch e.ExprKind.(type) {
- case *exprpb.Expr_IdentExpr:
- return e.GetIdentExpr().GetName(), true
- }
- return "", false
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/options.go b/etcd/vendor/github.com/google/cel-go/parser/options.go
deleted file mode 100644
index b50686a912..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/options.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import "fmt"
-
-type options struct {
- maxRecursionDepth int
- errorRecoveryTokenLookaheadLimit int
- errorRecoveryLimit int
- expressionSizeCodePointLimit int
- macros map[string]Macro
- populateMacroCalls bool
-}
-
-// Option configures the behavior of the parser.
-type Option func(*options) error
-
-// MaxRecursionDepth limits the maximum depth the parser will attempt to parse the expression before giving up.
-func MaxRecursionDepth(limit int) Option {
- return func(opts *options) error {
- if limit < -1 {
- return fmt.Errorf("max recursion depth must be greater than or equal to -1: %d", limit)
- }
- opts.maxRecursionDepth = limit
- return nil
- }
-}
-
-// ErrorRecoveryLookaheadTokenLimit limits the number of lexer tokens that may be considered during error recovery.
-//
-// Error recovery often involves looking ahead in the input to determine if there's a point at which parsing may
-// successfully resume. In some pathological cases, the parser can look through quite a large set of input which
-// in turn generates a lot of back-tracking and performance degredation.
-//
-// The limit must be > 1, and is recommended to be less than the default of 256.
-func ErrorRecoveryLookaheadTokenLimit(limit int) Option {
- return func(opts *options) error {
- if limit < 1 {
- return fmt.Errorf("error recovery lookahead token limit must be at least 1: %d", limit)
- }
- opts.errorRecoveryTokenLookaheadLimit = limit
- return nil
- }
-}
-
-// ErrorRecoveryLimit limits the number of attempts the parser will perform to recover from an error.
-func ErrorRecoveryLimit(limit int) Option {
- return func(opts *options) error {
- if limit < -1 {
- return fmt.Errorf("error recovery limit must be greater than or equal to -1: %d", limit)
- }
- opts.errorRecoveryLimit = limit
- return nil
- }
-}
-
-// ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an
-// expression.
-func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option {
- return func(opts *options) error {
- if expressionSizeCodePointLimit < -1 {
- return fmt.Errorf("expression size code point limit must be greater than or equal to -1: %d", expressionSizeCodePointLimit)
- }
- opts.expressionSizeCodePointLimit = expressionSizeCodePointLimit
- return nil
- }
-}
-
-// Macros adds the given macros to the parser.
-func Macros(macros ...Macro) Option {
- return func(opts *options) error {
- for _, m := range macros {
- if m != nil {
- if opts.macros == nil {
- opts.macros = make(map[string]Macro)
- }
- opts.macros[m.MacroKey()] = m
- }
- }
- return nil
- }
-}
-
-// PopulateMacroCalls ensures that the original call signatures replaced by expanded macros
-// are preserved in the `SourceInfo` of parse result.
-func PopulateMacroCalls(populateMacroCalls bool) Option {
- return func(opts *options) error {
- opts.populateMacroCalls = populateMacroCalls
- return nil
- }
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/parser.go b/etcd/vendor/github.com/google/cel-go/parser/parser.go
deleted file mode 100644
index 072f624574..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/parser.go
+++ /dev/null
@@ -1,905 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package parser declares an expression parser with support for macro
-// expansion.
-package parser
-
-import (
- "fmt"
- "strconv"
- "strings"
- "sync"
-
- "github.com/antlr/antlr4/runtime/Go/antlr"
- "github.com/google/cel-go/common"
- "github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/runes"
- "github.com/google/cel-go/parser/gen"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-// Parser encapsulates the context necessary to perform parsing for different expressions.
-type Parser struct {
- options
-}
-
-// NewParser builds and returns a new Parser using the provided options.
-func NewParser(opts ...Option) (*Parser, error) {
- p := &Parser{}
- for _, opt := range opts {
- if err := opt(&p.options); err != nil {
- return nil, err
- }
- }
- if p.maxRecursionDepth == 0 {
- p.maxRecursionDepth = 250
- }
- if p.maxRecursionDepth == -1 {
- p.maxRecursionDepth = int((^uint(0)) >> 1)
- }
- if p.errorRecoveryTokenLookaheadLimit == 0 {
- p.errorRecoveryTokenLookaheadLimit = 256
- }
- if p.errorRecoveryLimit == 0 {
- p.errorRecoveryLimit = 30
- }
- if p.errorRecoveryLimit == -1 {
- p.errorRecoveryLimit = int((^uint(0)) >> 1)
- }
- if p.expressionSizeCodePointLimit == 0 {
- p.expressionSizeCodePointLimit = 100_000
- }
- if p.expressionSizeCodePointLimit == -1 {
- p.expressionSizeCodePointLimit = int((^uint(0)) >> 1)
- }
- // Bool is false by default, so populateMacroCalls will be false by default
- return p, nil
-}
-
-// mustNewParser does the work of NewParser and panics if an error occurs.
-//
-// This function is only intended for internal use and is for backwards compatibility in Parse and
-// ParseWithMacros, where we know the options will result in an error.
-func mustNewParser(opts ...Option) *Parser {
- p, err := NewParser(opts...)
- if err != nil {
- panic(err)
- }
- return p
-}
-
-// Parse parses the expression represented by source and returns the result.
-func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
- impl := parser{
- errors: &parseErrors{common.NewErrors(source)},
- helper: newParserHelper(source),
- macros: p.macros,
- maxRecursionDepth: p.maxRecursionDepth,
- errorRecoveryLimit: p.errorRecoveryLimit,
- errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
- populateMacroCalls: p.populateMacroCalls,
- }
- buf, ok := source.(runes.Buffer)
- if !ok {
- buf = runes.NewBuffer(source.Content())
- }
- var e *exprpb.Expr
- if buf.Len() > p.expressionSizeCodePointLimit {
- e = impl.reportError(common.NoLocation,
- "expression code point size exceeds limit: size: %d, limit %d",
- buf.Len(), p.expressionSizeCodePointLimit)
- } else {
- e = impl.parse(buf, source.Description())
- }
- return &exprpb.ParsedExpr{
- Expr: e,
- SourceInfo: impl.helper.getSourceInfo(),
- }, impl.errors.Errors
-}
-
-// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid
-// field names for protos, and it would complicate the grammar to distinguish the cases.
-var reservedIds = map[string]struct{}{
- "as": {},
- "break": {},
- "const": {},
- "continue": {},
- "else": {},
- "false": {},
- "for": {},
- "function": {},
- "if": {},
- "import": {},
- "in": {},
- "let": {},
- "loop": {},
- "package": {},
- "namespace": {},
- "null": {},
- "return": {},
- "true": {},
- "var": {},
- "void": {},
- "while": {},
-}
-
-// Parse converts a source input a parsed expression.
-// This function calls ParseWithMacros with AllMacros.
-//
-// Deprecated: Use NewParser().Parse() instead.
-func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
- return mustNewParser(Macros(AllMacros...)).Parse(source)
-}
-
-type recursionError struct {
- message string
-}
-
-// Error implements error.
-func (re *recursionError) Error() string {
- return re.message
-}
-
-var _ error = &recursionError{}
-
-type recursionListener struct {
- maxDepth int
- ruleTypeDepth map[int]*int
-}
-
-func (rl *recursionListener) VisitTerminal(node antlr.TerminalNode) {}
-
-func (rl *recursionListener) VisitErrorNode(node antlr.ErrorNode) {}
-
-func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
- if ctx == nil {
- return
- }
- ruleIndex := ctx.GetRuleIndex()
- depth, found := rl.ruleTypeDepth[ruleIndex]
- if !found {
- var counter = 1
- rl.ruleTypeDepth[ruleIndex] = &counter
- depth = &counter
- } else {
- *depth++
- }
- if *depth >= rl.maxDepth {
- panic(&recursionError{
- message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
- })
- }
-}
-
-func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
- if ctx == nil {
- return
- }
- ruleIndex := ctx.GetRuleIndex()
- if depth, found := rl.ruleTypeDepth[ruleIndex]; found && *depth > 0 {
- *depth--
- }
-}
-
-var _ antlr.ParseTreeListener = &recursionListener{}
-
-type recoveryLimitError struct {
- message string
-}
-
-// Error implements error.
-func (rl *recoveryLimitError) Error() string {
- return rl.message
-}
-
-type lookaheadLimitError struct {
- message string
-}
-
-func (ll *lookaheadLimitError) Error() string {
- return ll.message
-}
-
-var _ error = &recoveryLimitError{}
-
-type recoveryLimitErrorStrategy struct {
- *antlr.DefaultErrorStrategy
- errorRecoveryLimit int
- errorRecoveryTokenLookaheadLimit int
- recoveryAttempts int
-}
-
-type lookaheadConsumer struct {
- antlr.Parser
- errorRecoveryTokenLookaheadLimit int
- lookaheadAttempts int
-}
-
-func (lc *lookaheadConsumer) Consume() antlr.Token {
- if lc.lookaheadAttempts >= lc.errorRecoveryTokenLookaheadLimit {
- panic(&lookaheadLimitError{
- message: fmt.Sprintf("error recovery token lookahead limit exceeded: %d", lc.errorRecoveryTokenLookaheadLimit),
- })
- }
- lc.lookaheadAttempts++
- return lc.Parser.Consume()
-}
-
-func (rl *recoveryLimitErrorStrategy) Recover(recognizer antlr.Parser, e antlr.RecognitionException) {
- rl.checkAttempts(recognizer)
- lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit}
- rl.DefaultErrorStrategy.Recover(lc, e)
-}
-
-func (rl *recoveryLimitErrorStrategy) RecoverInline(recognizer antlr.Parser) antlr.Token {
- rl.checkAttempts(recognizer)
- lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit}
- return rl.DefaultErrorStrategy.RecoverInline(lc)
-}
-
-func (rl *recoveryLimitErrorStrategy) checkAttempts(recognizer antlr.Parser) {
- if rl.recoveryAttempts == rl.errorRecoveryLimit {
- rl.recoveryAttempts++
- msg := fmt.Sprintf("error recovery attempt limit exceeded: %d", rl.errorRecoveryLimit)
- recognizer.NotifyErrorListeners(msg, nil, nil)
- panic(&recoveryLimitError{
- message: msg,
- })
- }
- rl.recoveryAttempts++
-}
-
-var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{}
-
-type parser struct {
- gen.BaseCELVisitor
- errors *parseErrors
- helper *parserHelper
- macros map[string]Macro
- recursionDepth int
- maxRecursionDepth int
- errorRecoveryLimit int
- errorRecoveryLookaheadTokenLimit int
- populateMacroCalls bool
-}
-
-var (
- _ gen.CELVisitor = (*parser)(nil)
-
- lexerPool *sync.Pool = &sync.Pool{
- New: func() interface{} {
- l := gen.NewCELLexer(nil)
- l.RemoveErrorListeners()
- return l
- },
- }
-
- parserPool *sync.Pool = &sync.Pool{
- New: func() interface{} {
- p := gen.NewCELParser(nil)
- p.RemoveErrorListeners()
- return p
- },
- }
-)
-
-func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
- // TODO: get rid of these pools once https://github.com/antlr/antlr4/pull/3571 is in a release
- lexer := lexerPool.Get().(*gen.CELLexer)
- prsr := parserPool.Get().(*gen.CELParser)
-
- // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners, so this is
- // good enough until that is exported.
- prsrListener := &recursionListener{
- maxDepth: p.maxRecursionDepth,
- ruleTypeDepth: map[int]*int{},
- }
-
- defer func() {
- // Reset the lexer and parser before putting them back in the pool.
- lexer.RemoveErrorListeners()
- prsr.RemoveParseListener(prsrListener)
- prsr.RemoveErrorListeners()
- lexer.SetInputStream(nil)
- prsr.SetInputStream(nil)
- lexerPool.Put(lexer)
- parserPool.Put(prsr)
- }()
-
- lexer.SetInputStream(newCharStream(expr, desc))
- prsr.SetInputStream(antlr.NewCommonTokenStream(lexer, 0))
-
- lexer.AddErrorListener(p)
- prsr.AddErrorListener(p)
- prsr.AddParseListener(prsrListener)
-
- prsr.SetErrorHandler(&recoveryLimitErrorStrategy{
- DefaultErrorStrategy: antlr.NewDefaultErrorStrategy(),
- errorRecoveryLimit: p.errorRecoveryLimit,
- errorRecoveryTokenLookaheadLimit: p.errorRecoveryLookaheadTokenLimit,
- })
-
- defer func() {
- if val := recover(); val != nil {
- switch err := val.(type) {
- case *lookaheadLimitError:
- p.errors.ReportError(common.NoLocation, err.Error())
- case *recursionError:
- p.errors.ReportError(common.NoLocation, err.Error())
- case *recoveryLimitError:
- // do nothing, listeners already notified and error reported.
- default:
- panic(val)
- }
- }
- }()
-
- return p.Visit(prsr.Start()).(*exprpb.Expr)
-}
-
-// Visitor implementations.
-func (p *parser) Visit(tree antlr.ParseTree) interface{} {
- p.recursionDepth++
- if p.recursionDepth > p.maxRecursionDepth {
- panic(&recursionError{message: "max recursion depth exceeded"})
- }
- defer func() {
- p.recursionDepth--
- }()
- switch tree.(type) {
- case *gen.StartContext:
- return p.VisitStart(tree.(*gen.StartContext))
- case *gen.ExprContext:
- return p.VisitExpr(tree.(*gen.ExprContext))
- case *gen.ConditionalAndContext:
- return p.VisitConditionalAnd(tree.(*gen.ConditionalAndContext))
- case *gen.ConditionalOrContext:
- return p.VisitConditionalOr(tree.(*gen.ConditionalOrContext))
- case *gen.RelationContext:
- return p.VisitRelation(tree.(*gen.RelationContext))
- case *gen.CalcContext:
- return p.VisitCalc(tree.(*gen.CalcContext))
- case *gen.LogicalNotContext:
- return p.VisitLogicalNot(tree.(*gen.LogicalNotContext))
- case *gen.MemberExprContext:
- return p.VisitMemberExpr(tree.(*gen.MemberExprContext))
- case *gen.PrimaryExprContext:
- return p.VisitPrimaryExpr(tree.(*gen.PrimaryExprContext))
- case *gen.SelectOrCallContext:
- return p.VisitSelectOrCall(tree.(*gen.SelectOrCallContext))
- case *gen.MapInitializerListContext:
- return p.VisitMapInitializerList(tree.(*gen.MapInitializerListContext))
- case *gen.NegateContext:
- return p.VisitNegate(tree.(*gen.NegateContext))
- case *gen.IndexContext:
- return p.VisitIndex(tree.(*gen.IndexContext))
- case *gen.UnaryContext:
- return p.VisitUnary(tree.(*gen.UnaryContext))
- case *gen.CreateListContext:
- return p.VisitCreateList(tree.(*gen.CreateListContext))
- case *gen.CreateMessageContext:
- return p.VisitCreateMessage(tree.(*gen.CreateMessageContext))
- case *gen.CreateStructContext:
- return p.VisitCreateStruct(tree.(*gen.CreateStructContext))
- }
-
- // Report at least one error if the parser reaches an unknown parse element.
- // Typically, this happens if the parser has already encountered a syntax error elsewhere.
- if len(p.errors.GetErrors()) == 0 {
- txt := "<>"
- if tree != nil {
- txt = fmt.Sprintf("<<%T>>", tree)
- }
- return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt)
- }
- return p.helper.newExpr(common.NoLocation)
-
-}
-
-// Visit a parse tree produced by CELParser#start.
-func (p *parser) VisitStart(ctx *gen.StartContext) interface{} {
- return p.Visit(ctx.Expr())
-}
-
-// Visit a parse tree produced by CELParser#expr.
-func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} {
- result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOp() == nil {
- return result
- }
- opID := p.helper.id(ctx.GetOp())
- ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr)
- ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse)
-}
-
-// Visit a parse tree produced by CELParser#conditionalOr.
-func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} {
- result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOps() == nil {
- return result
- }
- b := newBalancer(p.helper, operators.LogicalOr, result)
- rest := ctx.GetE1()
- for i, op := range ctx.GetOps() {
- if i >= len(rest) {
- return p.reportError(ctx, "unexpected character, wanted '||'")
- }
- next := p.Visit(rest[i]).(*exprpb.Expr)
- opID := p.helper.id(op)
- b.addTerm(opID, next)
- }
- return b.balance()
-}
-
-// Visit a parse tree produced by CELParser#conditionalAnd.
-func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{} {
- result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOps() == nil {
- return result
- }
- b := newBalancer(p.helper, operators.LogicalAnd, result)
- rest := ctx.GetE1()
- for i, op := range ctx.GetOps() {
- if i >= len(rest) {
- return p.reportError(ctx, "unexpected character, wanted '&&'")
- }
- next := p.Visit(rest[i]).(*exprpb.Expr)
- opID := p.helper.id(op)
- b.addTerm(opID, next)
- }
- return b.balance()
-}
-
-// Visit a parse tree produced by CELParser#relation.
-func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} {
- if ctx.Calc() != nil {
- return p.Visit(ctx.Calc())
- }
- opText := ""
- if ctx.GetOp() != nil {
- opText = ctx.GetOp().GetText()
- }
- if op, found := operators.Find(opText); found {
- lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr)
- opID := p.helper.id(ctx.GetOp())
- rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, op, lhs, rhs)
- }
- return p.reportError(ctx, "operator not found")
-}
-
-// Visit a parse tree produced by CELParser#calc.
-func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} {
- if ctx.Unary() != nil {
- return p.Visit(ctx.Unary())
- }
- opText := ""
- if ctx.GetOp() != nil {
- opText = ctx.GetOp().GetText()
- }
- if op, found := operators.Find(opText); found {
- lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr)
- opID := p.helper.id(ctx.GetOp())
- rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, op, lhs, rhs)
- }
- return p.reportError(ctx, "operator not found")
-}
-
-func (p *parser) VisitUnary(ctx *gen.UnaryContext) interface{} {
- return p.helper.newLiteralString(ctx, "<>")
-}
-
-// Visit a parse tree produced by CELParser#MemberExpr.
-func (p *parser) VisitMemberExpr(ctx *gen.MemberExprContext) interface{} {
- switch ctx.Member().(type) {
- case *gen.PrimaryExprContext:
- return p.VisitPrimaryExpr(ctx.Member().(*gen.PrimaryExprContext))
- case *gen.SelectOrCallContext:
- return p.VisitSelectOrCall(ctx.Member().(*gen.SelectOrCallContext))
- case *gen.IndexContext:
- return p.VisitIndex(ctx.Member().(*gen.IndexContext))
- case *gen.CreateMessageContext:
- return p.VisitCreateMessage(ctx.Member().(*gen.CreateMessageContext))
- }
- return p.reportError(ctx, "unsupported simple expression")
-}
-
-// Visit a parse tree produced by CELParser#LogicalNot.
-func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} {
- if len(ctx.GetOps())%2 == 0 {
- return p.Visit(ctx.Member())
- }
- opID := p.helper.id(ctx.GetOps()[0])
- target := p.Visit(ctx.Member()).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, operators.LogicalNot, target)
-}
-
-func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} {
- if len(ctx.GetOps())%2 == 0 {
- return p.Visit(ctx.Member())
- }
- opID := p.helper.id(ctx.GetOps()[0])
- target := p.Visit(ctx.Member()).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, operators.Negate, target)
-}
-
-// Visit a parse tree produced by CELParser#SelectOrCall.
-func (p *parser) VisitSelectOrCall(ctx *gen.SelectOrCallContext) interface{} {
- operand := p.Visit(ctx.Member()).(*exprpb.Expr)
- // Handle the error case where no valid identifier is specified.
- if ctx.GetId() == nil {
- return p.helper.newExpr(ctx)
- }
- id := ctx.GetId().GetText()
- if ctx.GetOpen() != nil {
- opID := p.helper.id(ctx.GetOpen())
- return p.receiverCallOrMacro(opID, id, operand, p.visitList(ctx.GetArgs())...)
- }
- return p.helper.newSelect(ctx.GetOp(), operand, id)
-}
-
-// Visit a parse tree produced by CELParser#PrimaryExpr.
-func (p *parser) VisitPrimaryExpr(ctx *gen.PrimaryExprContext) interface{} {
- switch ctx.Primary().(type) {
- case *gen.NestedContext:
- return p.VisitNested(ctx.Primary().(*gen.NestedContext))
- case *gen.IdentOrGlobalCallContext:
- return p.VisitIdentOrGlobalCall(ctx.Primary().(*gen.IdentOrGlobalCallContext))
- case *gen.CreateListContext:
- return p.VisitCreateList(ctx.Primary().(*gen.CreateListContext))
- case *gen.CreateStructContext:
- return p.VisitCreateStruct(ctx.Primary().(*gen.CreateStructContext))
- case *gen.ConstantLiteralContext:
- return p.VisitConstantLiteral(ctx.Primary().(*gen.ConstantLiteralContext))
- }
-
- return p.reportError(ctx, "invalid primary expression")
-}
-
-// Visit a parse tree produced by CELParser#Index.
-func (p *parser) VisitIndex(ctx *gen.IndexContext) interface{} {
- target := p.Visit(ctx.Member()).(*exprpb.Expr)
- opID := p.helper.id(ctx.GetOp())
- index := p.Visit(ctx.GetIndex()).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, operators.Index, target, index)
-}
-
-// Visit a parse tree produced by CELParser#CreateMessage.
-func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) interface{} {
- target := p.Visit(ctx.Member()).(*exprpb.Expr)
- objID := p.helper.id(ctx.GetOp())
- if messageName, found := p.extractQualifiedName(target); found {
- entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
- return p.helper.newObject(objID, messageName, entries...)
- }
- return p.helper.newExpr(objID)
-}
-
-// Visit a parse tree of field initializers.
-func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) interface{} {
- if ctx == nil || ctx.GetFields() == nil {
- // This is the result of a syntax error handled elswhere, return empty.
- return []*exprpb.Expr_CreateStruct_Entry{}
- }
-
- result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields()))
- cols := ctx.GetCols()
- vals := ctx.GetValues()
- for i, f := range ctx.GetFields() {
- if i >= len(cols) || i >= len(vals) {
- // This is the result of a syntax error detected elsewhere.
- return []*exprpb.Expr_CreateStruct_Entry{}
- }
- initID := p.helper.id(cols[i])
- value := p.Visit(vals[i]).(*exprpb.Expr)
- field := p.helper.newObjectField(initID, f.GetText(), value)
- result[i] = field
- }
- return result
-}
-
-// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
-func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) interface{} {
- identName := ""
- if ctx.GetLeadingDot() != nil {
- identName = "."
- }
- // Handle the error case where no valid identifier is specified.
- if ctx.GetId() == nil {
- return p.helper.newExpr(ctx)
- }
- // Handle reserved identifiers.
- id := ctx.GetId().GetText()
- if _, ok := reservedIds[id]; ok {
- return p.reportError(ctx, "reserved identifier: %s", id)
- }
- identName += id
- if ctx.GetOp() != nil {
- opID := p.helper.id(ctx.GetOp())
- return p.globalCallOrMacro(opID, identName, p.visitList(ctx.GetArgs())...)
- }
- return p.helper.newIdent(ctx.GetId(), identName)
-}
-
-// Visit a parse tree produced by CELParser#Nested.
-func (p *parser) VisitNested(ctx *gen.NestedContext) interface{} {
- return p.Visit(ctx.GetE())
-}
-
-// Visit a parse tree produced by CELParser#CreateList.
-func (p *parser) VisitCreateList(ctx *gen.CreateListContext) interface{} {
- listID := p.helper.id(ctx.GetOp())
- return p.helper.newList(listID, p.visitList(ctx.GetElems())...)
-}
-
-// Visit a parse tree produced by CELParser#CreateStruct.
-func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} {
- structID := p.helper.id(ctx.GetOp())
- entries := []*exprpb.Expr_CreateStruct_Entry{}
- if ctx.GetEntries() != nil {
- entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
- }
- return p.helper.newMap(structID, entries...)
-}
-
-// Visit a parse tree produced by CELParser#ConstantLiteral.
-func (p *parser) VisitConstantLiteral(ctx *gen.ConstantLiteralContext) interface{} {
- switch ctx.Literal().(type) {
- case *gen.IntContext:
- return p.VisitInt(ctx.Literal().(*gen.IntContext))
- case *gen.UintContext:
- return p.VisitUint(ctx.Literal().(*gen.UintContext))
- case *gen.DoubleContext:
- return p.VisitDouble(ctx.Literal().(*gen.DoubleContext))
- case *gen.StringContext:
- return p.VisitString(ctx.Literal().(*gen.StringContext))
- case *gen.BytesContext:
- return p.VisitBytes(ctx.Literal().(*gen.BytesContext))
- case *gen.BoolFalseContext:
- return p.VisitBoolFalse(ctx.Literal().(*gen.BoolFalseContext))
- case *gen.BoolTrueContext:
- return p.VisitBoolTrue(ctx.Literal().(*gen.BoolTrueContext))
- case *gen.NullContext:
- return p.VisitNull(ctx.Literal().(*gen.NullContext))
- }
- return p.reportError(ctx, "invalid literal")
-}
-
-// Visit a parse tree produced by CELParser#mapInitializerList.
-func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) interface{} {
- if ctx == nil || ctx.GetKeys() == nil {
- // This is the result of a syntax error handled elswhere, return empty.
- return []*exprpb.Expr_CreateStruct_Entry{}
- }
-
- result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols()))
- keys := ctx.GetKeys()
- vals := ctx.GetValues()
- for i, col := range ctx.GetCols() {
- colID := p.helper.id(col)
- if i >= len(keys) || i >= len(vals) {
- // This is the result of a syntax error detected elsewhere.
- return []*exprpb.Expr_CreateStruct_Entry{}
- }
- key := p.Visit(keys[i]).(*exprpb.Expr)
- value := p.Visit(vals[i]).(*exprpb.Expr)
- entry := p.helper.newMapEntry(colID, key, value)
- result[i] = entry
- }
- return result
-}
-
-// Visit a parse tree produced by CELParser#Int.
-func (p *parser) VisitInt(ctx *gen.IntContext) interface{} {
- text := ctx.GetTok().GetText()
- base := 10
- if strings.HasPrefix(text, "0x") {
- base = 16
- text = text[2:]
- }
- if ctx.GetSign() != nil {
- text = ctx.GetSign().GetText() + text
- }
- i, err := strconv.ParseInt(text, base, 64)
- if err != nil {
- return p.reportError(ctx, "invalid int literal")
- }
- return p.helper.newLiteralInt(ctx, i)
-}
-
-// Visit a parse tree produced by CELParser#Uint.
-func (p *parser) VisitUint(ctx *gen.UintContext) interface{} {
- text := ctx.GetTok().GetText()
- // trim the 'u' designator included in the uint literal.
- text = text[:len(text)-1]
- base := 10
- if strings.HasPrefix(text, "0x") {
- base = 16
- text = text[2:]
- }
- i, err := strconv.ParseUint(text, base, 64)
- if err != nil {
- return p.reportError(ctx, "invalid uint literal")
- }
- return p.helper.newLiteralUint(ctx, i)
-}
-
-// Visit a parse tree produced by CELParser#Double.
-func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} {
- txt := ctx.GetTok().GetText()
- if ctx.GetSign() != nil {
- txt = ctx.GetSign().GetText() + txt
- }
- f, err := strconv.ParseFloat(txt, 64)
- if err != nil {
- return p.reportError(ctx, "invalid double literal")
- }
- return p.helper.newLiteralDouble(ctx, f)
-
-}
-
-// Visit a parse tree produced by CELParser#String.
-func (p *parser) VisitString(ctx *gen.StringContext) interface{} {
- s := p.unquote(ctx, ctx.GetText(), false)
- return p.helper.newLiteralString(ctx, s)
-}
-
-// Visit a parse tree produced by CELParser#Bytes.
-func (p *parser) VisitBytes(ctx *gen.BytesContext) interface{} {
- b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
- return p.helper.newLiteralBytes(ctx, b)
-}
-
-// Visit a parse tree produced by CELParser#BoolTrue.
-func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) interface{} {
- return p.helper.newLiteralBool(ctx, true)
-}
-
-// Visit a parse tree produced by CELParser#BoolFalse.
-func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) interface{} {
- return p.helper.newLiteralBool(ctx, false)
-}
-
-// Visit a parse tree produced by CELParser#Null.
-func (p *parser) VisitNull(ctx *gen.NullContext) interface{} {
- return p.helper.newLiteral(ctx,
- &exprpb.Constant{
- ConstantKind: &exprpb.Constant_NullValue{
- NullValue: structpb.NullValue_NULL_VALUE}})
-}
-
-func (p *parser) visitList(ctx gen.IExprListContext) []*exprpb.Expr {
- if ctx == nil {
- return []*exprpb.Expr{}
- }
- return p.visitSlice(ctx.GetE())
-}
-
-func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
- if expressions == nil {
- return []*exprpb.Expr{}
- }
- result := make([]*exprpb.Expr, len(expressions))
- for i, e := range expressions {
- ex := p.Visit(e).(*exprpb.Expr)
- result[i] = ex
- }
- return result
-}
-
-func (p *parser) extractQualifiedName(e *exprpb.Expr) (string, bool) {
- if e == nil {
- return "", false
- }
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- return e.GetIdentExpr().GetName(), true
- case *exprpb.Expr_SelectExpr:
- s := e.GetSelectExpr()
- if prefix, found := p.extractQualifiedName(s.GetOperand()); found {
- return prefix + "." + s.GetField(), true
- }
- }
- // TODO: Add a method to Source to get location from character offset.
- location := p.helper.getLocation(e.GetId())
- p.reportError(location, "expected a qualified name")
- return "", false
-}
-
-func (p *parser) unquote(ctx interface{}, value string, isBytes bool) string {
- text, err := unescape(value, isBytes)
- if err != nil {
- p.reportError(ctx, "%s", err.Error())
- return value
- }
- return text
-}
-
-func (p *parser) reportError(ctx interface{}, format string, args ...interface{}) *exprpb.Expr {
- var location common.Location
- switch ctx.(type) {
- case common.Location:
- location = ctx.(common.Location)
- case antlr.Token, antlr.ParserRuleContext:
- err := p.helper.newExpr(ctx)
- location = p.helper.getLocation(err.GetId())
- }
- err := p.helper.newExpr(ctx)
- // Provide arguments to the report error.
- p.errors.ReportError(location, format, args...)
- return err
-}
-
-// ANTLR Parse listener implementations
-func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
- // TODO: Snippet
- l := p.helper.source.NewLocation(line, column)
- p.errors.syntaxError(l, msg)
-}
-
-func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
- // Intentional
-}
-
-func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
- // Intentional
-}
-
-func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) {
- // Intentional
-}
-
-func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr {
- if expr, found := p.expandMacro(exprID, function, nil, args...); found {
- return expr
- }
- return p.helper.newGlobalCall(exprID, function, args...)
-}
-
-func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
- if expr, found := p.expandMacro(exprID, function, target, args...); found {
- return expr
- }
- return p.helper.newReceiverCall(exprID, function, target, args...)
-}
-
-func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) {
- macro, found := p.macros[makeMacroKey(function, len(args), target != nil)]
- if !found {
- macro, found = p.macros[makeVarArgMacroKey(function, target != nil)]
- if !found {
- return nil, false
- }
- }
- eh := exprHelperPool.Get().(*exprHelper)
- defer exprHelperPool.Put(eh)
- eh.parserHelper = p.helper
- eh.id = exprID
- expr, err := macro.Expander()(eh, target, args)
- if err != nil {
- if err.Location != nil {
- return p.reportError(err.Location, err.Message), true
- }
- return p.reportError(p.helper.getLocation(exprID), err.Message), true
- }
- if p.populateMacroCalls {
- p.helper.addMacroCall(expr.GetId(), function, target, args...)
- }
- return expr, true
-}
diff --git a/etcd/vendor/github.com/google/cel-go/parser/unescape.go b/etcd/vendor/github.com/google/cel-go/parser/unescape.go
deleted file mode 100644
index 27c57a9f3a..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/unescape.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-)
-
-// Unescape takes a quoted string, unquotes, and unescapes it.
-//
-// This function performs escaping compatible with GoogleSQL.
-func unescape(value string, isBytes bool) (string, error) {
- // All strings normalize newlines to the \n representation.
- value = newlineNormalizer.Replace(value)
- n := len(value)
-
- // Nothing to unescape / decode.
- if n < 2 {
- return value, fmt.Errorf("unable to unescape string")
- }
-
- // Raw string preceded by the 'r|R' prefix.
- isRawLiteral := false
- if value[0] == 'r' || value[0] == 'R' {
- value = value[1:]
- n = len(value)
- isRawLiteral = true
- }
-
- // Quoted string of some form, must have same first and last char.
- if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') {
- return value, fmt.Errorf("unable to unescape string")
- }
-
- // Normalize the multi-line CEL string representation to a standard
- // Go quoted string.
- if n >= 6 {
- if strings.HasPrefix(value, "'''") {
- if !strings.HasSuffix(value, "'''") {
- return value, fmt.Errorf("unable to unescape string")
- }
- value = "\"" + value[3:n-3] + "\""
- } else if strings.HasPrefix(value, `"""`) {
- if !strings.HasSuffix(value, `"""`) {
- return value, fmt.Errorf("unable to unescape string")
- }
- value = "\"" + value[3:n-3] + "\""
- }
- n = len(value)
- }
- value = value[1 : n-1]
- // If there is nothing to escape, then return.
- if isRawLiteral || !strings.ContainsRune(value, '\\') {
- return value, nil
- }
-
- // Otherwise the string contains escape characters.
- // The following logic is adapted from `strconv/quote.go`
- var runeTmp [utf8.UTFMax]byte
- buf := make([]byte, 0, 3*n/2)
- for len(value) > 0 {
- c, encode, rest, err := unescapeChar(value, isBytes)
- if err != nil {
- return "", err
- }
- value = rest
- if c < utf8.RuneSelf || !encode {
- buf = append(buf, byte(c))
- } else {
- n := utf8.EncodeRune(runeTmp[:], c)
- buf = append(buf, runeTmp[:n]...)
- }
- }
- return string(buf), nil
-}
-
-// unescapeChar takes a string input and returns the following info:
-//
-// value - the escaped unicode rune at the front of the string.
-// encode - the value should be unicode-encoded
-// tail - the remainder of the input string.
-// err - error value, if the character could not be unescaped.
-//
-// When encode is true the return value may still fit within a single byte,
-// but unicode encoding is attempted which is more expensive than when the
-// value is known to self-represent as a single byte.
-//
-// If isBytes is set, unescape as a bytes literal so octal and hex escapes
-// represent byte values, not unicode code points.
-func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, err error) {
- // 1. Character is not an escape sequence.
- switch c := s[0]; {
- case c >= utf8.RuneSelf:
- r, size := utf8.DecodeRuneInString(s)
- return r, true, s[size:], nil
- case c != '\\':
- return rune(s[0]), false, s[1:], nil
- }
-
- // 2. Last character is the start of an escape sequence.
- if len(s) <= 1 {
- err = fmt.Errorf("unable to unescape string, found '\\' as last character")
- return
- }
-
- c := s[1]
- s = s[2:]
- // 3. Common escape sequences shared with Google SQL
- switch c {
- case 'a':
- value = '\a'
- case 'b':
- value = '\b'
- case 'f':
- value = '\f'
- case 'n':
- value = '\n'
- case 'r':
- value = '\r'
- case 't':
- value = '\t'
- case 'v':
- value = '\v'
- case '\\':
- value = '\\'
- case '\'':
- value = '\''
- case '"':
- value = '"'
- case '`':
- value = '`'
- case '?':
- value = '?'
-
- // 4. Unicode escape sequences, reproduced from `strconv/quote.go`
- case 'x', 'X', 'u', 'U':
- n := 0
- encode = true
- switch c {
- case 'x', 'X':
- n = 2
- encode = !isBytes
- case 'u':
- n = 4
- if isBytes {
- err = fmt.Errorf("unable to unescape string")
- return
- }
- case 'U':
- n = 8
- if isBytes {
- err = fmt.Errorf("unable to unescape string")
- return
- }
- }
- var v rune
- if len(s) < n {
- err = fmt.Errorf("unable to unescape string")
- return
- }
- for j := 0; j < n; j++ {
- x, ok := unhex(s[j])
- if !ok {
- err = fmt.Errorf("unable to unescape string")
- return
- }
- v = v<<4 | x
- }
- s = s[n:]
- if !isBytes && v > utf8.MaxRune {
- err = fmt.Errorf("unable to unescape string")
- return
- }
- value = v
-
- // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7]
- case '0', '1', '2', '3':
- if len(s) < 2 {
- err = fmt.Errorf("unable to unescape octal sequence in string")
- return
- }
- v := rune(c - '0')
- for j := 0; j < 2; j++ {
- x := s[j]
- if x < '0' || x > '7' {
- err = fmt.Errorf("unable to unescape octal sequence in string")
- return
- }
- v = v*8 + rune(x-'0')
- }
- if !isBytes && v > utf8.MaxRune {
- err = fmt.Errorf("unable to unescape string")
- return
- }
- value = v
- s = s[2:]
- encode = !isBytes
-
- // Unknown escape sequence.
- default:
- err = fmt.Errorf("unable to unescape string")
- }
-
- tail = s
- return
-}
-
-func unhex(b byte) (rune, bool) {
- c := rune(b)
- switch {
- case '0' <= c && c <= '9':
- return c - '0', true
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10, true
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10, true
- }
- return 0, false
-}
-
-var (
- newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
-)
diff --git a/etcd/vendor/github.com/google/cel-go/parser/unparser.go b/etcd/vendor/github.com/google/cel-go/parser/unparser.go
deleted file mode 100644
index a459bb4a98..0000000000
--- a/etcd/vendor/github.com/google/cel-go/parser/unparser.go
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/google/cel-go/common/operators"
-
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-// Unparse takes an input expression and source position information and generates a human-readable
-// expression.
-//
-// Note, unparsing an AST will often generate the same expression as was originally parsed, but some
-// formatting may be lost in translation, notably:
-//
-// - All quoted literals are doubled quoted.
-// - Byte literals are represented as octal escapes (same as Google SQL).
-// - Floating point values are converted to the small number of digits needed to represent the value.
-// - Spacing around punctuation marks may be lost.
-// - Parentheses will only be applied when they affect operator precedence.
-//
-// This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as
-// performing word wrapping on expressions.
-func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) (string, error) {
- unparserOpts := &unparserOption{
- wrapOnColumn: defaultWrapOnColumn,
- wrapAfterColumnLimit: defaultWrapAfterColumnLimit,
- operatorsToWrapOn: defaultOperatorsToWrapOn,
- }
-
- var err error
- for _, opt := range opts {
- unparserOpts, err = opt(unparserOpts)
- if err != nil {
- return "", err
- }
- }
-
- un := &unparser{
- info: info,
- options: unparserOpts,
- }
- err = un.visit(expr)
- if err != nil {
- return "", err
- }
- return un.str.String(), nil
-}
-
-// unparser visits an expression to reconstruct a human-readable string from an AST.
-type unparser struct {
- str strings.Builder
- info *exprpb.SourceInfo
- options *unparserOption
- lastWrappedIndex int
-}
-
-func (un *unparser) visit(expr *exprpb.Expr) error {
- if expr == nil {
- return errors.New("unsupported expression")
- }
- visited, err := un.visitMaybeMacroCall(expr)
- if visited || err != nil {
- return err
- }
- switch expr.GetExprKind().(type) {
- case *exprpb.Expr_CallExpr:
- return un.visitCall(expr)
- case *exprpb.Expr_ConstExpr:
- return un.visitConst(expr)
- case *exprpb.Expr_IdentExpr:
- return un.visitIdent(expr)
- case *exprpb.Expr_ListExpr:
- return un.visitList(expr)
- case *exprpb.Expr_SelectExpr:
- return un.visitSelect(expr)
- case *exprpb.Expr_StructExpr:
- return un.visitStruct(expr)
- default:
- return fmt.Errorf("unsupported expression: %v", expr)
- }
-}
-
-func (un *unparser) visitCall(expr *exprpb.Expr) error {
- c := expr.GetCallExpr()
- fun := c.GetFunction()
- switch fun {
- // ternary operator
- case operators.Conditional:
- return un.visitCallConditional(expr)
- // index operator
- case operators.Index:
- return un.visitCallIndex(expr)
- // unary operators
- case operators.LogicalNot, operators.Negate:
- return un.visitCallUnary(expr)
- // binary operators
- case operators.Add,
- operators.Divide,
- operators.Equals,
- operators.Greater,
- operators.GreaterEquals,
- operators.In,
- operators.Less,
- operators.LessEquals,
- operators.LogicalAnd,
- operators.LogicalOr,
- operators.Modulo,
- operators.Multiply,
- operators.NotEquals,
- operators.OldIn,
- operators.Subtract:
- return un.visitCallBinary(expr)
- // standard function calls.
- default:
- return un.visitCallFunc(expr)
- }
-}
-
-func (un *unparser) visitCallBinary(expr *exprpb.Expr) error {
- c := expr.GetCallExpr()
- fun := c.GetFunction()
- args := c.GetArgs()
- lhs := args[0]
- // add parens if the current operator is lower precedence than the lhs expr operator.
- lhsParen := isComplexOperatorWithRespectTo(fun, lhs)
- rhs := args[1]
- // add parens if the current operator is lower precedence than the rhs expr operator,
- // or the same precedence and the operator is left recursive.
- rhsParen := isComplexOperatorWithRespectTo(fun, rhs)
- if !rhsParen && isLeftRecursive(fun) {
- rhsParen = isSamePrecedence(fun, rhs)
- }
- err := un.visitMaybeNested(lhs, lhsParen)
- if err != nil {
- return err
- }
- unmangled, found := operators.FindReverseBinaryOperator(fun)
- if !found {
- return fmt.Errorf("cannot unmangle operator: %s", fun)
- }
-
- un.writeOperatorWithWrapping(fun, unmangled)
- return un.visitMaybeNested(rhs, rhsParen)
-}
-
-func (un *unparser) visitCallConditional(expr *exprpb.Expr) error {
- c := expr.GetCallExpr()
- args := c.GetArgs()
- // add parens if operand is a conditional itself.
- nested := isSamePrecedence(operators.Conditional, args[0]) ||
- isComplexOperator(args[0])
- err := un.visitMaybeNested(args[0], nested)
- if err != nil {
- return err
- }
- un.writeOperatorWithWrapping(operators.Conditional, "?")
-
- // add parens if operand is a conditional itself.
- nested = isSamePrecedence(operators.Conditional, args[1]) ||
- isComplexOperator(args[1])
- err = un.visitMaybeNested(args[1], nested)
- if err != nil {
- return err
- }
-
- un.str.WriteString(" : ")
- // add parens if operand is a conditional itself.
- nested = isSamePrecedence(operators.Conditional, args[2]) ||
- isComplexOperator(args[2])
-
- return un.visitMaybeNested(args[2], nested)
-}
-
-func (un *unparser) visitCallFunc(expr *exprpb.Expr) error {
- c := expr.GetCallExpr()
- fun := c.GetFunction()
- args := c.GetArgs()
- if c.GetTarget() != nil {
- nested := isBinaryOrTernaryOperator(c.GetTarget())
- err := un.visitMaybeNested(c.GetTarget(), nested)
- if err != nil {
- return err
- }
- un.str.WriteString(".")
- }
- un.str.WriteString(fun)
- un.str.WriteString("(")
- for i, arg := range args {
- err := un.visit(arg)
- if err != nil {
- return err
- }
- if i < len(args)-1 {
- un.str.WriteString(", ")
- }
- }
- un.str.WriteString(")")
- return nil
-}
-
-func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
- c := expr.GetCallExpr()
- args := c.GetArgs()
- nested := isBinaryOrTernaryOperator(args[0])
- err := un.visitMaybeNested(args[0], nested)
- if err != nil {
- return err
- }
- un.str.WriteString("[")
- err = un.visit(args[1])
- if err != nil {
- return err
- }
- un.str.WriteString("]")
- return nil
-}
-
-func (un *unparser) visitCallUnary(expr *exprpb.Expr) error {
- c := expr.GetCallExpr()
- fun := c.GetFunction()
- args := c.GetArgs()
- unmangled, found := operators.FindReverse(fun)
- if !found {
- return fmt.Errorf("cannot unmangle operator: %s", fun)
- }
- un.str.WriteString(unmangled)
- nested := isComplexOperator(args[0])
- return un.visitMaybeNested(args[0], nested)
-}
-
-func (un *unparser) visitConst(expr *exprpb.Expr) error {
- c := expr.GetConstExpr()
- switch c.GetConstantKind().(type) {
- case *exprpb.Constant_BoolValue:
- un.str.WriteString(strconv.FormatBool(c.GetBoolValue()))
- case *exprpb.Constant_BytesValue:
- // bytes constants are surrounded with b""
- b := c.GetBytesValue()
- un.str.WriteString(`b"`)
- un.str.WriteString(bytesToOctets(b))
- un.str.WriteString(`"`)
- case *exprpb.Constant_DoubleValue:
- // represent the float using the minimum required digits
- d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64)
- un.str.WriteString(d)
- case *exprpb.Constant_Int64Value:
- i := strconv.FormatInt(c.GetInt64Value(), 10)
- un.str.WriteString(i)
- case *exprpb.Constant_NullValue:
- un.str.WriteString("null")
- case *exprpb.Constant_StringValue:
- // strings will be double quoted with quotes escaped.
- un.str.WriteString(strconv.Quote(c.GetStringValue()))
- case *exprpb.Constant_Uint64Value:
- // uint literals have a 'u' suffix.
- ui := strconv.FormatUint(c.GetUint64Value(), 10)
- un.str.WriteString(ui)
- un.str.WriteString("u")
- default:
- return fmt.Errorf("unsupported constant: %v", expr)
- }
- return nil
-}
-
-func (un *unparser) visitIdent(expr *exprpb.Expr) error {
- un.str.WriteString(expr.GetIdentExpr().GetName())
- return nil
-}
-
-func (un *unparser) visitList(expr *exprpb.Expr) error {
- l := expr.GetListExpr()
- elems := l.GetElements()
- un.str.WriteString("[")
- for i, elem := range elems {
- err := un.visit(elem)
- if err != nil {
- return err
- }
- if i < len(elems)-1 {
- un.str.WriteString(", ")
- }
- }
- un.str.WriteString("]")
- return nil
-}
-
-func (un *unparser) visitSelect(expr *exprpb.Expr) error {
- sel := expr.GetSelectExpr()
- // handle the case when the select expression was generated by the has() macro.
- if sel.GetTestOnly() {
- un.str.WriteString("has(")
- }
- nested := !sel.GetTestOnly() && isBinaryOrTernaryOperator(sel.GetOperand())
- err := un.visitMaybeNested(sel.GetOperand(), nested)
- if err != nil {
- return err
- }
- un.str.WriteString(".")
- un.str.WriteString(sel.GetField())
- if sel.GetTestOnly() {
- un.str.WriteString(")")
- }
- return nil
-}
-
-func (un *unparser) visitStruct(expr *exprpb.Expr) error {
- s := expr.GetStructExpr()
- // If the message name is non-empty, then this should be treated as message construction.
- if s.GetMessageName() != "" {
- return un.visitStructMsg(expr)
- }
- // Otherwise, build a map.
- return un.visitStructMap(expr)
-}
-
-func (un *unparser) visitStructMsg(expr *exprpb.Expr) error {
- m := expr.GetStructExpr()
- entries := m.GetEntries()
- un.str.WriteString(m.GetMessageName())
- un.str.WriteString("{")
- for i, entry := range entries {
- f := entry.GetFieldKey()
- un.str.WriteString(f)
- un.str.WriteString(": ")
- v := entry.GetValue()
- err := un.visit(v)
- if err != nil {
- return err
- }
- if i < len(entries)-1 {
- un.str.WriteString(", ")
- }
- }
- un.str.WriteString("}")
- return nil
-}
-
-func (un *unparser) visitStructMap(expr *exprpb.Expr) error {
- m := expr.GetStructExpr()
- entries := m.GetEntries()
- un.str.WriteString("{")
- for i, entry := range entries {
- k := entry.GetMapKey()
- err := un.visit(k)
- if err != nil {
- return err
- }
- un.str.WriteString(": ")
- v := entry.GetValue()
- err = un.visit(v)
- if err != nil {
- return err
- }
- if i < len(entries)-1 {
- un.str.WriteString(", ")
- }
- }
- un.str.WriteString("}")
- return nil
-}
-
-func (un *unparser) visitMaybeMacroCall(expr *exprpb.Expr) (bool, error) {
- macroCalls := un.info.GetMacroCalls()
- call, found := macroCalls[expr.GetId()]
- if !found {
- return false, nil
- }
- return true, un.visit(call)
-}
-
-func (un *unparser) visitMaybeNested(expr *exprpb.Expr, nested bool) error {
- if nested {
- un.str.WriteString("(")
- }
- err := un.visit(expr)
- if err != nil {
- return err
- }
- if nested {
- un.str.WriteString(")")
- }
- return nil
-}
-
-// isLeftRecursive indicates whether the parser resolves the call in a left-recursive manner as
-// this can have an effect of how parentheses affect the order of operations in the AST.
-func isLeftRecursive(op string) bool {
- return op != operators.LogicalAnd && op != operators.LogicalOr
-}
-
-// isSamePrecedence indicates whether the precedence of the input operator is the same as the
-// precedence of the (possible) operation represented in the input Expr.
-//
-// If the expr is not a Call, the result is false.
-func isSamePrecedence(op string, expr *exprpb.Expr) bool {
- if expr.GetCallExpr() == nil {
- return false
- }
- c := expr.GetCallExpr()
- other := c.GetFunction()
- return operators.Precedence(op) == operators.Precedence(other)
-}
-
-// isLowerPrecedence indicates whether the precedence of the input operator is lower precedence
-// than the (possible) operation represented in the input Expr.
-//
-// If the expr is not a Call, the result is false.
-func isLowerPrecedence(op string, expr *exprpb.Expr) bool {
- c := expr.GetCallExpr()
- other := c.GetFunction()
- return operators.Precedence(op) < operators.Precedence(other)
-}
-
-// Indicates whether the expr is a complex operator, i.e., a call expression
-// with 2 or more arguments.
-func isComplexOperator(expr *exprpb.Expr) bool {
- if expr.GetCallExpr() != nil && len(expr.GetCallExpr().GetArgs()) >= 2 {
- return true
- }
- return false
-}
-
-// Indicates whether it is a complex operation compared to another.
-// expr is *not* considered complex if it is not a call expression or has
-// less than two arguments, or if it has a higher precedence than op.
-func isComplexOperatorWithRespectTo(op string, expr *exprpb.Expr) bool {
- if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 {
- return false
- }
- return isLowerPrecedence(op, expr)
-}
-
-// Indicate whether this is a binary or ternary operator.
-func isBinaryOrTernaryOperator(expr *exprpb.Expr) bool {
- if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 {
- return false
- }
- _, isBinaryOp := operators.FindReverseBinaryOperator(expr.GetCallExpr().GetFunction())
- return isBinaryOp || isSamePrecedence(operators.Conditional, expr)
-}
-
-// bytesToOctets converts byte sequences to a string using a three digit octal encoded value
-// per byte.
-func bytesToOctets(byteVal []byte) string {
- var b strings.Builder
- for _, c := range byteVal {
- fmt.Fprintf(&b, "\\%03o", c)
- }
- return b.String()
-}
-
-// writeOperatorWithWrapping outputs the operator and inserts a newline for operators configured
-// in the unparser options.
-func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool {
- _, wrapOperatorExists := un.options.operatorsToWrapOn[fun]
- lineLength := un.str.Len() - un.lastWrappedIndex + len(fun)
-
- if wrapOperatorExists && lineLength >= un.options.wrapOnColumn {
- un.lastWrappedIndex = un.str.Len()
- // wrapAfterColumnLimit flag dictates whether the newline is placed
- // before or after the operator
- if un.options.wrapAfterColumnLimit {
- // Input: a && b
- // Output: a &&\nb
- un.str.WriteString(" ")
- un.str.WriteString(unmangled)
- un.str.WriteString("\n")
- } else {
- // Input: a && b
- // Output: a\n&& b
- un.str.WriteString("\n")
- un.str.WriteString(unmangled)
- un.str.WriteString(" ")
- }
- return true
- } else {
- un.str.WriteString(" ")
- un.str.WriteString(unmangled)
- un.str.WriteString(" ")
- }
- return false
-}
-
-// Defined defaults for the unparser options
-var (
- defaultWrapOnColumn = 80
- defaultWrapAfterColumnLimit = true
- defaultOperatorsToWrapOn = map[string]bool{
- operators.LogicalAnd: true,
- operators.LogicalOr: true,
- }
-)
-
-// UnparserOption is a functional option for configuring the output formatting
-// of the Unparse function.
-type UnparserOption func(*unparserOption) (*unparserOption, error)
-
-// Internal representation of the UnparserOption type
-type unparserOption struct {
- wrapOnColumn int
- operatorsToWrapOn map[string]bool
- wrapAfterColumnLimit bool
-}
-
-// WrapOnColumn wraps the output expression when its string length exceeds a specified limit
-// for operators set by WrapOnOperators function or by default, "&&" and "||" will be wrapped.
-//
-// Example usage:
-//
-// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd))
-//
-// This will insert a newline immediately after the logical AND operator for the below example input:
-//
-// Input:
-// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m')
-//
-// Output:
-// 'my-principal-group' in request.auth.claims &&
-// request.auth.claims.iat > now - duration('5m')
-func WrapOnColumn(col int) UnparserOption {
- return func(opt *unparserOption) (*unparserOption, error) {
- if col < 1 {
- return nil, fmt.Errorf("Invalid unparser option. Wrap column value must be greater than or equal to 1. Got %v instead", col)
- }
- opt.wrapOnColumn = col
- return opt, nil
- }
-}
-
-// WrapOnOperators specifies which operators to perform word wrapping on an output expression when its string length
-// exceeds the column limit set by WrapOnColumn function.
-//
-// Word wrapping is supported on non-unary symbolic operators. Refer to operators.go for the full list
-//
-// This will replace any previously supplied operators instead of merging them.
-func WrapOnOperators(symbols ...string) UnparserOption {
- return func(opt *unparserOption) (*unparserOption, error) {
- opt.operatorsToWrapOn = make(map[string]bool)
- for _, symbol := range symbols {
- _, found := operators.FindReverse(symbol)
- if !found {
- return nil, fmt.Errorf("Invalid unparser option. Unsupported operator: %s", symbol)
- }
- arity := operators.Arity(symbol)
- if arity < 2 {
- return nil, fmt.Errorf("Invalid unparser option. Unary operators are unsupported: %s", symbol)
- }
-
- opt.operatorsToWrapOn[symbol] = true
- }
-
- return opt, nil
- }
-}
-
-// WrapAfterColumnLimit dictates whether to insert a newline before or after the specified operator
-// when word wrapping is performed.
-//
-// Example usage:
-//
-// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd), WrapAfterColumnLimit(false))
-//
-// This will insert a newline immediately before the logical AND operator for the below example input, ensuring
-// that the length of a line never exceeds the specified column limit:
-//
-// Input:
-// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m')
-//
-// Output:
-// 'my-principal-group' in request.auth.claims
-// && request.auth.claims.iat > now - duration('5m')
-func WrapAfterColumnLimit(wrapAfter bool) UnparserOption {
- return func(opt *unparserOption) (*unparserOption, error) {
- opt.wrapAfterColumnLimit = wrapAfter
- return opt, nil
- }
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/LICENSE b/etcd/vendor/github.com/moby/sys/mountinfo/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/doc.go b/etcd/vendor/github.com/moby/sys/mountinfo/doc.go
deleted file mode 100644
index b80e05efd0..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Package mountinfo provides a set of functions to retrieve information about OS mounts.
-//
-// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD and OpenBSD,
-// and a shallow implementation for Windows, but in general this is Linux-only package, so
-// the rest of the document only applies to Linux, unless explicitly specified otherwise.
-//
-// In Linux, information about mounts seen by the current process is available from
-// /proc/self/mountinfo. Note that due to mount namespaces, different processes can
-// see different mounts. A per-process mountinfo table is available from /proc//mountinfo,
-// where is a numerical process identifier.
-//
-// In general, /proc is not a very efficient interface, and mountinfo is not an exception.
-// For example, there is no way to get information about a specific mount point (i.e. it
-// is all-or-nothing). This package tries to hide the /proc ineffectiveness by using
-// parse filters while reading mountinfo. A filter can skip some entries, or stop
-// processing the rest of the file once the needed information is found.
-//
-// For mountinfo filters that accept path as an argument, the path must be absolute,
-// having all symlinks resolved, and being cleaned (i.e. no extra slashes or dots).
-// One way to achieve all of the above is to employ filepath.Abs followed by
-// filepath.EvalSymlinks (the latter calls filepath.Clean on the result so
-// there is no need to explicitly call filepath.Clean).
-//
-// NOTE that in many cases there is no need to consult mountinfo at all. Here are some
-// of the cases where mountinfo should not be parsed:
-//
-// 1. Before performing a mount. Usually, this is not needed, but if required (say to
-// prevent over-mounts), to check whether a directory is mounted, call os.Lstat
-// on it and its parent directory, and compare their st.Sys().(*syscall.Stat_t).Dev
-// fields -- if they differ, then the directory is the mount point. NOTE this does
-// not work for bind mounts. Optionally, the filesystem type can also be checked
-// by calling unix.Statfs and checking the Type field (i.e. filesystem type).
-//
-// 2. After performing a mount. If there is no error returned, the mount succeeded;
-// checking the mount table for a new mount is redundant and expensive.
-//
-// 3. Before performing an unmount. It is more efficient to do an unmount and ignore
-// a specific error (EINVAL) which tells the directory is not mounted.
-//
-// 4. After performing an unmount. If there is no error returned, the unmount succeeded.
-//
-// 5. To find the mount point root of a specific directory. You can perform os.Stat()
-// on the directory and traverse up until the Dev field of a parent directory differs.
-package mountinfo
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/etcd/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
deleted file mode 100644
index e78e726196..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mounted_linux.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package mountinfo
-
-import (
- "os"
- "path/filepath"
-
- "golang.org/x/sys/unix"
-)
-
-// MountedFast is a method of detecting a mount point without reading
-// mountinfo from procfs. A caller can only trust the result if no error
-// and sure == true are returned. Otherwise, other methods (e.g. parsing
-// /proc/mounts) have to be used. If unsure, use Mounted instead (which
-// uses MountedFast, but falls back to parsing mountinfo if needed).
-//
-// If a non-existent path is specified, an appropriate error is returned.
-// In case the caller is not interested in this particular error, it should
-// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
-//
-// This function is only available on Linux. When available (since kernel
-// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise,
-// the implementation falls back to using stat(2), which can reliably detect
-// normal (but not bind) mounts.
-func MountedFast(path string) (mounted, sure bool, err error) {
- // Root is always mounted.
- if path == string(os.PathSeparator) {
- return true, true, nil
- }
-
- path, err = normalizePath(path)
- if err != nil {
- return false, false, err
- }
- mounted, sure, err = mountedFast(path)
- return
-}
-
-// mountedByOpenat2 is a method of detecting a mount that works for all kinds
-// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel.
-func mountedByOpenat2(path string) (bool, error) {
- dir, last := filepath.Split(path)
-
- dirfd, err := unix.Openat2(unix.AT_FDCWD, dir, &unix.OpenHow{
- Flags: unix.O_PATH | unix.O_CLOEXEC,
- })
- if err != nil {
- return false, &os.PathError{Op: "openat2", Path: dir, Err: err}
- }
- fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{
- Flags: unix.O_PATH | unix.O_CLOEXEC | unix.O_NOFOLLOW,
- Resolve: unix.RESOLVE_NO_XDEV,
- })
- _ = unix.Close(dirfd)
- switch err { //nolint:errorlint // unix errors are bare
- case nil: // definitely not a mount
- _ = unix.Close(fd)
- return false, nil
- case unix.EXDEV: // definitely a mount
- return true, nil
- }
- // not sure
- return false, &os.PathError{Op: "openat2", Path: path, Err: err}
-}
-
-// mountedFast is similar to MountedFast, except it expects a normalized path.
-func mountedFast(path string) (mounted, sure bool, err error) {
- // Root is always mounted.
- if path == string(os.PathSeparator) {
- return true, true, nil
- }
-
- // Try a fast path, using openat2() with RESOLVE_NO_XDEV.
- mounted, err = mountedByOpenat2(path)
- if err == nil {
- return mounted, true, nil
- }
-
- // Another fast path: compare st.st_dev fields.
- mounted, err = mountedByStat(path)
- // This does not work for bind mounts, so false negative
- // is possible, therefore only trust if return is true.
- if mounted && err == nil {
- return true, true, nil
- }
-
- return
-}
-
-func mounted(path string) (bool, error) {
- path, err := normalizePath(path)
- if err != nil {
- return false, err
- }
- mounted, sure, err := mountedFast(path)
- if sure && err == nil {
- return mounted, nil
- }
-
- // Fallback to parsing mountinfo.
- return mountedByMountinfo(path)
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/etcd/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
deleted file mode 100644
index c7b7678f9a..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mounted_unix.go
+++ /dev/null
@@ -1,53 +0,0 @@
-//go:build linux || freebsd || openbsd || darwin
-// +build linux freebsd openbsd darwin
-
-package mountinfo
-
-import (
- "os"
- "path/filepath"
-
- "golang.org/x/sys/unix"
-)
-
-func mountedByStat(path string) (bool, error) {
- var st unix.Stat_t
-
- if err := unix.Lstat(path, &st); err != nil {
- return false, &os.PathError{Op: "stat", Path: path, Err: err}
- }
- dev := st.Dev
- parent := filepath.Dir(path)
- if err := unix.Lstat(parent, &st); err != nil {
- return false, &os.PathError{Op: "stat", Path: parent, Err: err}
- }
- if dev != st.Dev {
- // Device differs from that of parent,
- // so definitely a mount point.
- return true, nil
- }
- // NB: this does not detect bind mounts on Linux.
- return false, nil
-}
-
-func normalizePath(path string) (realPath string, err error) {
- if realPath, err = filepath.Abs(path); err != nil {
- return "", err
- }
- if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
- return "", err
- }
- if _, err := os.Stat(realPath); err != nil {
- return "", err
- }
- return realPath, nil
-}
-
-func mountedByMountinfo(path string) (bool, error) {
- entries, err := GetMounts(SingleEntryFilter(path))
- if err != nil {
- return false, err
- }
-
- return len(entries) > 0, nil
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo.go
deleted file mode 100644
index 574aeb8767..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package mountinfo
-
-import (
- "os"
-)
-
-// GetMounts retrieves a list of mounts for the current running process,
-// with an optional filter applied (use nil for no filter).
-func GetMounts(f FilterFunc) ([]*Info, error) {
- return parseMountTable(f)
-}
-
-// Mounted determines if a specified path is a mount point. In case of any
-// error, false (and an error) is returned.
-//
-// If a non-existent path is specified, an appropriate error is returned.
-// In case the caller is not interested in this particular error, it should
-// be handled separately using e.g. errors.Is(err, fs.ErrNotExist).
-func Mounted(path string) (bool, error) {
- // root is always mounted
- if path == string(os.PathSeparator) {
- return true, nil
- }
- return mounted(path)
-}
-
-// Info reveals information about a particular mounted filesystem. This
-// struct is populated from the content in the /proc//mountinfo file.
-type Info struct {
- // ID is a unique identifier of the mount (may be reused after umount).
- ID int
-
- // Parent is the ID of the parent mount (or of self for the root
- // of this mount namespace's mount tree).
- Parent int
-
- // Major and Minor are the major and the minor components of the Dev
- // field of unix.Stat_t structure returned by unix.*Stat calls for
- // files on this filesystem.
- Major, Minor int
-
- // Root is the pathname of the directory in the filesystem which forms
- // the root of this mount.
- Root string
-
- // Mountpoint is the pathname of the mount point relative to the
- // process's root directory.
- Mountpoint string
-
- // Options is a comma-separated list of mount options.
- Options string
-
- // Optional are zero or more fields of the form "tag[:value]",
- // separated by a space. Currently, the possible optional fields are
- // "shared", "master", "propagate_from", and "unbindable". For more
- // information, see mount_namespaces(7) Linux man page.
- Optional string
-
- // FSType is the filesystem type in the form "type[.subtype]".
- FSType string
-
- // Source is filesystem-specific information, or "none".
- Source string
-
- // VFSOptions is a comma-separated list of superblock options.
- VFSOptions string
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
deleted file mode 100644
index 8420f58c7a..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
+++ /dev/null
@@ -1,56 +0,0 @@
-//go:build freebsd || openbsd || darwin
-// +build freebsd openbsd darwin
-
-package mountinfo
-
-import "golang.org/x/sys/unix"
-
-// parseMountTable returns information about mounted filesystems
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
- count, err := unix.Getfsstat(nil, unix.MNT_WAIT)
- if err != nil {
- return nil, err
- }
-
- entries := make([]unix.Statfs_t, count)
- _, err = unix.Getfsstat(entries, unix.MNT_WAIT)
- if err != nil {
- return nil, err
- }
-
- var out []*Info
- for _, entry := range entries {
- var skip, stop bool
- mountinfo := getMountinfo(&entry)
-
- if filter != nil {
- // filter out entries we're not interested in
- skip, stop = filter(mountinfo)
- if skip {
- continue
- }
- }
-
- out = append(out, mountinfo)
- if stop {
- break
- }
- }
- return out, nil
-}
-
-func mounted(path string) (bool, error) {
- path, err := normalizePath(path)
- if err != nil {
- return false, err
- }
- // Fast path: compare st.st_dev fields.
- // This should always work for FreeBSD and OpenBSD.
- mounted, err := mountedByStat(path)
- if err == nil {
- return mounted, nil
- }
-
- // Fallback to parsing mountinfo
- return mountedByMountinfo(path)
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
deleted file mode 100644
index 16079c3c54..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package mountinfo
-
-import "strings"
-
-// FilterFunc is a type defining a callback function for GetMount(),
-// used to filter out mountinfo entries we're not interested in,
-// and/or stop further processing if we found what we wanted.
-//
-// It takes a pointer to the Info struct (fully populated with all available
-// fields on the GOOS platform), and returns two booleans:
-//
-// skip: true if the entry should be skipped;
-//
-// stop: true if parsing should be stopped after the entry.
-type FilterFunc func(*Info) (skip, stop bool)
-
-// PrefixFilter discards all entries whose mount points do not start with, or
-// are equal to the path specified in prefix. The prefix path must be absolute,
-// have all symlinks resolved, and cleaned (i.e. no extra slashes or dots).
-//
-// PrefixFilter treats prefix as a path, not a partial prefix, which means that
-// given "/foo", "/foo/bar" and "/foobar" entries, PrefixFilter("/foo") returns
-// "/foo" and "/foo/bar", and discards "/foobar".
-func PrefixFilter(prefix string) FilterFunc {
- return func(m *Info) (bool, bool) {
- skip := !strings.HasPrefix(m.Mountpoint+"/", prefix+"/")
- return skip, false
- }
-}
-
-// SingleEntryFilter looks for a specific entry.
-func SingleEntryFilter(mp string) FilterFunc {
- return func(m *Info) (bool, bool) {
- if m.Mountpoint == mp {
- return false, true // don't skip, stop now
- }
- return true, false // skip, keep going
- }
-}
-
-// ParentsFilter returns all entries whose mount points
-// can be parents of a path specified, discarding others.
-//
-// For example, given /var/lib/docker/something, entries
-// like /var/lib/docker, /var and / are returned.
-func ParentsFilter(path string) FilterFunc {
- return func(m *Info) (bool, bool) {
- skip := !strings.HasPrefix(path, m.Mountpoint)
- return skip, false
- }
-}
-
-// FSTypeFilter returns all entries that match provided fstype(s).
-func FSTypeFilter(fstype ...string) FilterFunc {
- return func(m *Info) (bool, bool) {
- for _, t := range fstype {
- if m.FSType == t {
- return false, false // don't skip, keep going
- }
- }
- return true, false // skip, keep going
- }
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
deleted file mode 100644
index ecaaa7a9c1..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build freebsd || darwin
-// +build freebsd darwin
-
-package mountinfo
-
-import "golang.org/x/sys/unix"
-
-func getMountinfo(entry *unix.Statfs_t) *Info {
- return &Info{
- Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]),
- FSType: unix.ByteSliceToString(entry.Fstypename[:]),
- Source: unix.ByteSliceToString(entry.Mntfromname[:]),
- }
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
deleted file mode 100644
index 59332b07bf..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package mountinfo
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// GetMountsFromReader retrieves a list of mounts from the
-// reader provided, with an optional filter applied (use nil
-// for no filter). This can be useful in tests or benchmarks
-// that provide fake mountinfo data, or when a source other
-// than /proc/self/mountinfo needs to be read from.
-//
-// This function is Linux-specific.
-func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) {
- s := bufio.NewScanner(r)
- out := []*Info{}
- for s.Scan() {
- var err error
-
- /*
- See http://man7.org/linux/man-pages/man5/proc.5.html
-
- 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
- (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
-
- (1) mount ID: unique identifier of the mount (may be reused after umount)
- (2) parent ID: ID of parent (or of self for the top of the mount tree)
- (3) major:minor: value of st_dev for files on filesystem
- (4) root: root of the mount within the filesystem
- (5) mount point: mount point relative to the process's root
- (6) mount options: per mount options
- (7) optional fields: zero or more fields of the form "tag[:value]"
- (8) separator: marks the end of the optional fields
- (9) filesystem type: name of filesystem of the form "type[.subtype]"
- (10) mount source: filesystem specific information or "none"
- (11) super options: per super block options
-
- In other words, we have:
- * 6 mandatory fields (1)..(6)
- * 0 or more optional fields (7)
- * a separator field (8)
- * 3 mandatory fields (9)..(11)
- */
-
- text := s.Text()
- fields := strings.Split(text, " ")
- numFields := len(fields)
- if numFields < 10 {
- // should be at least 10 fields
- return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
- }
-
- // separator field
- sepIdx := numFields - 4
- // In Linux <= 3.9 mounting a cifs with spaces in a share
- // name (like "//srv/My Docs") _may_ end up having a space
- // in the last field of mountinfo (like "unc=//serv/My Docs").
- // Since kernel 3.10-rc1, cifs option "unc=" is ignored,
- // so spaces should not appear.
- //
- // Check for a separator, and work around the spaces bug
- for fields[sepIdx] != "-" {
- sepIdx--
- if sepIdx == 5 {
- return nil, fmt.Errorf("parsing '%s' failed: missing - separator", text)
- }
- }
-
- p := &Info{}
-
- p.Mountpoint, err = unescape(fields[4])
- if err != nil {
- return nil, fmt.Errorf("parsing '%s' failed: mount point: %w", fields[4], err)
- }
- p.FSType, err = unescape(fields[sepIdx+1])
- if err != nil {
- return nil, fmt.Errorf("parsing '%s' failed: fstype: %w", fields[sepIdx+1], err)
- }
- p.Source, err = unescape(fields[sepIdx+2])
- if err != nil {
- return nil, fmt.Errorf("parsing '%s' failed: source: %w", fields[sepIdx+2], err)
- }
- p.VFSOptions = fields[sepIdx+3]
-
- // ignore any numbers parsing errors, as there should not be any
- p.ID, _ = strconv.Atoi(fields[0])
- p.Parent, _ = strconv.Atoi(fields[1])
- mm := strings.SplitN(fields[2], ":", 3)
- if len(mm) != 2 {
- return nil, fmt.Errorf("parsing '%s' failed: unexpected major:minor pair %s", text, mm)
- }
- p.Major, _ = strconv.Atoi(mm[0])
- p.Minor, _ = strconv.Atoi(mm[1])
-
- p.Root, err = unescape(fields[3])
- if err != nil {
- return nil, fmt.Errorf("parsing '%s' failed: root: %w", fields[3], err)
- }
-
- p.Options = fields[5]
-
- // zero or more optional fields
- p.Optional = strings.Join(fields[6:sepIdx], " ")
-
- // Run the filter after parsing all fields.
- var skip, stop bool
- if filter != nil {
- skip, stop = filter(p)
- if skip {
- continue
- }
- }
-
- out = append(out, p)
- if stop {
- break
- }
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return GetMountsFromReader(f, filter)
-}
-
-// PidMountInfo retrieves the list of mounts from a given process' mount
-// namespace. Unless there is a need to get mounts from a mount namespace
-// different from that of a calling process, use GetMounts.
-//
-// This function is Linux-specific.
-//
-// Deprecated: this will be removed before v1; use GetMountsFromReader with
-// opened /proc//mountinfo as an argument instead.
-func PidMountInfo(pid int) ([]*Info, error) {
- f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return GetMountsFromReader(f, nil)
-}
-
-// A few specific characters in mountinfo path entries (root and mountpoint)
-// are escaped using a backslash followed by a character's ascii code in octal.
-//
-// space -- as \040
-// tab (aka \t) -- as \011
-// newline (aka \n) -- as \012
-// backslash (aka \\) -- as \134
-//
-// This function converts path from mountinfo back, i.e. it unescapes the above sequences.
-func unescape(path string) (string, error) {
- // try to avoid copying
- if strings.IndexByte(path, '\\') == -1 {
- return path, nil
- }
-
- // The following code is UTF-8 transparent as it only looks for some
- // specific characters (backslash and 0..7) with values < utf8.RuneSelf,
- // and everything else is passed through as is.
- buf := make([]byte, len(path))
- bufLen := 0
- for i := 0; i < len(path); i++ {
- if path[i] != '\\' {
- buf[bufLen] = path[i]
- bufLen++
- continue
- }
- s := path[i:]
- if len(s) < 4 {
- // too short
- return "", fmt.Errorf("bad escape sequence %q: too short", s)
- }
- c := s[1]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7':
- v := c - '0'
- for j := 2; j < 4; j++ { // one digit already; two more
- if s[j] < '0' || s[j] > '7' {
- return "", fmt.Errorf("bad escape sequence %q: not a digit", s[:3])
- }
- x := s[j] - '0'
- v = (v << 3) | x
- }
- if v > 255 {
- return "", fmt.Errorf("bad escape sequence %q: out of range" + s[:3])
- }
- buf[bufLen] = v
- bufLen++
- i += 3
- continue
- default:
- return "", fmt.Errorf("bad escape sequence %q: not a digit" + s[:3])
-
- }
- }
-
- return string(buf[:bufLen]), nil
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
deleted file mode 100644
index f682c2d3b5..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package mountinfo
-
-import "golang.org/x/sys/unix"
-
-func getMountinfo(entry *unix.Statfs_t) *Info {
- return &Info{
- Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]),
- FSType: unix.ByteSliceToString(entry.F_fstypename[:]),
- Source: unix.ByteSliceToString(entry.F_mntfromname[:]),
- }
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
deleted file mode 100644
index c2e64bc81c..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build !windows && !linux && !freebsd && !openbsd && !darwin
-// +build !windows,!linux,!freebsd,!openbsd,!darwin
-
-package mountinfo
-
-import (
- "fmt"
- "runtime"
-)
-
-var errNotImplemented = fmt.Errorf("not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-
-func parseMountTable(_ FilterFunc) ([]*Info, error) {
- return nil, errNotImplemented
-}
-
-func mounted(path string) (bool, error) {
- return false, errNotImplemented
-}
diff --git a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go b/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
deleted file mode 100644
index 13fad165e5..0000000000
--- a/etcd/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package mountinfo
-
-func parseMountTable(_ FilterFunc) ([]*Info, error) {
- // Do NOT return an error!
- return nil, nil
-}
-
-func mounted(_ string) (bool, error) {
- return false, nil
-}
diff --git a/etcd/vendor/github.com/mxk/go-flowrate/LICENSE b/etcd/vendor/github.com/mxk/go-flowrate/LICENSE
deleted file mode 100644
index e9f9f628ba..0000000000
--- a/etcd/vendor/github.com/mxk/go-flowrate/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the
- distribution.
-
- * Neither the name of the go-flowrate project nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/etcd/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go b/etcd/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go
deleted file mode 100644
index 1b727721e1..0000000000
--- a/etcd/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go
+++ /dev/null
@@ -1,267 +0,0 @@
-//
-// Written by Maxim Khitrov (November 2012)
-//
-
-// Package flowrate provides the tools for monitoring and limiting the flow rate
-// of an arbitrary data stream.
-package flowrate
-
-import (
- "math"
- "sync"
- "time"
-)
-
-// Monitor monitors and limits the transfer rate of a data stream.
-type Monitor struct {
- mu sync.Mutex // Mutex guarding access to all internal fields
- active bool // Flag indicating an active transfer
- start time.Duration // Transfer start time (clock() value)
- bytes int64 // Total number of bytes transferred
- samples int64 // Total number of samples taken
-
- rSample float64 // Most recent transfer rate sample (bytes per second)
- rEMA float64 // Exponential moving average of rSample
- rPeak float64 // Peak transfer rate (max of all rSamples)
- rWindow float64 // rEMA window (seconds)
-
- sBytes int64 // Number of bytes transferred since sLast
- sLast time.Duration // Most recent sample time (stop time when inactive)
- sRate time.Duration // Sampling rate
-
- tBytes int64 // Number of bytes expected in the current transfer
- tLast time.Duration // Time of the most recent transfer of at least 1 byte
-}
-
-// New creates a new flow control monitor. Instantaneous transfer rate is
-// measured and updated for each sampleRate interval. windowSize determines the
-// weight of each sample in the exponential moving average (EMA) calculation.
-// The exact formulas are:
-//
-// sampleTime = currentTime - prevSampleTime
-// sampleRate = byteCount / sampleTime
-// weight = 1 - exp(-sampleTime/windowSize)
-// newRate = weight*sampleRate + (1-weight)*oldRate
-//
-// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s,
-// respectively.
-func New(sampleRate, windowSize time.Duration) *Monitor {
- if sampleRate = clockRound(sampleRate); sampleRate <= 0 {
- sampleRate = 5 * clockRate
- }
- if windowSize <= 0 {
- windowSize = 1 * time.Second
- }
- now := clock()
- return &Monitor{
- active: true,
- start: now,
- rWindow: windowSize.Seconds(),
- sLast: now,
- sRate: sampleRate,
- tLast: now,
- }
-}
-
-// Update records the transfer of n bytes and returns n. It should be called
-// after each Read/Write operation, even if n is 0.
-func (m *Monitor) Update(n int) int {
- m.mu.Lock()
- m.update(n)
- m.mu.Unlock()
- return n
-}
-
-// IO is a convenience method intended to wrap io.Reader and io.Writer method
-// execution. It calls m.Update(n) and then returns (n, err) unmodified.
-func (m *Monitor) IO(n int, err error) (int, error) {
- return m.Update(n), err
-}
-
-// Done marks the transfer as finished and prevents any further updates or
-// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and
-// Limit methods become NOOPs. It returns the total number of bytes transferred.
-func (m *Monitor) Done() int64 {
- m.mu.Lock()
- if now := m.update(0); m.sBytes > 0 {
- m.reset(now)
- }
- m.active = false
- m.tLast = 0
- n := m.bytes
- m.mu.Unlock()
- return n
-}
-
-// timeRemLimit is the maximum Status.TimeRem value.
-const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second
-
-// Status represents the current Monitor status. All transfer rates are in bytes
-// per second rounded to the nearest byte.
-type Status struct {
- Active bool // Flag indicating an active transfer
- Start time.Time // Transfer start time
- Duration time.Duration // Time period covered by the statistics
- Idle time.Duration // Time since the last transfer of at least 1 byte
- Bytes int64 // Total number of bytes transferred
- Samples int64 // Total number of samples taken
- InstRate int64 // Instantaneous transfer rate
- CurRate int64 // Current transfer rate (EMA of InstRate)
- AvgRate int64 // Average transfer rate (Bytes / Duration)
- PeakRate int64 // Maximum instantaneous transfer rate
- BytesRem int64 // Number of bytes remaining in the transfer
- TimeRem time.Duration // Estimated time to completion
- Progress Percent // Overall transfer progress
-}
-
-// Status returns current transfer status information. The returned value
-// becomes static after a call to Done.
-func (m *Monitor) Status() Status {
- m.mu.Lock()
- now := m.update(0)
- s := Status{
- Active: m.active,
- Start: clockToTime(m.start),
- Duration: m.sLast - m.start,
- Idle: now - m.tLast,
- Bytes: m.bytes,
- Samples: m.samples,
- PeakRate: round(m.rPeak),
- BytesRem: m.tBytes - m.bytes,
- Progress: percentOf(float64(m.bytes), float64(m.tBytes)),
- }
- if s.BytesRem < 0 {
- s.BytesRem = 0
- }
- if s.Duration > 0 {
- rAvg := float64(s.Bytes) / s.Duration.Seconds()
- s.AvgRate = round(rAvg)
- if s.Active {
- s.InstRate = round(m.rSample)
- s.CurRate = round(m.rEMA)
- if s.BytesRem > 0 {
- if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 {
- ns := float64(s.BytesRem) / tRate * 1e9
- if ns > float64(timeRemLimit) {
- ns = float64(timeRemLimit)
- }
- s.TimeRem = clockRound(time.Duration(ns))
- }
- }
- }
- }
- m.mu.Unlock()
- return s
-}
-
-// Limit restricts the instantaneous (per-sample) data flow to rate bytes per
-// second. It returns the maximum number of bytes (0 <= n <= want) that may be
-// transferred immediately without exceeding the limit. If block == true, the
-// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1,
-// or the transfer is inactive (after a call to Done).
-//
-// At least one byte is always allowed to be transferred in any given sampling
-// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate
-// is 10 bytes per second.
-//
-// For usage examples, see the implementation of Reader and Writer in io.go.
-func (m *Monitor) Limit(want int, rate int64, block bool) (n int) {
- if want < 1 || rate < 1 {
- return want
- }
- m.mu.Lock()
-
- // Determine the maximum number of bytes that can be sent in one sample
- limit := round(float64(rate) * m.sRate.Seconds())
- if limit <= 0 {
- limit = 1
- }
-
- // If block == true, wait until m.sBytes < limit
- if now := m.update(0); block {
- for m.sBytes >= limit && m.active {
- now = m.waitNextSample(now)
- }
- }
-
- // Make limit <= want (unlimited if the transfer is no longer active)
- if limit -= m.sBytes; limit > int64(want) || !m.active {
- limit = int64(want)
- }
- m.mu.Unlock()
-
- if limit < 0 {
- limit = 0
- }
- return int(limit)
-}
-
-// SetTransferSize specifies the total size of the data transfer, which allows
-// the Monitor to calculate the overall progress and time to completion.
-func (m *Monitor) SetTransferSize(bytes int64) {
- if bytes < 0 {
- bytes = 0
- }
- m.mu.Lock()
- m.tBytes = bytes
- m.mu.Unlock()
-}
-
-// update accumulates the transferred byte count for the current sample until
-// clock() - m.sLast >= m.sRate. The monitor status is updated once the current
-// sample is done.
-func (m *Monitor) update(n int) (now time.Duration) {
- if !m.active {
- return
- }
- if now = clock(); n > 0 {
- m.tLast = now
- }
- m.sBytes += int64(n)
- if sTime := now - m.sLast; sTime >= m.sRate {
- t := sTime.Seconds()
- if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak {
- m.rPeak = m.rSample
- }
-
- // Exponential moving average using a method similar to *nix load
- // average calculation. Longer sampling periods carry greater weight.
- if m.samples > 0 {
- w := math.Exp(-t / m.rWindow)
- m.rEMA = m.rSample + w*(m.rEMA-m.rSample)
- } else {
- m.rEMA = m.rSample
- }
- m.reset(now)
- }
- return
-}
-
-// reset clears the current sample state in preparation for the next sample.
-func (m *Monitor) reset(sampleTime time.Duration) {
- m.bytes += m.sBytes
- m.samples++
- m.sBytes = 0
- m.sLast = sampleTime
-}
-
-// waitNextSample sleeps for the remainder of the current sample. The lock is
-// released and reacquired during the actual sleep period, so it's possible for
-// the transfer to be inactive when this method returns.
-func (m *Monitor) waitNextSample(now time.Duration) time.Duration {
- const minWait = 5 * time.Millisecond
- current := m.sLast
-
- // sleep until the last sample time changes (ideally, just one iteration)
- for m.sLast == current && m.active {
- d := current + m.sRate - now
- m.mu.Unlock()
- if d < minWait {
- d = minWait
- }
- time.Sleep(d)
- m.mu.Lock()
- now = m.update(0)
- }
- return now
-}
diff --git a/etcd/vendor/github.com/mxk/go-flowrate/flowrate/io.go b/etcd/vendor/github.com/mxk/go-flowrate/flowrate/io.go
deleted file mode 100644
index fbe0909725..0000000000
--- a/etcd/vendor/github.com/mxk/go-flowrate/flowrate/io.go
+++ /dev/null
@@ -1,133 +0,0 @@
-//
-// Written by Maxim Khitrov (November 2012)
-//
-
-package flowrate
-
-import (
- "errors"
- "io"
-)
-
-// ErrLimit is returned by the Writer when a non-blocking write is short due to
-// the transfer rate limit.
-var ErrLimit = errors.New("flowrate: flow rate limit exceeded")
-
-// Limiter is implemented by the Reader and Writer to provide a consistent
-// interface for monitoring and controlling data transfer.
-type Limiter interface {
- Done() int64
- Status() Status
- SetTransferSize(bytes int64)
- SetLimit(new int64) (old int64)
- SetBlocking(new bool) (old bool)
-}
-
-// Reader implements io.ReadCloser with a restriction on the rate of data
-// transfer.
-type Reader struct {
- io.Reader // Data source
- *Monitor // Flow control monitor
-
- limit int64 // Rate limit in bytes per second (unlimited when <= 0)
- block bool // What to do when no new bytes can be read due to the limit
-}
-
-// NewReader restricts all Read operations on r to limit bytes per second.
-func NewReader(r io.Reader, limit int64) *Reader {
- return &Reader{r, New(0, 0), limit, true}
-}
-
-// Read reads up to len(p) bytes into p without exceeding the current transfer
-// rate limit. It returns (0, nil) immediately if r is non-blocking and no new
-// bytes can be read at this time.
-func (r *Reader) Read(p []byte) (n int, err error) {
- p = p[:r.Limit(len(p), r.limit, r.block)]
- if len(p) > 0 {
- n, err = r.IO(r.Reader.Read(p))
- }
- return
-}
-
-// SetLimit changes the transfer rate limit to new bytes per second and returns
-// the previous setting.
-func (r *Reader) SetLimit(new int64) (old int64) {
- old, r.limit = r.limit, new
- return
-}
-
-// SetBlocking changes the blocking behavior and returns the previous setting. A
-// Read call on a non-blocking reader returns immediately if no additional bytes
-// may be read at this time due to the rate limit.
-func (r *Reader) SetBlocking(new bool) (old bool) {
- old, r.block = r.block, new
- return
-}
-
-// Close closes the underlying reader if it implements the io.Closer interface.
-func (r *Reader) Close() error {
- defer r.Done()
- if c, ok := r.Reader.(io.Closer); ok {
- return c.Close()
- }
- return nil
-}
-
-// Writer implements io.WriteCloser with a restriction on the rate of data
-// transfer.
-type Writer struct {
- io.Writer // Data destination
- *Monitor // Flow control monitor
-
- limit int64 // Rate limit in bytes per second (unlimited when <= 0)
- block bool // What to do when no new bytes can be written due to the limit
-}
-
-// NewWriter restricts all Write operations on w to limit bytes per second. The
-// transfer rate and the default blocking behavior (true) can be changed
-// directly on the returned *Writer.
-func NewWriter(w io.Writer, limit int64) *Writer {
- return &Writer{w, New(0, 0), limit, true}
-}
-
-// Write writes len(p) bytes from p to the underlying data stream without
-// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is
-// non-blocking and no additional bytes can be written at this time.
-func (w *Writer) Write(p []byte) (n int, err error) {
- var c int
- for len(p) > 0 && err == nil {
- s := p[:w.Limit(len(p), w.limit, w.block)]
- if len(s) > 0 {
- c, err = w.IO(w.Writer.Write(s))
- } else {
- return n, ErrLimit
- }
- p = p[c:]
- n += c
- }
- return
-}
-
-// SetLimit changes the transfer rate limit to new bytes per second and returns
-// the previous setting.
-func (w *Writer) SetLimit(new int64) (old int64) {
- old, w.limit = w.limit, new
- return
-}
-
-// SetBlocking changes the blocking behavior and returns the previous setting. A
-// Write call on a non-blocking writer returns as soon as no additional bytes
-// may be written at this time due to the rate limit.
-func (w *Writer) SetBlocking(new bool) (old bool) {
- old, w.block = w.block, new
- return
-}
-
-// Close closes the underlying writer if it implements the io.Closer interface.
-func (w *Writer) Close() error {
- defer w.Done()
- if c, ok := w.Writer.(io.Closer); ok {
- return c.Close()
- }
- return nil
-}
diff --git a/etcd/vendor/github.com/mxk/go-flowrate/flowrate/util.go b/etcd/vendor/github.com/mxk/go-flowrate/flowrate/util.go
deleted file mode 100644
index 4caac583fc..0000000000
--- a/etcd/vendor/github.com/mxk/go-flowrate/flowrate/util.go
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-// Written by Maxim Khitrov (November 2012)
-//
-
-package flowrate
-
-import (
- "math"
- "strconv"
- "time"
-)
-
-// clockRate is the resolution and precision of clock().
-const clockRate = 20 * time.Millisecond
-
-// czero is the process start time rounded down to the nearest clockRate
-// increment.
-var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate
-
-// clock returns a low resolution timestamp relative to the process start time.
-func clock() time.Duration {
- return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero
-}
-
-// clockToTime converts a clock() timestamp to an absolute time.Time value.
-func clockToTime(c time.Duration) time.Time {
- return time.Unix(0, int64(czero+c))
-}
-
-// clockRound returns d rounded to the nearest clockRate increment.
-func clockRound(d time.Duration) time.Duration {
- return (d + clockRate>>1) / clockRate * clockRate
-}
-
-// round returns x rounded to the nearest int64 (non-negative values only).
-func round(x float64) int64 {
- if _, frac := math.Modf(x); frac >= 0.5 {
- return int64(math.Ceil(x))
- }
- return int64(math.Floor(x))
-}
-
-// Percent represents a percentage in increments of 1/1000th of a percent.
-type Percent uint32
-
-// percentOf calculates what percent of the total is x.
-func percentOf(x, total float64) Percent {
- if x < 0 || total <= 0 {
- return 0
- } else if p := round(x / total * 1e5); p <= math.MaxUint32 {
- return Percent(p)
- }
- return Percent(math.MaxUint32)
-}
-
-func (p Percent) Float() float64 {
- return float64(p) * 1e-3
-}
-
-func (p Percent) String() string {
- var buf [12]byte
- b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
- n := len(b)
- b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
- b[n] = '.'
- return string(append(b, '%'))
-}
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/.mailmap b/etcd/vendor/github.com/opencontainers/go-digest/.mailmap
deleted file mode 100644
index eaf8b2f9e6..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/.mailmap
+++ /dev/null
@@ -1,4 +0,0 @@
-Aaron Lehmann
-Derek McGowan
-Stephen J Day
-Haibing Zhou
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/etcd/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
deleted file mode 100644
index b6165f83ca..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-version: 2
-
-requirements:
- signed_off_by:
- required: true
-
-always_pending:
- title_regex: '^WIP'
- explanation: 'Work in progress...'
-
-group_defaults:
- required: 2
- approve_by_comment:
- enabled: true
- approve_regex: '^LGTM'
- reject_regex: '^Rejected'
- reset_on_push:
- enabled: true
- author_approval:
- ignored: true
- conditions:
- branches:
- - master
-
-groups:
- go-digest:
- teams:
- - go-digest-maintainers
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/.travis.yml b/etcd/vendor/github.com/opencontainers/go-digest/.travis.yml
deleted file mode 100644
index 5775f885c1..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/.travis.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-language: go
-go:
- - 1.12.x
- - 1.13.x
- - master
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/etcd/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
deleted file mode 100644
index e4d962ac16..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Contributing to Docker open source projects
-
-Want to hack on this project? Awesome! Here are instructions to get you started.
-
-This project is a part of the [Docker](https://www.docker.com) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read Docker's
-[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
-[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
-[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
-[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
-
-For an in-depth description of our contribution process, visit the
-contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-1 Letterman Drive
-Suite D4700
-San Francisco, CA, 94129
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/LICENSE b/etcd/vendor/github.com/opencontainers/go-digest/LICENSE
deleted file mode 100644
index 3ac8ab6487..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/LICENSE
+++ /dev/null
@@ -1,192 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2019, 2020 OCI Contributors
- Copyright 2016 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/etcd/vendor/github.com/opencontainers/go-digest/LICENSE.docs
deleted file mode 100644
index e26cd4fc8e..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/LICENSE.docs
+++ /dev/null
@@ -1,425 +0,0 @@
-Attribution-ShareAlike 4.0 International
-
-=======================================================================
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of
-Creative Commons public licenses does not create a lawyer-client or
-other relationship. Creative Commons makes its licenses and related
-information available on an "as-is" basis. Creative Commons gives no
-warranties regarding its licenses, any material licensed under their
-terms and conditions, or any related information. Creative Commons
-disclaims all liability for damages resulting from their use to the
-fullest extent possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share
-original works of authorship and other material subject to copyright
-and certain other rights specified in the public license below. The
-following considerations are for informational purposes only, are not
-exhaustive, and do not form part of our licenses.
-
- Considerations for licensors: Our public licenses are
- intended for use by those authorized to give the public
- permission to use material in ways otherwise restricted by
- copyright and certain other rights. Our licenses are
- irrevocable. Licensors should read and understand the terms
- and conditions of the license they choose before applying it.
- Licensors should also secure all rights necessary before
- applying our licenses so that the public can reuse the
- material as expected. Licensors should clearly mark any
- material not subject to the license. This includes other CC-
- licensed material, or material used under an exception or
- limitation to copyright. More considerations for licensors:
- wiki.creativecommons.org/Considerations_for_licensors
-
- Considerations for the public: By using one of our public
- licenses, a licensor grants the public permission to use the
- licensed material under specified terms and conditions. If
- the licensor's permission is not necessary for any reason--for
- example, because of any applicable exception or limitation to
- copyright--then that use is not regulated by the license. Our
- licenses grant only permissions under copyright and certain
- other rights that a licensor has authority to grant. Use of
- the licensed material may still be restricted for other
- reasons, including because others have copyright or other
- rights in the material. A licensor may make special requests,
- such as asking that all changes be marked or described.
- Although not required by our licenses, you are encouraged to
- respect those requests where reasonable. More_considerations
- for the public:
- wiki.creativecommons.org/Considerations_for_licensees
-
-=======================================================================
-
-Creative Commons Attribution-ShareAlike 4.0 International Public
-License
-
-By exercising the Licensed Rights (defined below), You accept and agree
-to be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You
-such rights in consideration of benefits the Licensor receives from
-making the Licensed Material available under these terms and
-conditions.
-
-
-Section 1 -- Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material
- and in which the Licensed Material is translated, altered,
- arranged, transformed, or otherwise modified in a manner requiring
- permission under the Copyright and Similar Rights held by the
- Licensor. For purposes of this Public License, where the Licensed
- Material is a musical work, performance, or sound recording,
- Adapted Material is always produced where the Licensed Material is
- synched in timed relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright
- and Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative
- Commons as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright
- Treaty adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or
- any other exception or limitation to Copyright and Similar Rights
- that applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name
- of a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database,
- or other material to which the Licensor applied this Public
- License.
-
- i. Licensed Rights means the rights granted to You subject to the
- terms and conditions of this Public License, which are limited to
- all Copyright and Similar Rights that apply to Your use of the
- Licensed Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such
- as reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the
- public may access the material from a place and at a time
- individually chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially
- equivalent rights anywhere in the world.
-
- m. You means the individual or entity exercising the Licensed Rights
- under this Public License. Your has a corresponding meaning.
-
-
-Section 2 -- Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License,
- the Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- a. reproduce and Share the Licensed Material, in whole or
- in part; and
-
- b. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with
- its terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section
- 6(a).
-
- 4. Media and formats; technical modifications allowed. The
- Licensor authorizes You to exercise the Licensed Rights in
- all media and formats whether now known or hereafter created,
- and to make technical modifications necessary to do so. The
- Licensor waives and/or agrees not to assert any right or
- authority to forbid You from making technical modifications
- necessary to exercise the Licensed Rights, including
- technical modifications necessary to circumvent Effective
- Technological Measures. For purposes of this Public License,
- simply making modifications authorized by this Section 2(a)
- (4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- a. Offer from the Licensor -- Licensed Material. Every
- recipient of the Licensed Material automatically
- receives an offer from the Licensor to exercise the
- Licensed Rights under the terms and conditions of this
- Public License.
-
- b. Additional offer from the Licensor -- Adapted Material.
- Every recipient of Adapted Material from You
- automatically receives an offer from the Licensor to
- exercise the Licensed Rights in the Adapted Material
- under the conditions of the Adapter's License You apply.
-
- c. No downstream restrictions. You may not offer or impose
- any additional or different terms or conditions on, or
- apply any Effective Technological Measures to, the
- Licensed Material if doing so restricts exercise of the
- Licensed Rights by any recipient of the Licensed
- Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You
- are, or that Your use of the Licensed Material is, connected
- with, or sponsored, endorsed, or granted official status by,
- the Licensor or others designated to receive attribution as
- provided in Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not
- licensed under this Public License, nor are publicity,
- privacy, and/or other similar personality rights; however, to
- the extent possible, the Licensor waives and/or agrees not to
- assert any such rights held by the Licensor to the limited
- extent necessary to allow You to exercise the Licensed
- Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this
- Public License.
-
- 3. To the extent possible, the Licensor waives any right to
- collect royalties from You for the exercise of the Licensed
- Rights, whether directly or through a collecting society
- under any voluntary or waivable statutory or compulsory
- licensing scheme. In all other cases the Licensor expressly
- reserves any right to collect such royalties.
-
-
-Section 3 -- License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified
- form), You must:
-
- a. retain the following if it is supplied by the Licensor
- with the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by
- the Licensor (including by pseudonym if
- designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of
- warranties;
-
- v. a URI or hyperlink to the Licensed Material to the
- extent reasonably practicable;
-
- b. indicate if You modified the Licensed Material and
- retain an indication of any previous modifications; and
-
- c. indicate the Licensed Material is licensed under this
- Public License, and include the text of, or the URI or
- hyperlink to, this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required
- information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable.
-
- b. ShareAlike.
-
- In addition to the conditions in Section 3(a), if You Share
- Adapted Material You produce, the following conditions also apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-
-Section 4 -- Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that
-apply to Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right
- to extract, reuse, reproduce, and Share all or a substantial
- portion of the contents of the database;
-
- b. if You include all or a substantial portion of the database
- contents in a database in which You have Sui Generis Database
- Rights, then the database in which You have Sui Generis Database
- Rights (but not its individual contents) is Adapted Material,
-
- including for purposes of Section 3(b); and
- c. You must comply with the conditions in Section 3(a) if You Share
- all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not
-replace Your obligations under this Public License where the Licensed
-Rights include other Copyright and Similar Rights.
-
-
-Section 5 -- Disclaimer of Warranties and Limitation of Liability.
-
- a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
- EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
- AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
- ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
- IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
- WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
- PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
- ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
- KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
- ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
-
- b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
- TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
- NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
- INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
- COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
- USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
- ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
- DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
- IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent
- possible, most closely approximates an absolute disclaimer and
- waiver of all liability.
-
-
-Section 6 -- Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided
- it is cured within 30 days of Your discovery of the
- violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations
- of this Public License.
-
- c. For the avoidance of doubt, the Licensor may also offer the
- Licensed Material under separate terms or conditions or stop
- distributing the Licensed Material at any time; however, doing so
- will not terminate this Public License.
-
- d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
- License.
-
-
-Section 7 -- Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different
- terms or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-
-Section 8 -- Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and
- shall not be interpreted to, reduce, limit, restrict, or impose
- conditions on any use of the Licensed Material that could lawfully
- be made without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted
- as a limitation upon, or waiver of, any privileges and immunities
- that apply to the Licensor or You, including from the legal
- processes of any jurisdiction or authority.
-
-
-=======================================================================
-
-Creative Commons is not a party to its public licenses.
-Notwithstanding, Creative Commons may elect to apply one of its public
-licenses to material it publishes and in those instances will be
-considered the "Licensor." Except for the limited purpose of indicating
-that material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/etcd/vendor/github.com/opencontainers/go-digest/MAINTAINERS
deleted file mode 100644
index 843b1b2061..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/MAINTAINERS
+++ /dev/null
@@ -1,5 +0,0 @@
-Derek McGowan (@dmcgowan)
-Stephen Day (@stevvooe)
-Vincent Batts (@vbatts)
-Akihiro Suda (@AkihiroSuda)
-Sebastiaan van Stijn (@thaJeztah)
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/README.md b/etcd/vendor/github.com/opencontainers/go-digest/README.md
deleted file mode 100644
index a11287207e..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# go-digest
-
-[](https://godoc.org/github.com/opencontainers/go-digest) [](https://goreportcard.com/report/github.com/opencontainers/go-digest) [](https://travis-ci.org/opencontainers/go-digest)
-
-Common digest package used across the container ecosystem.
-
-Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
-
-# What is a digest?
-
-A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function).
-
-The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems:
-
-```go
-id := digest.FromBytes([]byte("my content"))
-```
-
-In the example above, the id can be used to uniquely identify the byte slice "my content".
-This allows two disparate applications to agree on a verifiable identifier without having to trust one another.
-
-An identifying digest can be verified, as follows:
-
-```go
-if id != digest.FromBytes([]byte("my content")) {
- return errors.New("the content has changed!")
-}
-```
-
-A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:
-
-```go
-rd := getContent()
-verifier := id.Verifier()
-io.Copy(verifier, rd)
-
-if !verifier.Verified() {
- return errors.New("the content has changed!")
-}
-```
-
-Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.
-
-# Usage
-
-While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.
-
-1. Make sure to import the hash implementations into your application or the package will panic.
- You should have something like the following in the main (or other entrypoint) of your application:
-
- ```go
- import (
- _ "crypto/sha256"
- _ "crypto/sha512"
- )
- ```
- This may seem inconvenient but it allows you replace the hash
- implementations with others, such as https://github.com/stevvooe/resumable.
-
-2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
- While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
-
-3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
-
-# Stability
-
-The Go API, at this stage, is considered stable, unless otherwise noted.
-
-As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
-
-# Contributing
-
-This package is considered fairly complete.
-It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
-New additions will be met with skepticism.
-If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
-
-## Code of Conduct
-
-Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
-
-## Security
-
-If you find an issue, please follow the [security][security] protocol to report it.
-
-# Copyright and license
-
-Copyright © 2019, 2020 OCI Contributors
-Copyright © 2016 Docker, Inc.
-All rights reserved, except as follows.
-Code is released under the [Apache 2.0 license](LICENSE).
-This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
-You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
-
-[security]: https://github.com/opencontainers/org/blob/master/security
-[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/algorithm.go b/etcd/vendor/github.com/opencontainers/go-digest/algorithm.go
deleted file mode 100644
index 490951dc3f..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/algorithm.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import (
- "crypto"
- "fmt"
- "hash"
- "io"
- "regexp"
-)
-
-// Algorithm identifies and implementation of a digester by an identifier.
-// Note the that this defines both the hash algorithm used and the string
-// encoding.
-type Algorithm string
-
-// supported digest types
-const (
- SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
- SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
- SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
-
- // Canonical is the primary digest algorithm used with the distribution
- // project. Other digests may be used but this one is the primary storage
- // digest.
- Canonical = SHA256
-)
-
-var (
- // TODO(stevvooe): Follow the pattern of the standard crypto package for
- // registration of digests. Effectively, we are a registerable set and
- // common symbol access.
-
- // algorithms maps values to hash.Hash implementations. Other algorithms
- // may be available but they cannot be calculated by the digest package.
- algorithms = map[Algorithm]crypto.Hash{
- SHA256: crypto.SHA256,
- SHA384: crypto.SHA384,
- SHA512: crypto.SHA512,
- }
-
- // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
- // Note that /A-F/ disallowed.
- anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
- SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
- SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
- SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
- }
-)
-
-// Available returns true if the digest type is available for use. If this
-// returns false, Digester and Hash will return nil.
-func (a Algorithm) Available() bool {
- h, ok := algorithms[a]
- if !ok {
- return false
- }
-
- // check availability of the hash, as well
- return h.Available()
-}
-
-func (a Algorithm) String() string {
- return string(a)
-}
-
-// Size returns number of bytes returned by the hash.
-func (a Algorithm) Size() int {
- h, ok := algorithms[a]
- if !ok {
- return 0
- }
- return h.Size()
-}
-
-// Set implemented to allow use of Algorithm as a command line flag.
-func (a *Algorithm) Set(value string) error {
- if value == "" {
- *a = Canonical
- } else {
- // just do a type conversion, support is queried with Available.
- *a = Algorithm(value)
- }
-
- if !a.Available() {
- return ErrDigestUnsupported
- }
-
- return nil
-}
-
-// Digester returns a new digester for the specified algorithm. If the algorithm
-// does not have a digester implementation, nil will be returned. This can be
-// checked by calling Available before calling Digester.
-func (a Algorithm) Digester() Digester {
- return &digester{
- alg: a,
- hash: a.Hash(),
- }
-}
-
-// Hash returns a new hash as used by the algorithm. If not available, the
-// method will panic. Check Algorithm.Available() before calling.
-func (a Algorithm) Hash() hash.Hash {
- if !a.Available() {
- // Empty algorithm string is invalid
- if a == "" {
- panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
- }
-
- // NOTE(stevvooe): A missing hash is usually a programming error that
- // must be resolved at compile time. We don't import in the digest
- // package to allow users to choose their hash implementation (such as
- // when using stevvooe/resumable or a hardware accelerated package).
- //
- // Applications that may want to resolve the hash at runtime should
- // call Algorithm.Available before call Algorithm.Hash().
- panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
- }
-
- return algorithms[a].New()
-}
-
-// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
-// the encoded portion of the digest.
-func (a Algorithm) Encode(d []byte) string {
- // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
- // add support for back registration, we can modify this accordingly.
- return fmt.Sprintf("%x", d)
-}
-
-// FromReader returns the digest of the reader using the algorithm.
-func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
- digester := a.Digester()
-
- if _, err := io.Copy(digester.Hash(), rd); err != nil {
- return "", err
- }
-
- return digester.Digest(), nil
-}
-
-// FromBytes digests the input and returns a Digest.
-func (a Algorithm) FromBytes(p []byte) Digest {
- digester := a.Digester()
-
- if _, err := digester.Hash().Write(p); err != nil {
- // Writes to a Hash should never fail. None of the existing
- // hash implementations in the stdlib or hashes vendored
- // here can return errors from Write. Having a panic in this
- // condition instead of having FromBytes return an error value
- // avoids unnecessary error handling paths in all callers.
- panic("write to hash function returned error: " + err.Error())
- }
-
- return digester.Digest()
-}
-
-// FromString digests the string input and returns a Digest.
-func (a Algorithm) FromString(s string) Digest {
- return a.FromBytes([]byte(s))
-}
-
-// Validate validates the encoded portion string
-func (a Algorithm) Validate(encoded string) error {
- r, ok := anchoredEncodedRegexps[a]
- if !ok {
- return ErrDigestUnsupported
- }
- // Digests much always be hex-encoded, ensuring that their hex portion will
- // always be size*2
- if a.Size()*2 != len(encoded) {
- return ErrDigestInvalidLength
- }
- if r.MatchString(encoded) {
- return nil
- }
- return ErrDigestInvalidFormat
-}
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/digest.go b/etcd/vendor/github.com/opencontainers/go-digest/digest.go
deleted file mode 100644
index 518b5e7154..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/digest.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import (
- "fmt"
- "hash"
- "io"
- "regexp"
- "strings"
-)
-
-// Digest allows simple protection of hex formatted digest strings, prefixed
-// by their algorithm. Strings of type Digest have some guarantee of being in
-// the correct format and it provides quick access to the components of a
-// digest string.
-//
-// The following is an example of the contents of Digest types:
-//
-// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// This allows to abstract the digest behind this type and work only in those
-// terms.
-type Digest string
-
-// NewDigest returns a Digest from alg and a hash.Hash object.
-func NewDigest(alg Algorithm, h hash.Hash) Digest {
- return NewDigestFromBytes(alg, h.Sum(nil))
-}
-
-// NewDigestFromBytes returns a new digest from the byte contents of p.
-// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
-// functions. This is also useful for rebuilding digests from binary
-// serializations.
-func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
- return NewDigestFromEncoded(alg, alg.Encode(p))
-}
-
-// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
-func NewDigestFromHex(alg, hex string) Digest {
- return NewDigestFromEncoded(Algorithm(alg), hex)
-}
-
-// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
-func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
- return Digest(fmt.Sprintf("%s:%s", alg, encoded))
-}
-
-// DigestRegexp matches valid digest types.
-var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
-
-// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
-var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
-
-var (
- // ErrDigestInvalidFormat returned when digest format invalid.
- ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
-
- // ErrDigestInvalidLength returned when digest has invalid length.
- ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
-
- // ErrDigestUnsupported returned when the digest algorithm is unsupported.
- ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
-)
-
-// Parse parses s and returns the validated digest object. An error will
-// be returned if the format is invalid.
-func Parse(s string) (Digest, error) {
- d := Digest(s)
- return d, d.Validate()
-}
-
-// FromReader consumes the content of rd until io.EOF, returning canonical digest.
-func FromReader(rd io.Reader) (Digest, error) {
- return Canonical.FromReader(rd)
-}
-
-// FromBytes digests the input and returns a Digest.
-func FromBytes(p []byte) Digest {
- return Canonical.FromBytes(p)
-}
-
-// FromString digests the input and returns a Digest.
-func FromString(s string) Digest {
- return Canonical.FromString(s)
-}
-
-// Validate checks that the contents of d is a valid digest, returning an
-// error if not.
-func (d Digest) Validate() error {
- s := string(d)
- i := strings.Index(s, ":")
- if i <= 0 || i+1 == len(s) {
- return ErrDigestInvalidFormat
- }
- algorithm, encoded := Algorithm(s[:i]), s[i+1:]
- if !algorithm.Available() {
- if !DigestRegexpAnchored.MatchString(s) {
- return ErrDigestInvalidFormat
- }
- return ErrDigestUnsupported
- }
- return algorithm.Validate(encoded)
-}
-
-// Algorithm returns the algorithm portion of the digest. This will panic if
-// the underlying digest is not in a valid format.
-func (d Digest) Algorithm() Algorithm {
- return Algorithm(d[:d.sepIndex()])
-}
-
-// Verifier returns a writer object that can be used to verify a stream of
-// content against the digest. If the digest is invalid, the method will panic.
-func (d Digest) Verifier() Verifier {
- return hashVerifier{
- hash: d.Algorithm().Hash(),
- digest: d,
- }
-}
-
-// Encoded returns the encoded portion of the digest. This will panic if the
-// underlying digest is not in a valid format.
-func (d Digest) Encoded() string {
- return string(d[d.sepIndex()+1:])
-}
-
-// Hex is deprecated. Please use Digest.Encoded.
-func (d Digest) Hex() string {
- return d.Encoded()
-}
-
-func (d Digest) String() string {
- return string(d)
-}
-
-func (d Digest) sepIndex() int {
- i := strings.Index(string(d), ":")
-
- if i < 0 {
- panic(fmt.Sprintf("no ':' separator in digest %q", d))
- }
-
- return i
-}
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/digester.go b/etcd/vendor/github.com/opencontainers/go-digest/digester.go
deleted file mode 100644
index ede9077571..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/digester.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import "hash"
-
-// Digester calculates the digest of written data. Writes should go directly
-// to the return value of Hash, while calling Digest will return the current
-// value of the digest.
-type Digester interface {
- Hash() hash.Hash // provides direct access to underlying hash instance.
- Digest() Digest
-}
-
-// digester provides a simple digester definition that embeds a hasher.
-type digester struct {
- alg Algorithm
- hash hash.Hash
-}
-
-func (d *digester) Hash() hash.Hash {
- return d.hash
-}
-
-func (d *digester) Digest() Digest {
- return NewDigest(d.alg, d.hash)
-}
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/doc.go b/etcd/vendor/github.com/opencontainers/go-digest/doc.go
deleted file mode 100644
index 83d3a936ca..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package digest provides a generalized type to opaquely represent message
-// digests and their operations within the registry. The Digest type is
-// designed to serve as a flexible identifier in a content-addressable system.
-// More importantly, it provides tools and wrappers to work with
-// hash.Hash-based digests with little effort.
-//
-// Basics
-//
-// The format of a digest is simply a string with two parts, dubbed the
-// "algorithm" and the "digest", separated by a colon:
-//
-// :
-//
-// An example of a sha256 digest representation follows:
-//
-// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// The "algorithm" portion defines both the hashing algorithm used to calculate
-// the digest and the encoding of the resulting digest, which defaults to "hex"
-// if not otherwise specified. Currently, all supported algorithms have their
-// digests encoded in hex strings.
-//
-// In the example above, the string "sha256" is the algorithm and the hex bytes
-// are the "digest".
-//
-// Because the Digest type is simply a string, once a valid Digest is
-// obtained, comparisons are cheap, quick and simple to express with the
-// standard equality operator.
-//
-// Verification
-//
-// The main benefit of using the Digest type is simple verification against a
-// given digest. The Verifier interface, modeled after the stdlib hash.Hash
-// interface, provides a common write sink for digest verification. After
-// writing is complete, calling the Verifier.Verified method will indicate
-// whether or not the stream of bytes matches the target digest.
-//
-// Missing Features
-//
-// In addition to the above, we intend to add the following features to this
-// package:
-//
-// 1. A Digester type that supports write sink digest calculation.
-//
-// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
-//
-package digest
diff --git a/etcd/vendor/github.com/opencontainers/go-digest/verifiers.go b/etcd/vendor/github.com/opencontainers/go-digest/verifiers.go
deleted file mode 100644
index afef506f46..0000000000
--- a/etcd/vendor/github.com/opencontainers/go-digest/verifiers.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import (
- "hash"
- "io"
-)
-
-// Verifier presents a general verification interface to be used with message
-// digests and other byte stream verifications. Users instantiate a Verifier
-// from one of the various methods, write the data under test to it then check
-// the result with the Verified method.
-type Verifier interface {
- io.Writer
-
- // Verified will return true if the content written to Verifier matches
- // the digest.
- Verified() bool
-}
-
-type hashVerifier struct {
- digest Digest
- hash hash.Hash
-}
-
-func (hv hashVerifier) Write(p []byte) (n int, err error) {
- return hv.hash.Write(p)
-}
-
-func (hv hashVerifier) Verified() bool {
- return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/LICENSE b/etcd/vendor/github.com/opencontainers/selinux/LICENSE
deleted file mode 100644
index 8dada3edaf..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
deleted file mode 100644
index 0ac7d819e6..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
-Package selinux provides a high-level interface for interacting with selinux.
-
-Usage:
-
- import "github.com/opencontainers/selinux/go-selinux"
-
- // Ensure that selinux is enforcing mode.
- if selinux.EnforceMode() != selinux.Enforcing {
- selinux.SetEnforceMode(selinux.Enforcing)
- }
-
-*/
-package selinux
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
deleted file mode 100644
index fea096c180..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package label
-
-import (
- "fmt"
-
- "github.com/opencontainers/selinux/go-selinux"
-)
-
-// Deprecated: use selinux.ROFileLabel
-var ROMountLabel = selinux.ROFileLabel
-
-// SetProcessLabel takes a process label and tells the kernel to assign the
-// label to the next program executed by the current process.
-// Deprecated: use selinux.SetExecLabel
-var SetProcessLabel = selinux.SetExecLabel
-
-// ProcessLabel returns the process label that the kernel will assign
-// to the next program executed by the current process. If "" is returned
-// this indicates that the default labeling will happen for the process.
-// Deprecated: use selinux.ExecLabel
-var ProcessLabel = selinux.ExecLabel
-
-// SetSocketLabel takes a process label and tells the kernel to assign the
-// label to the next socket that gets created
-// Deprecated: use selinux.SetSocketLabel
-var SetSocketLabel = selinux.SetSocketLabel
-
-// SocketLabel retrieves the current default socket label setting
-// Deprecated: use selinux.SocketLabel
-var SocketLabel = selinux.SocketLabel
-
-// SetKeyLabel takes a process label and tells the kernel to assign the
-// label to the next kernel keyring that gets created
-// Deprecated: use selinux.SetKeyLabel
-var SetKeyLabel = selinux.SetKeyLabel
-
-// KeyLabel retrieves the current default kernel keyring label setting
-// Deprecated: use selinux.KeyLabel
-var KeyLabel = selinux.KeyLabel
-
-// FileLabel returns the label for specified path
-// Deprecated: use selinux.FileLabel
-var FileLabel = selinux.FileLabel
-
-// PidLabel will return the label of the process running with the specified pid
-// Deprecated: use selinux.PidLabel
-var PidLabel = selinux.PidLabel
-
-// Init initialises the labeling system
-func Init() {
- _ = selinux.GetEnabled()
-}
-
-// ClearLabels will clear all reserved labels
-// Deprecated: use selinux.ClearLabels
-var ClearLabels = selinux.ClearLabels
-
-// ReserveLabel will record the fact that the MCS label has already been used.
-// This will prevent InitLabels from using the MCS label in a newly created
-// container
-// Deprecated: use selinux.ReserveLabel
-func ReserveLabel(label string) error {
- selinux.ReserveLabel(label)
- return nil
-}
-
-// ReleaseLabel will remove the reservation of the MCS label.
-// This will allow InitLabels to use the MCS label in a newly created
-// containers
-// Deprecated: use selinux.ReleaseLabel
-func ReleaseLabel(label string) error {
- selinux.ReleaseLabel(label)
- return nil
-}
-
-// DupSecOpt takes a process label and returns security options that
-// can be used to set duplicate labels on future container processes
-// Deprecated: use selinux.DupSecOpt
-var DupSecOpt = selinux.DupSecOpt
-
-// FormatMountLabel returns a string to be used by the mount command.
-// The format of this string will be used to alter the labeling of the mountpoint.
-// The string returned is suitable to be used as the options field of the mount command.
-// If you need to have additional mount point options, you can pass them in as
-// the first parameter. Second parameter is the label that you wish to apply
-// to all content in the mount point.
-func FormatMountLabel(src, mountLabel string) string {
- if mountLabel != "" {
- switch src {
- case "":
- src = fmt.Sprintf("context=%q", mountLabel)
- default:
- src = fmt.Sprintf("%s,context=%q", src, mountLabel)
- }
- }
- return src
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
deleted file mode 100644
index 12de0ae5d6..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package label
-
-import (
- "errors"
- "fmt"
- "os"
- "os/user"
- "strings"
-
- "github.com/opencontainers/selinux/go-selinux"
-)
-
-// Valid Label Options
-var validOptions = map[string]bool{
- "disable": true,
- "type": true,
- "filetype": true,
- "user": true,
- "role": true,
- "level": true,
-}
-
-var ErrIncompatibleLabel = errors.New("Bad SELinux option z and Z can not be used together")
-
-// InitLabels returns the process label and file labels to be used within
-// the container. A list of options can be passed into this function to alter
-// the labels. The labels returned will include a random MCS String, that is
-// guaranteed to be unique.
-// If the disabled flag is passed in, the process label will not be set, but the mount label will be set
-// to the container_file label with the maximum category. This label is not usable by any confined label.
-func InitLabels(options []string) (plabel string, mlabel string, retErr error) {
- if !selinux.GetEnabled() {
- return "", "", nil
- }
- processLabel, mountLabel := selinux.ContainerLabels()
- if processLabel != "" {
- defer func() {
- if retErr != nil {
- selinux.ReleaseLabel(mountLabel)
- }
- }()
- pcon, err := selinux.NewContext(processLabel)
- if err != nil {
- return "", "", err
- }
- mcsLevel := pcon["level"]
- mcon, err := selinux.NewContext(mountLabel)
- if err != nil {
- return "", "", err
- }
- for _, opt := range options {
- if opt == "disable" {
- selinux.ReleaseLabel(mountLabel)
- return "", selinux.PrivContainerMountLabel(), nil
- }
- if i := strings.Index(opt, ":"); i == -1 {
- return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt)
- }
- con := strings.SplitN(opt, ":", 2)
- if !validOptions[con[0]] {
- return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type, filetype'", con[0])
- }
- if con[0] == "filetype" {
- mcon["type"] = con[1]
- continue
- }
- pcon[con[0]] = con[1]
- if con[0] == "level" || con[0] == "user" {
- mcon[con[0]] = con[1]
- }
- }
- if pcon.Get() != processLabel {
- if pcon["level"] != mcsLevel {
- selinux.ReleaseLabel(processLabel)
- }
- processLabel = pcon.Get()
- selinux.ReserveLabel(processLabel)
- }
- mountLabel = mcon.Get()
- }
- return processLabel, mountLabel, nil
-}
-
-// Deprecated: The GenLabels function is only to be used during the transition
-// to the official API. Use InitLabels(strings.Fields(options)) instead.
-func GenLabels(options string) (string, string, error) {
- return InitLabels(strings.Fields(options))
-}
-
-// SetFileLabel modifies the "path" label to the specified file label
-func SetFileLabel(path string, fileLabel string) error {
- if !selinux.GetEnabled() || fileLabel == "" {
- return nil
- }
- return selinux.SetFileLabel(path, fileLabel)
-}
-
-// SetFileCreateLabel tells the kernel the label for all files to be created
-func SetFileCreateLabel(fileLabel string) error {
- if !selinux.GetEnabled() {
- return nil
- }
- return selinux.SetFSCreateLabel(fileLabel)
-}
-
-// Relabel changes the label of path and all the entries beneath the path.
-// It changes the MCS label to s0 if shared is true.
-// This will allow all containers to share the content.
-//
-// The path itself is guaranteed to be relabeled last.
-func Relabel(path string, fileLabel string, shared bool) error {
- if !selinux.GetEnabled() || fileLabel == "" {
- return nil
- }
-
- exclude_paths := map[string]bool{
- "/": true,
- "/bin": true,
- "/boot": true,
- "/dev": true,
- "/etc": true,
- "/etc/passwd": true,
- "/etc/pki": true,
- "/etc/shadow": true,
- "/home": true,
- "/lib": true,
- "/lib64": true,
- "/media": true,
- "/opt": true,
- "/proc": true,
- "/root": true,
- "/run": true,
- "/sbin": true,
- "/srv": true,
- "/sys": true,
- "/tmp": true,
- "/usr": true,
- "/var": true,
- "/var/lib": true,
- "/var/log": true,
- }
-
- if home := os.Getenv("HOME"); home != "" {
- exclude_paths[home] = true
- }
-
- if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
- if usr, err := user.Lookup(sudoUser); err == nil {
- exclude_paths[usr.HomeDir] = true
- }
- }
-
- if path != "/" {
- path = strings.TrimSuffix(path, "/")
- }
- if exclude_paths[path] {
- return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
- }
-
- if shared {
- c, err := selinux.NewContext(fileLabel)
- if err != nil {
- return err
- }
-
- c["level"] = "s0"
- fileLabel = c.Get()
- }
- if err := selinux.Chcon(path, fileLabel, true); err != nil {
- return err
- }
- return nil
-}
-
-// DisableSecOpt returns a security opt that can disable labeling
-// support for future container processes
-// Deprecated: use selinux.DisableSecOpt
-var DisableSecOpt = selinux.DisableSecOpt
-
-// Validate checks that the label does not include unexpected options
-func Validate(label string) error {
- if strings.Contains(label, "z") && strings.Contains(label, "Z") {
- return ErrIncompatibleLabel
- }
- return nil
-}
-
-// RelabelNeeded checks whether the user requested a relabel
-func RelabelNeeded(label string) bool {
- return strings.Contains(label, "z") || strings.Contains(label, "Z")
-}
-
-// IsShared checks that the label includes a "shared" mark
-func IsShared(label string) bool {
- return strings.Contains(label, "z")
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
deleted file mode 100644
index 02d206239c..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// +build !linux
-
-package label
-
-// InitLabels returns the process label and file labels to be used within
-// the container. A list of options can be passed into this function to alter
-// the labels.
-func InitLabels(options []string) (string, string, error) {
- return "", "", nil
-}
-
-// Deprecated: The GenLabels function is only to be used during the transition
-// to the official API. Use InitLabels(strings.Fields(options)) instead.
-func GenLabels(options string) (string, string, error) {
- return "", "", nil
-}
-
-func SetFileLabel(path string, fileLabel string) error {
- return nil
-}
-
-func SetFileCreateLabel(fileLabel string) error {
- return nil
-}
-
-func Relabel(path string, fileLabel string, shared bool) error {
- return nil
-}
-
-// DisableSecOpt returns a security opt that can disable labeling
-// support for future container processes
-func DisableSecOpt() []string {
- return nil
-}
-
-// Validate checks that the label does not include unexpected options
-func Validate(label string) error {
- return nil
-}
-
-// RelabelNeeded checks whether the user requested a relabel
-func RelabelNeeded(label string) bool {
- return false
-}
-
-// IsShared checks that the label includes a "shared" mark
-func IsShared(label string) bool {
- return false
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
deleted file mode 100644
index 897ecbac41..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build linux,go1.16
-
-package selinux
-
-import (
- "errors"
- "io/fs"
- "os"
-
- "github.com/opencontainers/selinux/pkg/pwalkdir"
-)
-
-func rchcon(fpath, label string) error {
- return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
- e := setFileLabel(p, label)
- // Walk a file tree can race with removal, so ignore ENOENT.
- if errors.Is(e, os.ErrNotExist) {
- return nil
- }
- return e
- })
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
deleted file mode 100644
index 2c8b033ce0..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build linux,!go1.16
-
-package selinux
-
-import (
- "errors"
- "os"
-
- "github.com/opencontainers/selinux/pkg/pwalk"
-)
-
-func rchcon(fpath, label string) error {
- return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error {
- e := setFileLabel(p, label)
- // Walk a file tree can race with removal, so ignore ENOENT.
- if errors.Is(e, os.ErrNotExist) {
- return nil
- }
- return e
- })
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
deleted file mode 100644
index 5a59d151f6..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package selinux
-
-import (
- "errors"
-)
-
-const (
- // Enforcing constant indicate SELinux is in enforcing mode
- Enforcing = 1
- // Permissive constant to indicate SELinux is in permissive mode
- Permissive = 0
- // Disabled constant to indicate SELinux is disabled
- Disabled = -1
- // maxCategory is the maximum number of categories used within containers
- maxCategory = 1024
- // DefaultCategoryRange is the upper bound on the category range
- DefaultCategoryRange = uint32(maxCategory)
-)
-
-var (
- // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
- ErrMCSAlreadyExists = errors.New("MCS label already exists")
- // ErrEmptyPath is returned when an empty path has been specified.
- ErrEmptyPath = errors.New("empty path")
-
- // InvalidLabel is returned when an invalid label is specified.
- InvalidLabel = errors.New("Invalid Label")
-
- // ErrIncomparable is returned two levels are not comparable
- ErrIncomparable = errors.New("incomparable levels")
- // ErrLevelSyntax is returned when a sensitivity or category do not have correct syntax in a level
- ErrLevelSyntax = errors.New("invalid level syntax")
-
- // ErrContextMissing is returned if a requested context is not found in a file.
- ErrContextMissing = errors.New("context does not have a match")
- // ErrVerifierNil is returned when a context verifier function is nil.
- ErrVerifierNil = errors.New("verifier function is nil")
-
- // CategoryRange allows the upper bound on the category range to be adjusted
- CategoryRange = DefaultCategoryRange
-
- privContainerMountLabel string
-)
-
-// Context is a representation of the SELinux label broken into 4 parts
-type Context map[string]string
-
-// SetDisabled disables SELinux support for the package
-func SetDisabled() {
- setDisabled()
-}
-
-// GetEnabled returns whether SELinux is currently enabled.
-func GetEnabled() bool {
- return getEnabled()
-}
-
-// ClassIndex returns the int index for an object class in the loaded policy,
-// or -1 and an error
-func ClassIndex(class string) (int, error) {
- return classIndex(class)
-}
-
-// SetFileLabel sets the SELinux label for this path, following symlinks,
-// or returns an error.
-func SetFileLabel(fpath string, label string) error {
- return setFileLabel(fpath, label)
-}
-
-// LsetFileLabel sets the SELinux label for this path, not following symlinks,
-// or returns an error.
-func LsetFileLabel(fpath string, label string) error {
- return lSetFileLabel(fpath, label)
-}
-
-// FileLabel returns the SELinux label for this path, following symlinks,
-// or returns an error.
-func FileLabel(fpath string) (string, error) {
- return fileLabel(fpath)
-}
-
-// LfileLabel returns the SELinux label for this path, not following symlinks,
-// or returns an error.
-func LfileLabel(fpath string) (string, error) {
- return lFileLabel(fpath)
-}
-
-// SetFSCreateLabel tells the kernel what label to use for all file system objects
-// created by this task.
-// Set the label to an empty string to return to the default label. Calls to SetFSCreateLabel
-// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until file system
-// objects created by this task are finished to guarantee another goroutine does not migrate
-// to the current thread before execution is complete.
-func SetFSCreateLabel(label string) error {
- return setFSCreateLabel(label)
-}
-
-// FSCreateLabel returns the default label the kernel which the kernel is using
-// for file system objects created by this task. "" indicates default.
-func FSCreateLabel() (string, error) {
- return fsCreateLabel()
-}
-
-// CurrentLabel returns the SELinux label of the current process thread, or an error.
-func CurrentLabel() (string, error) {
- return currentLabel()
-}
-
-// PidLabel returns the SELinux label of the given pid, or an error.
-func PidLabel(pid int) (string, error) {
- return pidLabel(pid)
-}
-
-// ExecLabel returns the SELinux label that the kernel will use for any programs
-// that are executed by the current process thread, or an error.
-func ExecLabel() (string, error) {
- return execLabel()
-}
-
-// CanonicalizeContext takes a context string and writes it to the kernel
-// the function then returns the context that the kernel will use. Use this
-// function to check if two contexts are equivalent
-func CanonicalizeContext(val string) (string, error) {
- return canonicalizeContext(val)
-}
-
-// ComputeCreateContext requests the type transition from source to target for
-// class from the kernel.
-func ComputeCreateContext(source string, target string, class string) (string, error) {
- return computeCreateContext(source, target, class)
-}
-
-// CalculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound)
-// of a source and target range.
-// The glblub is calculated as the greater of the low sensitivities and
-// the lower of the high sensitivities and the and of each category bitset.
-func CalculateGlbLub(sourceRange, targetRange string) (string, error) {
- return calculateGlbLub(sourceRange, targetRange)
-}
-
-// SetExecLabel sets the SELinux label that the kernel will use for any programs
-// that are executed by the current process thread, or an error. Calls to SetExecLabel
-// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until execution
-// of the program is finished to guarantee another goroutine does not migrate to the current
-// thread before execution is complete.
-func SetExecLabel(label string) error {
- return setExecLabel(label)
-}
-
-// SetTaskLabel sets the SELinux label for the current thread, or an error.
-// This requires the dyntransition permission. Calls to SetTaskLabel should
-// be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee
-// the current thread does not run in a new mislabeled thread.
-func SetTaskLabel(label string) error {
- return setTaskLabel(label)
-}
-
-// SetSocketLabel takes a process label and tells the kernel to assign the
-// label to the next socket that gets created. Calls to SetSocketLabel
-// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until
-// the the socket is created to guarantee another goroutine does not migrate
-// to the current thread before execution is complete.
-func SetSocketLabel(label string) error {
- return setSocketLabel(label)
-}
-
-// SocketLabel retrieves the current socket label setting
-func SocketLabel() (string, error) {
- return socketLabel()
-}
-
-// PeerLabel retrieves the label of the client on the other side of a socket
-func PeerLabel(fd uintptr) (string, error) {
- return peerLabel(fd)
-}
-
-// SetKeyLabel takes a process label and tells the kernel to assign the
-// label to the next kernel keyring that gets created. Calls to SetKeyLabel
-// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until
-// the kernel keyring is created to guarantee another goroutine does not migrate
-// to the current thread before execution is complete.
-func SetKeyLabel(label string) error {
- return setKeyLabel(label)
-}
-
-// KeyLabel retrieves the current kernel keyring label setting
-func KeyLabel() (string, error) {
- return keyLabel()
-}
-
-// Get returns the Context as a string
-func (c Context) Get() string {
- return c.get()
-}
-
-// NewContext creates a new Context struct from the specified label
-func NewContext(label string) (Context, error) {
- return newContext(label)
-}
-
-// ClearLabels clears all reserved labels
-func ClearLabels() {
- clearLabels()
-}
-
-// ReserveLabel reserves the MLS/MCS level component of the specified label
-func ReserveLabel(label string) {
- reserveLabel(label)
-}
-
-// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
-func EnforceMode() int {
- return enforceMode()
-}
-
-// SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
-// Disabled is not valid, since this needs to be set at boot time.
-func SetEnforceMode(mode int) error {
- return setEnforceMode(mode)
-}
-
-// DefaultEnforceMode returns the systems default SELinux mode Enforcing,
-// Permissive or Disabled. Note this is is just the default at boot time.
-// EnforceMode tells you the systems current mode.
-func DefaultEnforceMode() int {
- return defaultEnforceMode()
-}
-
-// ReleaseLabel un-reserves the MLS/MCS Level field of the specified label,
-// allowing it to be used by another process.
-func ReleaseLabel(label string) {
- releaseLabel(label)
-}
-
-// ROFileLabel returns the specified SELinux readonly file label
-func ROFileLabel() string {
- return roFileLabel()
-}
-
-// KVMContainerLabels returns the default processLabel and mountLabel to be used
-// for kvm containers by the calling process.
-func KVMContainerLabels() (string, string) {
- return kvmContainerLabels()
-}
-
-// InitContainerLabels returns the default processLabel and file labels to be
-// used for containers running an init system like systemd by the calling process.
-func InitContainerLabels() (string, string) {
- return initContainerLabels()
-}
-
-// ContainerLabels returns an allocated processLabel and fileLabel to be used for
-// container labeling by the calling process.
-func ContainerLabels() (processLabel string, fileLabel string) {
- return containerLabels()
-}
-
-// SecurityCheckContext validates that the SELinux label is understood by the kernel
-func SecurityCheckContext(val string) error {
- return securityCheckContext(val)
-}
-
-// CopyLevel returns a label with the MLS/MCS level from src label replaced on
-// the dest label.
-func CopyLevel(src, dest string) (string, error) {
- return copyLevel(src, dest)
-}
-
-// Chcon changes the fpath file object to the SELinux label label.
-// If fpath is a directory and recurse is true, then Chcon walks the
-// directory tree setting the label.
-//
-// The fpath itself is guaranteed to be relabeled last.
-func Chcon(fpath string, label string, recurse bool) error {
- return chcon(fpath, label, recurse)
-}
-
-// DupSecOpt takes an SELinux process label and returns security options that
-// can be used to set the SELinux Type and Level for future container processes.
-func DupSecOpt(src string) ([]string, error) {
- return dupSecOpt(src)
-}
-
-// DisableSecOpt returns a security opt that can be used to disable SELinux
-// labeling support for future container processes.
-func DisableSecOpt() []string {
- return disableSecOpt()
-}
-
-// GetDefaultContextWithLevel gets a single context for the specified SELinux user
-// identity that is reachable from the specified scon context. The context is based
-// on the per-user /etc/selinux/{SELINUXTYPE}/contexts/users/ if it exists,
-// and falls back to the global /etc/selinux/{SELINUXTYPE}/contexts/default_contexts
-// file.
-func GetDefaultContextWithLevel(user, level, scon string) (string, error) {
- return getDefaultContextWithLevel(user, level, scon)
-}
-
-// PrivContainerMountLabel returns mount label for privileged containers
-func PrivContainerMountLabel() string {
- // Make sure label is initialized.
- _ = label("")
- return privContainerMountLabel
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
deleted file mode 100644
index ee602ab96d..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ /dev/null
@@ -1,1262 +0,0 @@
-package selinux
-
-import (
- "bufio"
- "bytes"
- "crypto/rand"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/big"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
-
- "golang.org/x/sys/unix"
-)
-
-const (
- minSensLen = 2
- contextFile = "/usr/share/containers/selinux/contexts"
- selinuxDir = "/etc/selinux/"
- selinuxUsersDir = "contexts/users"
- defaultContexts = "contexts/default_contexts"
- selinuxConfig = selinuxDir + "config"
- selinuxfsMount = "/sys/fs/selinux"
- selinuxTypeTag = "SELINUXTYPE"
- selinuxTag = "SELINUX"
- xattrNameSelinux = "security.selinux"
-)
-
-type selinuxState struct {
- enabledSet bool
- enabled bool
- selinuxfsOnce sync.Once
- selinuxfs string
- mcsList map[string]bool
- sync.Mutex
-}
-
-type level struct {
- sens uint
- cats *big.Int
-}
-
-type mlsRange struct {
- low *level
- high *level
-}
-
-type defaultSECtx struct {
- user, level, scon string
- userRdr, defaultRdr io.Reader
-
- verifier func(string) error
-}
-
-type levelItem byte
-
-const (
- sensitivity levelItem = 's'
- category levelItem = 'c'
-)
-
-var (
- readOnlyFileLabel string
- state = selinuxState{
- mcsList: make(map[string]bool),
- }
-
- // for attrPath()
- attrPathOnce sync.Once
- haveThreadSelf bool
-
- // for policyRoot()
- policyRootOnce sync.Once
- policyRootVal string
-
- // for label()
- loadLabelsOnce sync.Once
- labels map[string]string
-)
-
-func policyRoot() string {
- policyRootOnce.Do(func() {
- policyRootVal = filepath.Join(selinuxDir, readConfig(selinuxTypeTag))
- })
-
- return policyRootVal
-}
-
-func (s *selinuxState) setEnable(enabled bool) bool {
- s.Lock()
- defer s.Unlock()
- s.enabledSet = true
- s.enabled = enabled
- return s.enabled
-}
-
-func (s *selinuxState) getEnabled() bool {
- s.Lock()
- enabled := s.enabled
- enabledSet := s.enabledSet
- s.Unlock()
- if enabledSet {
- return enabled
- }
-
- enabled = false
- if fs := getSelinuxMountPoint(); fs != "" {
- if con, _ := CurrentLabel(); con != "kernel" {
- enabled = true
- }
- }
- return s.setEnable(enabled)
-}
-
-// setDisabled disables SELinux support for the package
-func setDisabled() {
- state.setEnable(false)
-}
-
-func verifySELinuxfsMount(mnt string) bool {
- var buf unix.Statfs_t
- for {
- err := unix.Statfs(mnt, &buf)
- if err == nil {
- break
- }
- if err == unix.EAGAIN || err == unix.EINTR { //nolint:errorlint // unix errors are bare
- continue
- }
- return false
- }
-
- if uint32(buf.Type) != uint32(unix.SELINUX_MAGIC) {
- return false
- }
- if (buf.Flags & unix.ST_RDONLY) != 0 {
- return false
- }
-
- return true
-}
-
-func findSELinuxfs() string {
- // fast path: check the default mount first
- if verifySELinuxfsMount(selinuxfsMount) {
- return selinuxfsMount
- }
-
- // check if selinuxfs is available before going the slow path
- fs, err := ioutil.ReadFile("/proc/filesystems")
- if err != nil {
- return ""
- }
- if !bytes.Contains(fs, []byte("\tselinuxfs\n")) {
- return ""
- }
-
- // slow path: try to find among the mounts
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return ""
- }
- defer f.Close()
-
- scanner := bufio.NewScanner(f)
- for {
- mnt := findSELinuxfsMount(scanner)
- if mnt == "" { // error or not found
- return ""
- }
- if verifySELinuxfsMount(mnt) {
- return mnt
- }
- }
-}
-
-// findSELinuxfsMount returns a next selinuxfs mount point found,
-// if there is one, or an empty string in case of EOF or error.
-func findSELinuxfsMount(s *bufio.Scanner) string {
- for s.Scan() {
- txt := s.Bytes()
- // The first field after - is fs type.
- // Safe as spaces in mountpoints are encoded as \040
- if !bytes.Contains(txt, []byte(" - selinuxfs ")) {
- continue
- }
- const mPos = 5 // mount point is 5th field
- fields := bytes.SplitN(txt, []byte(" "), mPos+1)
- if len(fields) < mPos+1 {
- continue
- }
- return string(fields[mPos-1])
- }
-
- return ""
-}
-
-func (s *selinuxState) getSELinuxfs() string {
- s.selinuxfsOnce.Do(func() {
- s.selinuxfs = findSELinuxfs()
- })
-
- return s.selinuxfs
-}
-
-// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
-// filesystem or an empty string if no mountpoint is found. Selinuxfs is
-// a proc-like pseudo-filesystem that exposes the SELinux policy API to
-// processes. The existence of an selinuxfs mount is used to determine
-// whether SELinux is currently enabled or not.
-func getSelinuxMountPoint() string {
- return state.getSELinuxfs()
-}
-
-// getEnabled returns whether SELinux is currently enabled.
-func getEnabled() bool {
- return state.getEnabled()
-}
-
-func readConfig(target string) string {
- in, err := os.Open(selinuxConfig)
- if err != nil {
- return ""
- }
- defer in.Close()
-
- scanner := bufio.NewScanner(in)
-
- for scanner.Scan() {
- line := bytes.TrimSpace(scanner.Bytes())
- if len(line) == 0 {
- // Skip blank lines
- continue
- }
- if line[0] == ';' || line[0] == '#' {
- // Skip comments
- continue
- }
- fields := bytes.SplitN(line, []byte{'='}, 2)
- if len(fields) != 2 {
- continue
- }
- if bytes.Equal(fields[0], []byte(target)) {
- return string(bytes.Trim(fields[1], `"`))
- }
- }
- return ""
-}
-
-func isProcHandle(fh *os.File) error {
- var buf unix.Statfs_t
-
- for {
- err := unix.Fstatfs(int(fh.Fd()), &buf)
- if err == nil {
- break
- }
- if err != unix.EINTR { //nolint:errorlint // unix errors are bare
- return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err}
- }
- }
- if buf.Type != unix.PROC_SUPER_MAGIC {
- return fmt.Errorf("file %q is not on procfs", fh.Name())
- }
-
- return nil
-}
-
-func readCon(fpath string) (string, error) {
- if fpath == "" {
- return "", ErrEmptyPath
- }
-
- in, err := os.Open(fpath)
- if err != nil {
- return "", err
- }
- defer in.Close()
-
- if err := isProcHandle(in); err != nil {
- return "", err
- }
- return readConFd(in)
-}
-
-func readConFd(in *os.File) (string, error) {
- data, err := ioutil.ReadAll(in)
- if err != nil {
- return "", err
- }
- return string(bytes.TrimSuffix(data, []byte{0})), nil
-}
-
-// classIndex returns the int index for an object class in the loaded policy,
-// or -1 and an error
-func classIndex(class string) (int, error) {
- permpath := fmt.Sprintf("class/%s/index", class)
- indexpath := filepath.Join(getSelinuxMountPoint(), permpath)
-
- indexB, err := ioutil.ReadFile(indexpath)
- if err != nil {
- return -1, err
- }
- index, err := strconv.Atoi(string(indexB))
- if err != nil {
- return -1, err
- }
-
- return index, nil
-}
-
-// lSetFileLabel sets the SELinux label for this path, not following symlinks,
-// or returns an error.
-func lSetFileLabel(fpath string, label string) error {
- if fpath == "" {
- return ErrEmptyPath
- }
- for {
- err := unix.Lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
- if err == nil {
- break
- }
- if err != unix.EINTR { //nolint:errorlint // unix errors are bare
- return &os.PathError{Op: "lsetxattr", Path: fpath, Err: err}
- }
- }
-
- return nil
-}
-
-// setFileLabel sets the SELinux label for this path, following symlinks,
-// or returns an error.
-func setFileLabel(fpath string, label string) error {
- if fpath == "" {
- return ErrEmptyPath
- }
- for {
- err := unix.Setxattr(fpath, xattrNameSelinux, []byte(label), 0)
- if err == nil {
- break
- }
- if err != unix.EINTR { //nolint:errorlint // unix errors are bare
- return &os.PathError{Op: "setxattr", Path: fpath, Err: err}
- }
- }
-
- return nil
-}
-
-// fileLabel returns the SELinux label for this path, following symlinks,
-// or returns an error.
-func fileLabel(fpath string) (string, error) {
- if fpath == "" {
- return "", ErrEmptyPath
- }
-
- label, err := getxattr(fpath, xattrNameSelinux)
- if err != nil {
- return "", &os.PathError{Op: "getxattr", Path: fpath, Err: err}
- }
- // Trim the NUL byte at the end of the byte buffer, if present.
- if len(label) > 0 && label[len(label)-1] == '\x00' {
- label = label[:len(label)-1]
- }
- return string(label), nil
-}
-
-// lFileLabel returns the SELinux label for this path, not following symlinks,
-// or returns an error.
-func lFileLabel(fpath string) (string, error) {
- if fpath == "" {
- return "", ErrEmptyPath
- }
-
- label, err := lgetxattr(fpath, xattrNameSelinux)
- if err != nil {
- return "", &os.PathError{Op: "lgetxattr", Path: fpath, Err: err}
- }
- // Trim the NUL byte at the end of the byte buffer, if present.
- if len(label) > 0 && label[len(label)-1] == '\x00' {
- label = label[:len(label)-1]
- }
- return string(label), nil
-}
-
-// setFSCreateLabel tells kernel the label to create all file system objects
-// created by this task. Setting label="" to return to default.
-func setFSCreateLabel(label string) error {
- return writeAttr("fscreate", label)
-}
-
-// fsCreateLabel returns the default label the kernel which the kernel is using
-// for file system objects created by this task. "" indicates default.
-func fsCreateLabel() (string, error) {
- return readAttr("fscreate")
-}
-
-// currentLabel returns the SELinux label of the current process thread, or an error.
-func currentLabel() (string, error) {
- return readAttr("current")
-}
-
-// pidLabel returns the SELinux label of the given pid, or an error.
-func pidLabel(pid int) (string, error) {
- return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
-}
-
-// ExecLabel returns the SELinux label that the kernel will use for any programs
-// that are executed by the current process thread, or an error.
-func execLabel() (string, error) {
- return readAttr("exec")
-}
-
-func writeCon(fpath, val string) error {
- if fpath == "" {
- return ErrEmptyPath
- }
- if val == "" {
- if !getEnabled() {
- return nil
- }
- }
-
- out, err := os.OpenFile(fpath, os.O_WRONLY, 0)
- if err != nil {
- return err
- }
- defer out.Close()
-
- if err := isProcHandle(out); err != nil {
- return err
- }
-
- if val != "" {
- _, err = out.Write([]byte(val))
- } else {
- _, err = out.Write(nil)
- }
- if err != nil {
- return err
- }
- return nil
-}
-
-func attrPath(attr string) string {
- // Linux >= 3.17 provides this
- const threadSelfPrefix = "/proc/thread-self/attr"
-
- attrPathOnce.Do(func() {
- st, err := os.Stat(threadSelfPrefix)
- if err == nil && st.Mode().IsDir() {
- haveThreadSelf = true
- }
- })
-
- if haveThreadSelf {
- return path.Join(threadSelfPrefix, attr)
- }
-
- return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr)
-}
-
-func readAttr(attr string) (string, error) {
- return readCon(attrPath(attr))
-}
-
-func writeAttr(attr, val string) error {
- return writeCon(attrPath(attr), val)
-}
-
-// canonicalizeContext takes a context string and writes it to the kernel
-// the function then returns the context that the kernel will use. Use this
-// function to check if two contexts are equivalent
-func canonicalizeContext(val string) (string, error) {
- return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val)
-}
-
-// computeCreateContext requests the type transition from source to target for
-// class from the kernel.
-func computeCreateContext(source string, target string, class string) (string, error) {
- classidx, err := classIndex(class)
- if err != nil {
- return "", err
- }
-
- return readWriteCon(filepath.Join(getSelinuxMountPoint(), "create"), fmt.Sprintf("%s %s %d", source, target, classidx))
-}
-
-// catsToBitset stores categories in a bitset.
-func catsToBitset(cats string) (*big.Int, error) {
- bitset := new(big.Int)
-
- catlist := strings.Split(cats, ",")
- for _, r := range catlist {
- ranges := strings.SplitN(r, ".", 2)
- if len(ranges) > 1 {
- catstart, err := parseLevelItem(ranges[0], category)
- if err != nil {
- return nil, err
- }
- catend, err := parseLevelItem(ranges[1], category)
- if err != nil {
- return nil, err
- }
- for i := catstart; i <= catend; i++ {
- bitset.SetBit(bitset, int(i), 1)
- }
- } else {
- cat, err := parseLevelItem(ranges[0], category)
- if err != nil {
- return nil, err
- }
- bitset.SetBit(bitset, int(cat), 1)
- }
- }
-
- return bitset, nil
-}
-
-// parseLevelItem parses and verifies that a sensitivity or category are valid
-func parseLevelItem(s string, sep levelItem) (uint, error) {
- if len(s) < minSensLen || levelItem(s[0]) != sep {
- return 0, ErrLevelSyntax
- }
- val, err := strconv.ParseUint(s[1:], 10, 32)
- if err != nil {
- return 0, err
- }
-
- return uint(val), nil
-}
-
-// parseLevel fills a level from a string that contains
-// a sensitivity and categories
-func (l *level) parseLevel(levelStr string) error {
- lvl := strings.SplitN(levelStr, ":", 2)
- sens, err := parseLevelItem(lvl[0], sensitivity)
- if err != nil {
- return fmt.Errorf("failed to parse sensitivity: %w", err)
- }
- l.sens = sens
- if len(lvl) > 1 {
- cats, err := catsToBitset(lvl[1])
- if err != nil {
- return fmt.Errorf("failed to parse categories: %w", err)
- }
- l.cats = cats
- }
-
- return nil
-}
-
-// rangeStrToMLSRange marshals a string representation of a range.
-func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
- mlsRange := &mlsRange{}
- levelSlice := strings.SplitN(rangeStr, "-", 2)
-
- switch len(levelSlice) {
- // rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023
- case 2:
- mlsRange.high = &level{}
- if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil {
- return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err)
- }
- fallthrough
- // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023
- case 1:
- mlsRange.low = &level{}
- if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil {
- return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err)
- }
- }
-
- if mlsRange.high == nil {
- mlsRange.high = mlsRange.low
- }
-
- return mlsRange, nil
-}
-
-// bitsetToStr takes a category bitset and returns it in the
-// canonical selinux syntax
-func bitsetToStr(c *big.Int) string {
- var str string
-
- length := 0
- for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ {
- if c.Bit(i) == 0 {
- continue
- }
- if length == 0 {
- if str != "" {
- str += ","
- }
- str += "c" + strconv.Itoa(i)
- }
- if c.Bit(i+1) == 1 {
- length++
- continue
- }
- if length == 1 {
- str += ",c" + strconv.Itoa(i)
- } else if length > 1 {
- str += ".c" + strconv.Itoa(i)
- }
- length = 0
- }
-
- return str
-}
-
-func (l1 *level) equal(l2 *level) bool {
- if l2 == nil || l1 == nil {
- return l1 == l2
- }
- if l1.sens != l2.sens {
- return false
- }
- if l2.cats == nil || l1.cats == nil {
- return l2.cats == l1.cats
- }
- return l1.cats.Cmp(l2.cats) == 0
-}
-
-// String returns an mlsRange as a string.
-func (m mlsRange) String() string {
- low := "s" + strconv.Itoa(int(m.low.sens))
- if m.low.cats != nil && m.low.cats.BitLen() > 0 {
- low += ":" + bitsetToStr(m.low.cats)
- }
-
- if m.low.equal(m.high) {
- return low
- }
-
- high := "s" + strconv.Itoa(int(m.high.sens))
- if m.high.cats != nil && m.high.cats.BitLen() > 0 {
- high += ":" + bitsetToStr(m.high.cats)
- }
-
- return low + "-" + high
-}
-
-func max(a, b uint) uint {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b uint) uint {
- if a < b {
- return a
- }
- return b
-}
-
-// calculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound)
-// of a source and target range.
-// The glblub is calculated as the greater of the low sensitivities and
-// the lower of the high sensitivities and the and of each category bitset.
-func calculateGlbLub(sourceRange, targetRange string) (string, error) {
- s, err := rangeStrToMLSRange(sourceRange)
- if err != nil {
- return "", err
- }
- t, err := rangeStrToMLSRange(targetRange)
- if err != nil {
- return "", err
- }
-
- if s.high.sens < t.low.sens || t.high.sens < s.low.sens {
- /* these ranges have no common sensitivities */
- return "", ErrIncomparable
- }
-
- outrange := &mlsRange{low: &level{}, high: &level{}}
-
- /* take the greatest of the low */
- outrange.low.sens = max(s.low.sens, t.low.sens)
-
- /* take the least of the high */
- outrange.high.sens = min(s.high.sens, t.high.sens)
-
- /* find the intersecting categories */
- if s.low.cats != nil && t.low.cats != nil {
- outrange.low.cats = new(big.Int)
- outrange.low.cats.And(s.low.cats, t.low.cats)
- }
- if s.high.cats != nil && t.high.cats != nil {
- outrange.high.cats = new(big.Int)
- outrange.high.cats.And(s.high.cats, t.high.cats)
- }
-
- return outrange.String(), nil
-}
-
-func readWriteCon(fpath string, val string) (string, error) {
- if fpath == "" {
- return "", ErrEmptyPath
- }
- f, err := os.OpenFile(fpath, os.O_RDWR, 0)
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- _, err = f.Write([]byte(val))
- if err != nil {
- return "", err
- }
-
- return readConFd(f)
-}
-
-// setExecLabel sets the SELinux label that the kernel will use for any programs
-// that are executed by the current process thread, or an error.
-func setExecLabel(label string) error {
- return writeAttr("exec", label)
-}
-
-// setTaskLabel sets the SELinux label for the current thread, or an error.
-// This requires the dyntransition permission.
-func setTaskLabel(label string) error {
- return writeAttr("current", label)
-}
-
-// setSocketLabel takes a process label and tells the kernel to assign the
-// label to the next socket that gets created
-func setSocketLabel(label string) error {
- return writeAttr("sockcreate", label)
-}
-
-// socketLabel retrieves the current socket label setting
-func socketLabel() (string, error) {
- return readAttr("sockcreate")
-}
-
-// peerLabel retrieves the label of the client on the other side of a socket
-func peerLabel(fd uintptr) (string, error) {
- label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
- if err != nil {
- return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err}
- }
- return label, nil
-}
-
-// setKeyLabel takes a process label and tells the kernel to assign the
-// label to the next kernel keyring that gets created
-func setKeyLabel(label string) error {
- err := writeCon("/proc/self/attr/keycreate", label)
- if errors.Is(err, os.ErrNotExist) {
- return nil
- }
- if label == "" && errors.Is(err, os.ErrPermission) {
- return nil
- }
- return err
-}
-
-// keyLabel retrieves the current kernel keyring label setting
-func keyLabel() (string, error) {
- return readCon("/proc/self/attr/keycreate")
-}
-
-// get returns the Context as a string
-func (c Context) get() string {
- if level := c["level"]; level != "" {
- return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level
- }
- return c["user"] + ":" + c["role"] + ":" + c["type"]
-}
-
-// newContext creates a new Context struct from the specified label
-func newContext(label string) (Context, error) {
- c := make(Context)
-
- if len(label) != 0 {
- con := strings.SplitN(label, ":", 4)
- if len(con) < 3 {
- return c, InvalidLabel
- }
- c["user"] = con[0]
- c["role"] = con[1]
- c["type"] = con[2]
- if len(con) > 3 {
- c["level"] = con[3]
- }
- }
- return c, nil
-}
-
-// clearLabels clears all reserved labels
-func clearLabels() {
- state.Lock()
- state.mcsList = make(map[string]bool)
- state.Unlock()
-}
-
-// reserveLabel reserves the MLS/MCS level component of the specified label
-func reserveLabel(label string) {
- if len(label) != 0 {
- con := strings.SplitN(label, ":", 4)
- if len(con) > 3 {
- _ = mcsAdd(con[3])
- }
- }
-}
-
-func selinuxEnforcePath() string {
- return path.Join(getSelinuxMountPoint(), "enforce")
-}
-
-// enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
-func enforceMode() int {
- var enforce int
-
- enforceB, err := ioutil.ReadFile(selinuxEnforcePath())
- if err != nil {
- return -1
- }
- enforce, err = strconv.Atoi(string(enforceB))
- if err != nil {
- return -1
- }
- return enforce
-}
-
-// setEnforceMode sets the current SELinux mode Enforcing, Permissive.
-// Disabled is not valid, since this needs to be set at boot time.
-func setEnforceMode(mode int) error {
- return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644)
-}
-
-// defaultEnforceMode returns the systems default SELinux mode Enforcing,
-// Permissive or Disabled. Note this is is just the default at boot time.
-// EnforceMode tells you the systems current mode.
-func defaultEnforceMode() int {
- switch readConfig(selinuxTag) {
- case "enforcing":
- return Enforcing
- case "permissive":
- return Permissive
- }
- return Disabled
-}
-
-func mcsAdd(mcs string) error {
- if mcs == "" {
- return nil
- }
- state.Lock()
- defer state.Unlock()
- if state.mcsList[mcs] {
- return ErrMCSAlreadyExists
- }
- state.mcsList[mcs] = true
- return nil
-}
-
-func mcsDelete(mcs string) {
- if mcs == "" {
- return
- }
- state.Lock()
- defer state.Unlock()
- state.mcsList[mcs] = false
-}
-
-func intToMcs(id int, catRange uint32) string {
- var (
- SETSIZE = int(catRange)
- TIER = SETSIZE
- ORD = id
- )
-
- if id < 1 || id > 523776 {
- return ""
- }
-
- for ORD > TIER {
- ORD -= TIER
- TIER--
- }
- TIER = SETSIZE - TIER
- ORD += TIER
- return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
-}
-
-func uniqMcs(catRange uint32) string {
- var (
- n uint32
- c1, c2 uint32
- mcs string
- )
-
- for {
- _ = binary.Read(rand.Reader, binary.LittleEndian, &n)
- c1 = n % catRange
- _ = binary.Read(rand.Reader, binary.LittleEndian, &n)
- c2 = n % catRange
- if c1 == c2 {
- continue
- } else if c1 > c2 {
- c1, c2 = c2, c1
- }
- mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
- if err := mcsAdd(mcs); err != nil {
- continue
- }
- break
- }
- return mcs
-}
-
-// releaseLabel un-reserves the MLS/MCS Level field of the specified label,
-// allowing it to be used by another process.
-func releaseLabel(label string) {
- if len(label) != 0 {
- con := strings.SplitN(label, ":", 4)
- if len(con) > 3 {
- mcsDelete(con[3])
- }
- }
-}
-
-// roFileLabel returns the specified SELinux readonly file label
-func roFileLabel() string {
- return readOnlyFileLabel
-}
-
-func openContextFile() (*os.File, error) {
- if f, err := os.Open(contextFile); err == nil {
- return f, nil
- }
- return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts"))
-}
-
-func loadLabels() {
- labels = make(map[string]string)
- in, err := openContextFile()
- if err != nil {
- return
- }
- defer in.Close()
-
- scanner := bufio.NewScanner(in)
-
- for scanner.Scan() {
- line := bytes.TrimSpace(scanner.Bytes())
- if len(line) == 0 {
- // Skip blank lines
- continue
- }
- if line[0] == ';' || line[0] == '#' {
- // Skip comments
- continue
- }
- fields := bytes.SplitN(line, []byte{'='}, 2)
- if len(fields) != 2 {
- continue
- }
- key, val := bytes.TrimSpace(fields[0]), bytes.TrimSpace(fields[1])
- labels[string(key)] = string(bytes.Trim(val, `"`))
- }
-
- con, _ := NewContext(labels["file"])
- con["level"] = fmt.Sprintf("s0:c%d,c%d", maxCategory-2, maxCategory-1)
- privContainerMountLabel = con.get()
- reserveLabel(privContainerMountLabel)
-}
-
-func label(key string) string {
- loadLabelsOnce.Do(func() {
- loadLabels()
- })
- return labels[key]
-}
-
-// kvmContainerLabels returns the default processLabel and mountLabel to be used
-// for kvm containers by the calling process.
-func kvmContainerLabels() (string, string) {
- processLabel := label("kvm_process")
- if processLabel == "" {
- processLabel = label("process")
- }
-
- return addMcs(processLabel, label("file"))
-}
-
-// initContainerLabels returns the default processLabel and file labels to be
-// used for containers running an init system like systemd by the calling process.
-func initContainerLabels() (string, string) {
- processLabel := label("init_process")
- if processLabel == "" {
- processLabel = label("process")
- }
-
- return addMcs(processLabel, label("file"))
-}
-
-// containerLabels returns an allocated processLabel and fileLabel to be used for
-// container labeling by the calling process.
-func containerLabels() (processLabel string, fileLabel string) {
- if !getEnabled() {
- return "", ""
- }
-
- processLabel = label("process")
- fileLabel = label("file")
- readOnlyFileLabel = label("ro_file")
-
- if processLabel == "" || fileLabel == "" {
- return "", fileLabel
- }
-
- if readOnlyFileLabel == "" {
- readOnlyFileLabel = fileLabel
- }
-
- return addMcs(processLabel, fileLabel)
-}
-
-func addMcs(processLabel, fileLabel string) (string, string) {
- scon, _ := NewContext(processLabel)
- if scon["level"] != "" {
- mcs := uniqMcs(CategoryRange)
- scon["level"] = mcs
- processLabel = scon.Get()
- scon, _ = NewContext(fileLabel)
- scon["level"] = mcs
- fileLabel = scon.Get()
- }
- return processLabel, fileLabel
-}
-
-// securityCheckContext validates that the SELinux label is understood by the kernel
-func securityCheckContext(val string) error {
- return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644)
-}
-
-// copyLevel returns a label with the MLS/MCS level from src label replaced on
-// the dest label.
-func copyLevel(src, dest string) (string, error) {
- if src == "" {
- return "", nil
- }
- if err := SecurityCheckContext(src); err != nil {
- return "", err
- }
- if err := SecurityCheckContext(dest); err != nil {
- return "", err
- }
- scon, err := NewContext(src)
- if err != nil {
- return "", err
- }
- tcon, err := NewContext(dest)
- if err != nil {
- return "", err
- }
- mcsDelete(tcon["level"])
- _ = mcsAdd(scon["level"])
- tcon["level"] = scon["level"]
- return tcon.Get(), nil
-}
-
-// Prevent users from relabeling system files
-func badPrefix(fpath string) error {
- if fpath == "" {
- return ErrEmptyPath
- }
-
- badPrefixes := []string{"/usr"}
- for _, prefix := range badPrefixes {
- if strings.HasPrefix(fpath, prefix) {
- return fmt.Errorf("relabeling content in %s is not allowed", prefix)
- }
- }
- return nil
-}
-
-// chcon changes the fpath file object to the SELinux label label.
-// If fpath is a directory and recurse is true, then chcon walks the
-// directory tree setting the label.
-func chcon(fpath string, label string, recurse bool) error {
- if fpath == "" {
- return ErrEmptyPath
- }
- if label == "" {
- return nil
- }
- if err := badPrefix(fpath); err != nil {
- return err
- }
-
- if !recurse {
- return setFileLabel(fpath, label)
- }
-
- return rchcon(fpath, label)
-}
-
-// dupSecOpt takes an SELinux process label and returns security options that
-// can be used to set the SELinux Type and Level for future container processes.
-func dupSecOpt(src string) ([]string, error) {
- if src == "" {
- return nil, nil
- }
- con, err := NewContext(src)
- if err != nil {
- return nil, err
- }
- if con["user"] == "" ||
- con["role"] == "" ||
- con["type"] == "" {
- return nil, nil
- }
- dup := []string{
- "user:" + con["user"],
- "role:" + con["role"],
- "type:" + con["type"],
- }
-
- if con["level"] != "" {
- dup = append(dup, "level:"+con["level"])
- }
-
- return dup, nil
-}
-
-// disableSecOpt returns a security opt that can be used to disable SELinux
-// labeling support for future container processes.
-func disableSecOpt() []string {
- return []string{"disable"}
-}
-
-// findUserInContext scans the reader for a valid SELinux context
-// match that is verified with the verifier. Invalid contexts are
-// skipped. It returns a matched context or an empty string if no
-// match is found. If a scanner error occurs, it is returned.
-func findUserInContext(context Context, r io.Reader, verifier func(string) error) (string, error) {
- fromRole := context["role"]
- fromType := context["type"]
- scanner := bufio.NewScanner(r)
-
- for scanner.Scan() {
- fromConns := strings.Fields(scanner.Text())
- if len(fromConns) == 0 {
- // Skip blank lines
- continue
- }
-
- line := fromConns[0]
-
- if line[0] == ';' || line[0] == '#' {
- // Skip comments
- continue
- }
-
- // user context files contexts are formatted as
- // role_r:type_t:s0 where the user is missing.
- lineArr := strings.SplitN(line, ":", 4)
- // skip context with typo, or role and type do not match
- if len(lineArr) != 3 ||
- lineArr[0] != fromRole ||
- lineArr[1] != fromType {
- continue
- }
-
- for _, cc := range fromConns[1:] {
- toConns := strings.SplitN(cc, ":", 4)
- if len(toConns) != 3 {
- continue
- }
-
- context["role"] = toConns[0]
- context["type"] = toConns[1]
-
- outConn := context.get()
- if err := verifier(outConn); err != nil {
- continue
- }
-
- return outConn, nil
- }
- }
- if err := scanner.Err(); err != nil {
- return "", fmt.Errorf("failed to scan for context: %w", err)
- }
-
- return "", nil
-}
-
-func getDefaultContextFromReaders(c *defaultSECtx) (string, error) {
- if c.verifier == nil {
- return "", ErrVerifierNil
- }
-
- context, err := newContext(c.scon)
- if err != nil {
- return "", fmt.Errorf("failed to create label for %s: %w", c.scon, err)
- }
-
- // set so the verifier validates the matched context with the provided user and level.
- context["user"] = c.user
- context["level"] = c.level
-
- conn, err := findUserInContext(context, c.userRdr, c.verifier)
- if err != nil {
- return "", err
- }
-
- if conn != "" {
- return conn, nil
- }
-
- conn, err = findUserInContext(context, c.defaultRdr, c.verifier)
- if err != nil {
- return "", err
- }
-
- if conn != "" {
- return conn, nil
- }
-
- return "", fmt.Errorf("context %q not found: %w", c.scon, ErrContextMissing)
-}
-
-func getDefaultContextWithLevel(user, level, scon string) (string, error) {
- userPath := filepath.Join(policyRoot(), selinuxUsersDir, user)
- fu, err := os.Open(userPath)
- if err != nil {
- return "", err
- }
- defer fu.Close()
-
- defaultPath := filepath.Join(policyRoot(), defaultContexts)
- fd, err := os.Open(defaultPath)
- if err != nil {
- return "", err
- }
- defer fd.Close()
-
- c := defaultSECtx{
- user: user,
- level: level,
- scon: scon,
- userRdr: fu,
- defaultRdr: fd,
- verifier: securityCheckContext,
- }
-
- return getDefaultContextFromReaders(&c)
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
deleted file mode 100644
index 78743b020c..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// +build !linux
-
-package selinux
-
-func setDisabled() {
-}
-
-func getEnabled() bool {
- return false
-}
-
-func classIndex(class string) (int, error) {
- return -1, nil
-}
-
-func setFileLabel(fpath string, label string) error {
- return nil
-}
-
-func lSetFileLabel(fpath string, label string) error {
- return nil
-}
-
-func fileLabel(fpath string) (string, error) {
- return "", nil
-}
-
-func lFileLabel(fpath string) (string, error) {
- return "", nil
-}
-
-func setFSCreateLabel(label string) error {
- return nil
-}
-
-func fsCreateLabel() (string, error) {
- return "", nil
-}
-
-func currentLabel() (string, error) {
- return "", nil
-}
-
-func pidLabel(pid int) (string, error) {
- return "", nil
-}
-
-func execLabel() (string, error) {
- return "", nil
-}
-
-func canonicalizeContext(val string) (string, error) {
- return "", nil
-}
-
-func computeCreateContext(source string, target string, class string) (string, error) {
- return "", nil
-}
-
-func calculateGlbLub(sourceRange, targetRange string) (string, error) {
- return "", nil
-}
-
-func setExecLabel(label string) error {
- return nil
-}
-
-func setTaskLabel(label string) error {
- return nil
-}
-
-func setSocketLabel(label string) error {
- return nil
-}
-
-func socketLabel() (string, error) {
- return "", nil
-}
-
-func peerLabel(fd uintptr) (string, error) {
- return "", nil
-}
-
-func setKeyLabel(label string) error {
- return nil
-}
-
-func keyLabel() (string, error) {
- return "", nil
-}
-
-func (c Context) get() string {
- return ""
-}
-
-func newContext(label string) (Context, error) {
- c := make(Context)
- return c, nil
-}
-
-func clearLabels() {
-}
-
-func reserveLabel(label string) {
-}
-
-func enforceMode() int {
- return Disabled
-}
-
-func setEnforceMode(mode int) error {
- return nil
-}
-
-func defaultEnforceMode() int {
- return Disabled
-}
-
-func releaseLabel(label string) {
-}
-
-func roFileLabel() string {
- return ""
-}
-
-func kvmContainerLabels() (string, string) {
- return "", ""
-}
-
-func initContainerLabels() (string, string) {
- return "", ""
-}
-
-func containerLabels() (processLabel string, fileLabel string) {
- return "", ""
-}
-
-func securityCheckContext(val string) error {
- return nil
-}
-
-func copyLevel(src, dest string) (string, error) {
- return "", nil
-}
-
-func chcon(fpath string, label string, recurse bool) error {
- return nil
-}
-
-func dupSecOpt(src string) ([]string, error) {
- return nil, nil
-}
-
-func disableSecOpt() []string {
- return []string{"disable"}
-}
-
-func getDefaultContextWithLevel(user, level, scon string) (string, error) {
- return "", nil
-}
-
-func label(_ string) string {
- return ""
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/etcd/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
deleted file mode 100644
index 9e473ca168..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package selinux
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// lgetxattr returns a []byte slice containing the value of
-// an extended attribute attr set for path.
-func lgetxattr(path, attr string) ([]byte, error) {
- // Start with a 128 length byte array
- dest := make([]byte, 128)
- sz, errno := doLgetxattr(path, attr, dest)
- for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare
- // Buffer too small, use zero-sized buffer to get the actual size
- sz, errno = doLgetxattr(path, attr, []byte{})
- if errno != nil {
- return nil, errno
- }
-
- dest = make([]byte, sz)
- sz, errno = doLgetxattr(path, attr, dest)
- }
- if errno != nil {
- return nil, errno
- }
-
- return dest[:sz], nil
-}
-
-// doLgetxattr is a wrapper that retries on EINTR
-func doLgetxattr(path, attr string, dest []byte) (int, error) {
- for {
- sz, err := unix.Lgetxattr(path, attr, dest)
- if err != unix.EINTR { //nolint:errorlint // unix errors are bare
- return sz, err
- }
- }
-}
-
-// getxattr returns a []byte slice containing the value of
-// an extended attribute attr set for path.
-func getxattr(path, attr string) ([]byte, error) {
- // Start with a 128 length byte array
- dest := make([]byte, 128)
- sz, errno := dogetxattr(path, attr, dest)
- for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare
- // Buffer too small, use zero-sized buffer to get the actual size
- sz, errno = dogetxattr(path, attr, []byte{})
- if errno != nil {
- return nil, errno
- }
-
- dest = make([]byte, sz)
- sz, errno = dogetxattr(path, attr, dest)
- }
- if errno != nil {
- return nil, errno
- }
-
- return dest[:sz], nil
-}
-
-// dogetxattr is a wrapper that retries on EINTR
-func dogetxattr(path, attr string, dest []byte) (int, error) {
- for {
- sz, err := unix.Getxattr(path, attr, dest)
- if err != unix.EINTR { //nolint:errorlint // unix errors are bare
- return sz, err
- }
- }
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
deleted file mode 100644
index 7e78dce015..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## pwalk: parallel implementation of filepath.Walk
-
-This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk)
-which may speed it up by calling multiple callback functions (WalkFunc) in parallel,
-utilizing goroutines.
-
-By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
-This can be changed by using WalkN function which has the additional
-parameter, specifying the number of goroutines (concurrency).
-
-### pwalk vs pwalkdir
-
-This package is deprecated in favor of
-[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir),
-which is faster, but requires at least Go 1.16.
-
-### Caveats
-
-Please note the following limitations of this code:
-
-* Unlike filepath.Walk, the order of calls is non-deterministic;
-
-* Only primitive error handling is supported:
-
- * filepath.SkipDir is not supported;
-
- * no errors are ever passed to WalkFunc;
-
- * once any error is returned from any WalkFunc instance, no more new calls
- to WalkFunc are made, and the error is returned to the caller of Walk;
-
- * if more than one walkFunc instance will return an error, only one
- of such errors will be propagated and returned by Walk, others
- will be silently discarded.
-
-### Documentation
-
-For the official documentation, see
-https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc
-
-### Benchmarks
-
-For a WalkFunc that consists solely of the return statement, this
-implementation is about 10% slower than the standard library's
-filepath.Walk.
-
-Otherwise (if a WalkFunc is doing something) this is usually faster,
-except when the WalkN(..., 1) is used.
diff --git a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
deleted file mode 100644
index 202c80da59..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package pwalk
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "sync"
-)
-
-type WalkFunc = filepath.WalkFunc
-
-// Walk is a wrapper for filepath.Walk which can call multiple walkFn
-// in parallel, allowing to handle each item concurrently. A maximum of
-// twice the runtime.NumCPU() walkFn will be called at any one time.
-// If you want to change the maximum, use WalkN instead.
-//
-// The order of calls is non-deterministic.
-//
-// Note that this implementation only supports primitive error handling:
-//
-// - no errors are ever passed to walkFn;
-//
-// - once a walkFn returns any error, all further processing stops
-// and the error is returned to the caller of Walk;
-//
-// - filepath.SkipDir is not supported;
-//
-// - if more than one walkFn instance will return an error, only one
-// of such errors will be propagated and returned by Walk, others
-// will be silently discarded.
-func Walk(root string, walkFn WalkFunc) error {
- return WalkN(root, walkFn, runtime.NumCPU()*2)
-}
-
-// WalkN is a wrapper for filepath.Walk which can call multiple walkFn
-// in parallel, allowing to handle each item concurrently. A maximum of
-// num walkFn will be called at any one time.
-//
-// Please see Walk documentation for caveats of using this function.
-func WalkN(root string, walkFn WalkFunc, num int) error {
- // make sure limit is sensible
- if num < 1 {
- return fmt.Errorf("walk(%q): num must be > 0", root)
- }
-
- files := make(chan *walkArgs, 2*num)
- errCh := make(chan error, 1) // get the first error, ignore others
-
- // Start walking a tree asap
- var (
- err error
- wg sync.WaitGroup
-
- rootLen = len(root)
- rootEntry *walkArgs
- )
- wg.Add(1)
- go func() {
- err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
- if err != nil {
- close(files)
- return err
- }
- if len(p) == rootLen {
- // Root entry is processed separately below.
- rootEntry = &walkArgs{path: p, info: &info}
- return nil
- }
- // add a file to the queue unless a callback sent an error
- select {
- case e := <-errCh:
- close(files)
- return e
- default:
- files <- &walkArgs{path: p, info: &info}
- return nil
- }
- })
- if err == nil {
- close(files)
- }
- wg.Done()
- }()
-
- wg.Add(num)
- for i := 0; i < num; i++ {
- go func() {
- for file := range files {
- if e := walkFn(file.path, *file.info, nil); e != nil {
- select {
- case errCh <- e: // sent ok
- default: // buffer full
- }
- }
- }
- wg.Done()
- }()
- }
-
- wg.Wait()
-
- if err == nil {
- err = walkFn(rootEntry.path, *rootEntry.info, nil)
- }
-
- return err
-}
-
-// walkArgs holds the arguments that were passed to the Walk or WalkN
-// functions.
-type walkArgs struct {
- path string
- info *os.FileInfo
-}
diff --git a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md b/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
deleted file mode 100644
index 068ac40056..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## pwalkdir: parallel implementation of filepath.WalkDir
-
-This is a wrapper for [filepath.WalkDir](https://pkg.go.dev/path/filepath#WalkDir)
-which may speed it up by calling multiple callback functions (WalkDirFunc)
-in parallel, utilizing goroutines.
-
-By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
-This can be changed by using WalkN function which has the additional
-parameter, specifying the number of goroutines (concurrency).
-
-### pwalk vs pwalkdir
-
-This package is very similar to
-[pwalk](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir),
-but utilizes `filepath.WalkDir` (added to Go 1.16), which does not call stat(2)
-on every entry and is therefore faster (up to 3x, depending on usage scenario).
-
-Users who are OK with requiring Go 1.16+ should switch to this
-implementation.
-
-### Caveats
-
-Please note the following limitations of this code:
-
-* Unlike filepath.WalkDir, the order of calls is non-deterministic;
-
-* Only primitive error handling is supported:
-
- * fs.SkipDir is not supported;
-
- * no errors are ever passed to WalkDirFunc;
-
- * once any error is returned from any walkDirFunc instance, no more calls
- to WalkDirFunc are made, and the error is returned to the caller of WalkDir;
-
- * if more than one WalkDirFunc instance will return an error, only one
- of such errors will be propagated to and returned by WalkDir, others
- will be silently discarded.
-
-### Documentation
-
-For the official documentation, see
-https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir
-
-### Benchmarks
-
-For a WalkDirFunc that consists solely of the return statement, this
-implementation is about 15% slower than the standard library's
-filepath.WalkDir.
-
-Otherwise (if a WalkDirFunc is actually doing something) this is usually
-faster, except when the WalkDirN(..., 1) is used. Run `go test -bench .`
-to see how different operations can benefit from it, as well as how the
-level of paralellism affects the speed.
diff --git a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
deleted file mode 100644
index a5796b2c4f..0000000000
--- a/etcd/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
+++ /dev/null
@@ -1,116 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package pwalkdir
-
-import (
- "fmt"
- "io/fs"
- "path/filepath"
- "runtime"
- "sync"
-)
-
-// Walk is a wrapper for filepath.WalkDir which can call multiple walkFn
-// in parallel, allowing to handle each item concurrently. A maximum of
-// twice the runtime.NumCPU() walkFn will be called at any one time.
-// If you want to change the maximum, use WalkN instead.
-//
-// The order of calls is non-deterministic.
-//
-// Note that this implementation only supports primitive error handling:
-//
-// - no errors are ever passed to walkFn;
-//
-// - once a walkFn returns any error, all further processing stops
-// and the error is returned to the caller of Walk;
-//
-// - filepath.SkipDir is not supported;
-//
-// - if more than one walkFn instance will return an error, only one
-// of such errors will be propagated and returned by Walk, others
-// will be silently discarded.
-func Walk(root string, walkFn fs.WalkDirFunc) error {
- return WalkN(root, walkFn, runtime.NumCPU()*2)
-}
-
-// WalkN is a wrapper for filepath.WalkDir which can call multiple walkFn
-// in parallel, allowing to handle each item concurrently. A maximum of
-// num walkFn will be called at any one time.
-//
-// Please see Walk documentation for caveats of using this function.
-func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
- // make sure limit is sensible
- if num < 1 {
- return fmt.Errorf("walk(%q): num must be > 0", root)
- }
-
- files := make(chan *walkArgs, 2*num)
- errCh := make(chan error, 1) // Get the first error, ignore others.
-
- // Start walking a tree asap.
- var (
- err error
- wg sync.WaitGroup
-
- rootLen = len(root)
- rootEntry *walkArgs
- )
- wg.Add(1)
- go func() {
- err = filepath.WalkDir(root, func(p string, entry fs.DirEntry, err error) error {
- if err != nil {
- close(files)
- return err
- }
- if len(p) == rootLen {
- // Root entry is processed separately below.
- rootEntry = &walkArgs{path: p, entry: entry}
- return nil
- }
- // Add a file to the queue unless a callback sent an error.
- select {
- case e := <-errCh:
- close(files)
- return e
- default:
- files <- &walkArgs{path: p, entry: entry}
- return nil
- }
- })
- if err == nil {
- close(files)
- }
- wg.Done()
- }()
-
- wg.Add(num)
- for i := 0; i < num; i++ {
- go func() {
- for file := range files {
- if e := walkFn(file.path, file.entry, nil); e != nil {
- select {
- case errCh <- e: // sent ok
- default: // buffer full
- }
- }
- }
- wg.Done()
- }()
- }
-
- wg.Wait()
-
- if err == nil {
- err = walkFn(rootEntry.path, rootEntry.entry, nil)
- }
-
- return err
-}
-
-// walkArgs holds the arguments that were passed to the Walk or WalkN
-// functions.
-type walkArgs struct {
- path string
- entry fs.DirEntry
-}
diff --git a/etcd/vendor/github.com/openshift/library-go/LICENSE b/etcd/vendor/github.com/openshift/library-go/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/etcd/vendor/github.com/openshift/library-go/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go b/etcd/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go
deleted file mode 100644
index a5cf8f99a5..0000000000
--- a/etcd/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package hardcodedauthorizer
-
-import (
- "context"
-
- "k8s.io/apiserver/pkg/authorization/authorizer"
-)
-
-type metricsAuthorizer struct{}
-
-// GetUser() user.Info - checked
-// GetVerb() string - checked
-// IsReadOnly() bool - na
-// GetNamespace() string - na
-// GetResource() string - na
-// GetSubresource() string - na
-// GetName() string - na
-// GetAPIGroup() string - na
-// GetAPIVersion() string - na
-// IsResourceRequest() bool - checked
-// GetPath() string - checked
-func (metricsAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
- if a.GetUser().GetName() != "system:serviceaccount:openshift-monitoring:prometheus-k8s" {
- return authorizer.DecisionNoOpinion, "", nil
- }
- if !a.IsResourceRequest() &&
- a.GetVerb() == "get" &&
- a.GetPath() == "/metrics" {
- return authorizer.DecisionAllow, "requesting metrics is allowed", nil
- }
-
- return authorizer.DecisionNoOpinion, "", nil
-}
-
-// NewHardCodedMetricsAuthorizer returns a hardcoded authorizer for checking metrics.
-func NewHardCodedMetricsAuthorizer() *metricsAuthorizer {
- return new(metricsAuthorizer)
-}
diff --git a/etcd/vendor/github.com/openshift/microshift/pkg/config/config.go b/etcd/vendor/github.com/openshift/microshift/pkg/config/config.go
index cc2c3247fd..df2ad6d6da 100644
--- a/etcd/vendor/github.com/openshift/microshift/pkg/config/config.go
+++ b/etcd/vendor/github.com/openshift/microshift/pkg/config/config.go
@@ -20,7 +20,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/logs"
"k8s.io/klog/v2"
- ctrl "k8s.io/kubernetes/pkg/controlplane"
"sigs.k8s.io/yaml"
"github.com/openshift/microshift/pkg/util"
@@ -59,6 +58,8 @@ type IngressConfig struct {
}
type EtcdConfig struct {
+ // Set a memory limit, in megabytes, on the etcd process; etcd will begin paging memory when it gets to this value. 0 means no limit.
+ MemoryLimit uint64
// The limit on the size of the etcd database; etcd will start failing writes if its size on disk reaches this value
QuotaBackendBytes int64
// If the backend is fragmented more than `maxFragmentedPercentage`
@@ -100,6 +101,17 @@ type Config struct {
Node Node `json:"node"`
ApiServer ApiServer `json:"apiServer"`
Debugging Debugging `json:"debugging"`
+ Etcd Etcd `json:"etcd"`
+}
+
+const (
+	// Etcd performance degrades significantly if the memory available is less than 128MB, enforce this minimum.
+ EtcdMinimumMemoryLimit = 128
+)
+
+type Etcd struct {
+ // Set a memory limit, in megabytes, on the etcd process; etcd will begin paging memory when it gets to this value. 0 means no limit.
+ MemoryLimitMB uint64 `json:"memoryLimitMB"`
}
type Network struct {
@@ -229,7 +241,7 @@ func NewMicroshiftConfig() *MicroshiftConfig {
return &MicroshiftConfig{
LogVLevel: 2,
SubjectAltNames: subjectAltNames,
- NodeName: nodeName,
+ NodeName: strings.ToLower(nodeName),
NodeIP: nodeIP,
BaseDomain: "example.com",
Cluster: ClusterConfig{
@@ -239,11 +251,12 @@ func NewMicroshiftConfig() *MicroshiftConfig {
ServiceNodePortRange: "30000-32767",
},
Etcd: EtcdConfig{
+ MemoryLimit: 0, // No limit
MinDefragBytes: 100 * 1024 * 1024, // 100MB
MaxFragmentedPercentage: 45, // percent
DefragCheckFreq: 5 * time.Minute,
DoStartupDefrag: true,
- QuotaBackendBytes: 2 * 1024 * 1024 * 1024, // 2GB
+ QuotaBackendBytes: 8 * 1024 * 1024 * 1024, // 8GB
},
}
}
@@ -254,7 +267,7 @@ func (c *MicroshiftConfig) isDefaultNodeName() bool {
if err != nil {
klog.Fatalf("Failed to get hostname %v", err)
}
- return c.NodeName == hostname
+ return c.NodeName == strings.ToLower(hostname)
}
// Read or set the NodeName that will be used for this MicroShift instance
@@ -378,7 +391,7 @@ func (c *MicroshiftConfig) ReadFromConfigFile(configFile string) error {
// Wire new Config type to existing MicroshiftConfig
c.LogVLevel = config.GetVerbosity()
if config.Node.HostnameOverride != "" {
- c.NodeName = config.Node.HostnameOverride
+ c.NodeName = strings.ToLower(config.Node.HostnameOverride)
}
if config.Node.NodeIP != "" {
c.NodeIP = config.Node.NodeIP
@@ -402,6 +415,15 @@ func (c *MicroshiftConfig) ReadFromConfigFile(configFile string) error {
c.KASAdvertiseAddress = config.ApiServer.AdvertiseAddress
}
+ if config.Etcd.MemoryLimitMB > 0 {
+		// If the memory limit is less than the minimum, set it to the minimum and continue.
+ if config.Etcd.MemoryLimitMB < EtcdMinimumMemoryLimit {
+ c.Etcd.MemoryLimit = EtcdMinimumMemoryLimit
+ } else {
+ c.Etcd.MemoryLimit = config.Etcd.MemoryLimitMB
+ }
+ }
+
return nil
}
@@ -421,16 +443,24 @@ func (c *MicroshiftConfig) ReadAndValidate(configFile string) error {
}
c.Cluster.DNS = clusterDNS
- // If KAS advertise address is not configured then grab it from the service
+ // If KAS advertise address is not configured then compute it from the service
// CIDR automatically.
if len(c.KASAdvertiseAddress) == 0 {
// unchecked error because this was done when getting cluster DNS
_, svcNet, _ := net.ParseCIDR(c.Cluster.ServiceCIDR)
- _, apiServerServiceIP, err := ctrl.ServiceIPRange(*svcNet)
- if err != nil {
- return fmt.Errorf("error getting apiserver IP: %v", err)
+ // Since the KAS advertise address was not provided we will default to the
+ // next immediate subnet after the service CIDR. This is due to the fact
+ // that using the actual apiserver service IP as an endpoint slice breaks
+ // host network pods trying to reach apiserver, as the VIP 10.43.0.1:443 is
+ // not translated to 10.43.0.1:6443. It remains unchanged and therefore
+ // connects to the ingress router instead, triggering all sorts of errors.
+ nextSubnet, exceed := cidr.NextSubnet(svcNet, 32)
+ if exceed {
+ return fmt.Errorf("unable to compute next subnet from service CIDR")
}
- c.KASAdvertiseAddress = apiServerServiceIP.String()
+ // First and last are the same because of the /32 netmask.
+ firstValidIP, _ := cidr.AddressRange(nextSubnet)
+ c.KASAdvertiseAddress = firstValidIP.String()
c.SkipKASInterface = false
} else {
c.SkipKASInterface = true
diff --git a/etcd/vendor/github.com/openshift/microshift/pkg/util/cert.go b/etcd/vendor/github.com/openshift/microshift/pkg/util/cert.go
index c4821ec1af..1ee6d6dd50 100644
--- a/etcd/vendor/github.com/openshift/microshift/pkg/util/cert.go
+++ b/etcd/vendor/github.com/openshift/microshift/pkg/util/cert.go
@@ -21,7 +21,7 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
- "io/ioutil"
+ "os"
"time"
"github.com/pkg/errors"
@@ -69,7 +69,7 @@ func GenKeys(pubPath, keyPath string) error {
return fmt.Errorf("failed to write the private key to %s: %v", keyPath, err)
}
- ioutil.WriteFile(pubPath, pubPEM, 0644)
+ os.WriteFile(pubPath, pubPEM, 0400)
return nil
}
diff --git a/etcd/vendor/github.com/openshift/microshift/pkg/util/net.go b/etcd/vendor/github.com/openshift/microshift/pkg/util/net.go
index a73bfcae0a..c984299855 100644
--- a/etcd/vendor/github.com/openshift/microshift/pkg/util/net.go
+++ b/etcd/vendor/github.com/openshift/microshift/pkg/util/net.go
@@ -32,19 +32,24 @@ import (
"k8s.io/klog/v2"
)
+var previousGatewayIP string = ""
+
func GetHostIP() (string, error) {
// Prefer OVN-K gateway IP if it is the CNI
gatewayIP, err := ovn.GetOVNGatewayIP()
if err != nil && !strings.Contains(err.Error(), "no such network interface") {
return "", err
}
- klog.V(2).Infof("ovn gateway IP address: %s", gatewayIP)
+ if gatewayIP != previousGatewayIP {
+ previousGatewayIP = gatewayIP
+ klog.V(2).Infof("ovn gateway IP address: %s", gatewayIP)
+ }
ip, err := net.ChooseHostInterface()
if err == nil {
return ip.String(), nil
}
- klog.V(2).Infof("failed to find default route IP address: %v", err)
+ klog.V(2).Infof("could not find default route IP address, using ovn gateway IP %q as host IP: %v", gatewayIP, err)
return gatewayIP, nil
}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/.travis.yml b/etcd/vendor/github.com/pquerna/cachecontrol/.travis.yml
deleted file mode 100644
index 0d966bb01c..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-arch:
- - amd64
- - ppc64le
-language: go
-
-go:
- - "1.15"
- - "1.16"
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/LICENSE b/etcd/vendor/github.com/pquerna/cachecontrol/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/README.md b/etcd/vendor/github.com/pquerna/cachecontrol/README.md
deleted file mode 100644
index 35ea9c8ccd..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# cachecontrol: HTTP Caching Parser and Interpretation
-
-[](https://pkg.go.dev/github.com/pquerna/cachecontrol?tab=doc)[](https://travis-ci.org/pquerna/cachecontrol)
-
-
-
-`cachecontrol` implements [RFC 7234](http://tools.ietf.org/html/rfc7234) __Hypertext Transfer Protocol (HTTP/1.1): Caching__. It does this by parsing the `Cache-Control` and other headers, providing information about requests and responses -- but `cachecontrol` does not implement an actual cache backend, just the control plane to make decisions about if a particular response is cachable.
-
-# Usage
-
-`cachecontrol.CachableResponse` returns an array of [reasons](https://godoc.org/github.com/pquerna/cachecontrol/cacheobject#Reason) why a response should not be cached and when it expires. In the case that `len(reasons) == 0`, the response is cachable according to the RFC. However, some people want non-compliant caches for various business use cases, so each reason is specifically named, so if your cache wants to cache `POST` requests, it can easily do that, but still be RFC compliant in other situations.
-
-# Examples
-
-## Can you cache Example.com?
-
-```go
-package main
-
-import (
- "github.com/pquerna/cachecontrol"
-
- "fmt"
- "io/ioutil"
- "net/http"
-)
-
-func main() {
- req, _ := http.NewRequest("GET", "http://www.example.com/", nil)
-
- res, _ := http.DefaultClient.Do(req)
- _, _ = ioutil.ReadAll(res.Body)
-
- reasons, expires, _ := cachecontrol.CachableResponse(req, res, cachecontrol.Options{})
-
- fmt.Println("Reasons to not cache: ", reasons)
- fmt.Println("Expiration: ", expires.String())
-}
-```
-
-## Can I use this in a high performance caching server?
-
-`cachecontrol` is divided into two packages: `cachecontrol` with a high level API, and a lower level `cacheobject` package. Use [Object](https://godoc.org/github.com/pquerna/cachecontrol/cacheobject#Object) in a high performance use case where you have previously parsed headers containing dates or would like to avoid memory allocations.
-
-```go
-package main
-
-import (
- "github.com/pquerna/cachecontrol/cacheobject"
-
- "fmt"
- "io/ioutil"
- "net/http"
-)
-
-func main() {
- req, _ := http.NewRequest("GET", "http://www.example.com/", nil)
-
- res, _ := http.DefaultClient.Do(req)
- _, _ = ioutil.ReadAll(res.Body)
-
- reqDir, _ := cacheobject.ParseRequestCacheControl(req.Header.Get("Cache-Control"))
-
- resDir, _ := cacheobject.ParseResponseCacheControl(res.Header.Get("Cache-Control"))
- expiresHeader, _ := http.ParseTime(res.Header.Get("Expires"))
- dateHeader, _ := http.ParseTime(res.Header.Get("Date"))
- lastModifiedHeader, _ := http.ParseTime(res.Header.Get("Last-Modified"))
-
- obj := cacheobject.Object{
- RespDirectives: resDir,
- RespHeaders: res.Header,
- RespStatusCode: res.StatusCode,
- RespExpiresHeader: expiresHeader,
- RespDateHeader: dateHeader,
- RespLastModifiedHeader: lastModifiedHeader,
-
- ReqDirectives: reqDir,
- ReqHeaders: req.Header,
- ReqMethod: req.Method,
-
- NowUTC: time.Now().UTC(),
- }
- rv := cacheobject.ObjectResults{}
-
- cacheobject.CachableObject(&obj, &rv)
- cacheobject.ExpirationObject(&obj, &rv)
-
- fmt.Println("Errors: ", rv.OutErr)
- fmt.Println("Reasons to not cache: ", rv.OutReasons)
- fmt.Println("Warning headers to add: ", rv.OutWarnings)
- fmt.Println("Expiration: ", rv.OutExpirationTime.String())
-}
-```
-
-## Improvements, bugs, adding features, and taking cachecontrol new directions!
-
-Please [open issues in Github](https://github.com/pquerna/cachecontrol/issues) for ideas, bugs, and general thoughts. Pull requests are of course preferred :)
-
-# Credits
-
-`cachecontrol` has recieved significant contributions from:
-
-* [Paul Querna](https://github.com/pquerna)
-
-## License
-
-`cachecontrol` is licensed under the [Apache License, Version 2.0](./LICENSE)
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/api.go b/etcd/vendor/github.com/pquerna/cachecontrol/api.go
deleted file mode 100644
index 5759a4c0cb..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/api.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Copyright 2015 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package cachecontrol
-
-import (
- "github.com/pquerna/cachecontrol/cacheobject"
-
- "net/http"
- "time"
-)
-
-type Options struct {
- // Set to True for a private cache, which is not shared among users (eg, in a browser)
- // Set to False for a "shared" cache, which is more common in a server context.
- PrivateCache bool
-}
-
-// Given an HTTP Request, the future Status Code, and an ResponseWriter,
-// determine the possible reasons a response SHOULD NOT be cached.
-func CachableResponseWriter(req *http.Request,
- statusCode int,
- resp http.ResponseWriter,
- opts Options) ([]cacheobject.Reason, time.Time, error) {
- return cacheobject.UsingRequestResponse(req, statusCode, resp.Header(), opts.PrivateCache)
-}
-
-// Given an HTTP Request and Response, determine the possible reasons a response SHOULD NOT
-// be cached.
-func CachableResponse(req *http.Request,
- resp *http.Response,
- opts Options) ([]cacheobject.Reason, time.Time, error) {
- return cacheobject.UsingRequestResponse(req, resp.StatusCode, resp.Header, opts.PrivateCache)
-}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/directive.go b/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/directive.go
deleted file mode 100644
index afc63dc763..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/directive.go
+++ /dev/null
@@ -1,547 +0,0 @@
-/**
- * Copyright 2015 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package cacheobject
-
-import (
- "errors"
- "math"
- "net/http"
- "net/textproto"
- "strconv"
- "strings"
-)
-
-// TODO(pquerna): add extensions from here: http://www.iana.org/assignments/http-cache-directives/http-cache-directives.xhtml
-
-var (
- ErrQuoteMismatch = errors.New("Missing closing quote")
- ErrMaxAgeDeltaSeconds = errors.New("Failed to parse delta-seconds in `max-age`")
- ErrSMaxAgeDeltaSeconds = errors.New("Failed to parse delta-seconds in `s-maxage`")
- ErrMaxStaleDeltaSeconds = errors.New("Failed to parse delta-seconds in `max-stale`")
- ErrMinFreshDeltaSeconds = errors.New("Failed to parse delta-seconds in `min-fresh`")
- ErrNoCacheNoArgs = errors.New("Unexpected argument to `no-cache`")
- ErrNoStoreNoArgs = errors.New("Unexpected argument to `no-store`")
- ErrNoTransformNoArgs = errors.New("Unexpected argument to `no-transform`")
- ErrOnlyIfCachedNoArgs = errors.New("Unexpected argument to `only-if-cached`")
- ErrMustRevalidateNoArgs = errors.New("Unexpected argument to `must-revalidate`")
- ErrPublicNoArgs = errors.New("Unexpected argument to `public`")
- ErrProxyRevalidateNoArgs = errors.New("Unexpected argument to `proxy-revalidate`")
- // Experimental
- ErrImmutableNoArgs = errors.New("Unexpected argument to `immutable`")
- ErrStaleIfErrorDeltaSeconds = errors.New("Failed to parse delta-seconds in `stale-if-error`")
- ErrStaleWhileRevalidateDeltaSeconds = errors.New("Failed to parse delta-seconds in `stale-while-revalidate`")
-)
-
-func whitespace(b byte) bool {
- if b == '\t' || b == ' ' {
- return true
- }
- return false
-}
-
-func parse(value string, cd cacheDirective) error {
- var err error = nil
- i := 0
-
- for i < len(value) && err == nil {
- // eat leading whitespace or commas
- if whitespace(value[i]) || value[i] == ',' {
- i++
- continue
- }
-
- j := i + 1
-
- for j < len(value) {
- if !isToken(value[j]) {
- break
- }
- j++
- }
-
- token := strings.ToLower(value[i:j])
- tokenHasFields := hasFieldNames(token)
- /*
- println("GOT TOKEN:")
- println(" i -> ", i)
- println(" j -> ", j)
- println(" token -> ", token)
- */
-
- if j+1 < len(value) && value[j] == '=' {
- k := j + 1
- // minimum size two bytes of "", but we let httpUnquote handle it.
- if k < len(value) && value[k] == '"' {
- eaten, result := httpUnquote(value[k:])
- if eaten == -1 {
- return ErrQuoteMismatch
- }
- i = k + eaten
-
- err = cd.addPair(token, result)
- } else {
- z := k
- for z < len(value) {
- if tokenHasFields {
- if whitespace(value[z]) {
- break
- }
- } else {
- if whitespace(value[z]) || value[z] == ',' {
- break
- }
- }
- z++
- }
- i = z
-
- result := value[k:z]
- if result != "" && result[len(result)-1] == ',' {
- result = result[:len(result)-1]
- }
-
- err = cd.addPair(token, result)
- }
- } else {
- if token != "," {
- err = cd.addToken(token)
- }
- i = j
- }
- }
-
- return err
-}
-
-// DeltaSeconds specifies a non-negative integer, representing
-// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1
-//
-// When set to -1, this means unset.
-//
-type DeltaSeconds int32
-
-// Parser for delta-seconds, a uint31, more or less:
-// http://tools.ietf.org/html/rfc7234#section-1.2.1
-func parseDeltaSeconds(v string) (DeltaSeconds, error) {
- n, err := strconv.ParseUint(v, 10, 32)
- if err != nil {
- if numError, ok := err.(*strconv.NumError); ok {
- if numError.Err == strconv.ErrRange {
- return DeltaSeconds(math.MaxInt32), nil
- }
- }
- return DeltaSeconds(-1), err
- } else {
- if n > math.MaxInt32 {
- return DeltaSeconds(math.MaxInt32), nil
- } else {
- return DeltaSeconds(n), nil
- }
- }
-}
-
-// Fields present in a header.
-type FieldNames map[string]bool
-
-// internal interface for shared methods of RequestCacheDirectives and ResponseCacheDirectives
-type cacheDirective interface {
- addToken(s string) error
- addPair(s string, v string) error
-}
-
-// LOW LEVEL API: Representation of possible request directives in a `Cache-Control` header: http://tools.ietf.org/html/rfc7234#section-5.2.1
-//
-// Note: Many fields will be `nil` in practice.
-//
-type RequestCacheDirectives struct {
-
- // max-age(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.1.1
- //
- // The "max-age" request directive indicates that the client is
- // unwilling to accept a response whose age is greater than the
- // specified number of seconds. Unless the max-stale request directive
- // is also present, the client is not willing to accept a stale
- // response.
- MaxAge DeltaSeconds
-
- // max-stale(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.1.2
- //
- // The "max-stale" request directive indicates that the client is
- // willing to accept a response that has exceeded its freshness
- // lifetime. If max-stale is assigned a value, then the client is
- // willing to accept a response that has exceeded its freshness lifetime
- // by no more than the specified number of seconds. If no value is
- // assigned to max-stale, then the client is willing to accept a stale
- // response of any age.
- MaxStale DeltaSeconds
- MaxStaleSet bool
-
- // min-fresh(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.1.3
- //
- // The "min-fresh" request directive indicates that the client is
- // willing to accept a response whose freshness lifetime is no less than
- // its current age plus the specified time in seconds. That is, the
- // client wants a response that will still be fresh for at least the
- // specified number of seconds.
- MinFresh DeltaSeconds
-
- // no-cache(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.4
- //
- // The "no-cache" request directive indicates that a cache MUST NOT use
- // a stored response to satisfy the request without successful
- // validation on the origin server.
- NoCache bool
-
- // no-store(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.5
- //
- // The "no-store" request directive indicates that a cache MUST NOT
- // store any part of either this request or any response to it. This
- // directive applies to both private and shared caches.
- NoStore bool
-
- // no-transform(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.6
- //
- // The "no-transform" request directive indicates that an intermediary
- // (whether or not it implements a cache) MUST NOT transform the
- // payload, as defined in Section 5.7.2 of RFC7230.
- NoTransform bool
-
- // only-if-cached(bool): http://tools.ietf.org/html/rfc7234#section-5.2.1.7
- //
- // The "only-if-cached" request directive indicates that the client only
- // wishes to obtain a stored response.
- OnlyIfCached bool
-
- // Extensions: http://tools.ietf.org/html/rfc7234#section-5.2.3
- //
- // The Cache-Control header field can be extended through the use of one
- // or more cache-extension tokens, each with an optional value. A cache
- // MUST ignore unrecognized cache directives.
- Extensions []string
-}
-
-func (cd *RequestCacheDirectives) addToken(token string) error {
- var err error = nil
-
- switch token {
- case "max-age":
- err = ErrMaxAgeDeltaSeconds
- case "min-fresh":
- err = ErrMinFreshDeltaSeconds
- case "max-stale":
- cd.MaxStaleSet = true
- case "no-cache":
- cd.NoCache = true
- case "no-store":
- cd.NoStore = true
- case "no-transform":
- cd.NoTransform = true
- case "only-if-cached":
- cd.OnlyIfCached = true
- default:
- cd.Extensions = append(cd.Extensions, token)
- }
- return err
-}
-
-func (cd *RequestCacheDirectives) addPair(token string, v string) error {
- var err error = nil
-
- switch token {
- case "max-age":
- cd.MaxAge, err = parseDeltaSeconds(v)
- if err != nil {
- err = ErrMaxAgeDeltaSeconds
- }
- case "max-stale":
- cd.MaxStale, err = parseDeltaSeconds(v)
- if err != nil {
- err = ErrMaxStaleDeltaSeconds
- }
- case "min-fresh":
- cd.MinFresh, err = parseDeltaSeconds(v)
- if err != nil {
- err = ErrMinFreshDeltaSeconds
- }
- case "no-cache":
- err = ErrNoCacheNoArgs
- case "no-store":
- err = ErrNoStoreNoArgs
- case "no-transform":
- err = ErrNoTransformNoArgs
- case "only-if-cached":
- err = ErrOnlyIfCachedNoArgs
- default:
- // TODO(pquerna): this sucks, making user re-parse
- cd.Extensions = append(cd.Extensions, token+"="+v)
- }
-
- return err
-}
-
-// LOW LEVEL API: Parses a Cache Control Header from a Request into a set of directives.
-func ParseRequestCacheControl(value string) (*RequestCacheDirectives, error) {
- cd := &RequestCacheDirectives{
- MaxAge: -1,
- MaxStale: -1,
- MinFresh: -1,
- }
-
- err := parse(value, cd)
- if err != nil {
- return nil, err
- }
- return cd, nil
-}
-
-// LOW LEVEL API: Repersentation of possible response directives in a `Cache-Control` header: http://tools.ietf.org/html/rfc7234#section-5.2.2
-//
-// Note: Many fields will be `nil` in practice.
-//
-type ResponseCacheDirectives struct {
-
- // must-revalidate(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.1
- //
- // The "must-revalidate" response directive indicates that once it has
- // become stale, a cache MUST NOT use the response to satisfy subsequent
- // requests without successful validation on the origin server.
- MustRevalidate bool
-
- // no-cache(FieldName): http://tools.ietf.org/html/rfc7234#section-5.2.2.2
- //
- // The "no-cache" response directive indicates that the response MUST
- // NOT be used to satisfy a subsequent request without successful
- // validation on the origin server.
- //
- // If the no-cache response directive specifies one or more field-names,
- // then a cache MAY use the response to satisfy a subsequent request,
- // subject to any other restrictions on caching. However, any header
- // fields in the response that have the field-name(s) listed MUST NOT be
- // sent in the response to a subsequent request without successful
- // revalidation with the origin server.
- NoCache FieldNames
-
- // no-cache(cast-to-bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.2
- //
- // While the RFC defines optional field-names on a no-cache directive,
- // many applications only want to know if any no-cache directives were
- // present at all.
- NoCachePresent bool
-
- // no-store(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.3
- //
- // The "no-store" request directive indicates that a cache MUST NOT
- // store any part of either this request or any response to it. This
- // directive applies to both private and shared caches.
- NoStore bool
-
- // no-transform(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.4
- //
- // The "no-transform" response directive indicates that an intermediary
- // (regardless of whether it implements a cache) MUST NOT transform the
- // payload, as defined in Section 5.7.2 of RFC7230.
- NoTransform bool
-
- // public(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.5
- //
- // The "public" response directive indicates that any cache MAY store
- // the response, even if the response would normally be non-cacheable or
- // cacheable only within a private cache.
- Public bool
-
- // private(FieldName): http://tools.ietf.org/html/rfc7234#section-5.2.2.6
- //
- // The "private" response directive indicates that the response message
- // is intended for a single user and MUST NOT be stored by a shared
- // cache. A private cache MAY store the response and reuse it for later
- // requests, even if the response would normally be non-cacheable.
- //
- // If the private response directive specifies one or more field-names,
- // this requirement is limited to the field-values associated with the
- // listed response header fields. That is, a shared cache MUST NOT
- // store the specified field-names(s), whereas it MAY store the
- // remainder of the response message.
- Private FieldNames
-
- // private(cast-to-bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.6
- //
- // While the RFC defines optional field-names on a private directive,
- // many applications only want to know if any private directives were
- // present at all.
- PrivatePresent bool
-
- // proxy-revalidate(bool): http://tools.ietf.org/html/rfc7234#section-5.2.2.7
- //
- // The "proxy-revalidate" response directive has the same meaning as the
- // must-revalidate response directive, except that it does not apply to
- // private caches.
- ProxyRevalidate bool
-
- // max-age(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.2.8
- //
- // The "max-age" response directive indicates that the response is to be
- // considered stale after its age is greater than the specified number
- // of seconds.
- MaxAge DeltaSeconds
-
- // s-maxage(delta seconds): http://tools.ietf.org/html/rfc7234#section-5.2.2.9
- //
- // The "s-maxage" response directive indicates that, in shared caches,
- // the maximum age specified by this directive overrides the maximum age
- // specified by either the max-age directive or the Expires header
- // field. The s-maxage directive also implies the semantics of the
- // proxy-revalidate response directive.
- SMaxAge DeltaSeconds
-
- ////
- // Experimental features
- // - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control#Extension_Cache-Control_directives
- // - https://www.fastly.com/blog/stale-while-revalidate-stale-if-error-available-today
- ////
-
- // immutable(cast-to-bool): experimental feature
- Immutable bool
-
- // stale-if-error(delta seconds): experimental feature
- StaleIfError DeltaSeconds
-
- // stale-while-revalidate(delta seconds): experimental feature
- StaleWhileRevalidate DeltaSeconds
-
- // Extensions: http://tools.ietf.org/html/rfc7234#section-5.2.3
- //
- // The Cache-Control header field can be extended through the use of one
- // or more cache-extension tokens, each with an optional value. A cache
- // MUST ignore unrecognized cache directives.
- Extensions []string
-}
-
-// LOW LEVEL API: Parses a Cache Control Header from a Response into a set of directives.
-func ParseResponseCacheControl(value string) (*ResponseCacheDirectives, error) {
- cd := &ResponseCacheDirectives{
- MaxAge: -1,
- SMaxAge: -1,
- // Exerimantal stale timeouts
- StaleIfError: -1,
- StaleWhileRevalidate: -1,
- }
-
- err := parse(value, cd)
- if err != nil {
- return nil, err
- }
- return cd, nil
-}
-
-func (cd *ResponseCacheDirectives) addToken(token string) error {
- var err error = nil
- switch token {
- case "must-revalidate":
- cd.MustRevalidate = true
- case "no-cache":
- cd.NoCachePresent = true
- case "no-store":
- cd.NoStore = true
- case "no-transform":
- cd.NoTransform = true
- case "public":
- cd.Public = true
- case "private":
- cd.PrivatePresent = true
- case "proxy-revalidate":
- cd.ProxyRevalidate = true
- case "max-age":
- err = ErrMaxAgeDeltaSeconds
- case "s-maxage":
- err = ErrSMaxAgeDeltaSeconds
- // Experimental
- case "immutable":
- cd.Immutable = true
- case "stale-if-error":
- err = ErrMaxAgeDeltaSeconds
- case "stale-while-revalidate":
- err = ErrMaxAgeDeltaSeconds
- default:
- cd.Extensions = append(cd.Extensions, token)
- }
- return err
-}
-
-func hasFieldNames(token string) bool {
- switch token {
- case "no-cache":
- return true
- case "private":
- return true
- }
- return false
-}
-
-func (cd *ResponseCacheDirectives) addPair(token string, v string) error {
- var err error = nil
-
- switch token {
- case "must-revalidate":
- err = ErrMustRevalidateNoArgs
- case "no-cache":
- cd.NoCachePresent = true
- tokens := strings.Split(v, ",")
- if cd.NoCache == nil {
- cd.NoCache = make(FieldNames)
- }
- for _, t := range tokens {
- k := http.CanonicalHeaderKey(textproto.TrimString(t))
- cd.NoCache[k] = true
- }
- case "no-store":
- err = ErrNoStoreNoArgs
- case "no-transform":
- err = ErrNoTransformNoArgs
- case "public":
- err = ErrPublicNoArgs
- case "private":
- cd.PrivatePresent = true
- tokens := strings.Split(v, ",")
- if cd.Private == nil {
- cd.Private = make(FieldNames)
- }
- for _, t := range tokens {
- k := http.CanonicalHeaderKey(textproto.TrimString(t))
- cd.Private[k] = true
- }
- case "proxy-revalidate":
- err = ErrProxyRevalidateNoArgs
- case "max-age":
- cd.MaxAge, err = parseDeltaSeconds(v)
- case "s-maxage":
- cd.SMaxAge, err = parseDeltaSeconds(v)
- // Experimental
- case "immutable":
- err = ErrImmutableNoArgs
- case "stale-if-error":
- cd.StaleIfError, err = parseDeltaSeconds(v)
- case "stale-while-revalidate":
- cd.StaleWhileRevalidate, err = parseDeltaSeconds(v)
- default:
- // TODO(pquerna): this sucks, making user re-parse, and its technically not 'quoted' like the original,
- // but this is still easier, just a SplitN on "="
- cd.Extensions = append(cd.Extensions, token+"="+v)
- }
-
- return err
-}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/lex.go b/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/lex.go
deleted file mode 100644
index c658e09b15..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/lex.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cacheobject
-
-// This file deals with lexical matters of HTTP
-
-func isSeparator(c byte) bool {
- switch c {
- case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
- return true
- }
- return false
-}
-
-func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
-
-func isChar(c byte) bool { return 0 <= c && c <= 127 }
-
-func isAnyText(c byte) bool { return !isCtl(c) }
-
-func isQdText(c byte) bool { return isAnyText(c) && c != '"' }
-
-func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) }
-
-// Valid escaped sequences are not specified in RFC 2616, so for now, we assume
-// that they coincide with the common sense ones used by GO. Malformed
-// characters should probably not be treated as errors by a robust (forgiving)
-// parser, so we replace them with the '?' character.
-func httpUnquotePair(b byte) byte {
- // skip the first byte, which should always be '\'
- switch b {
- case 'a':
- return '\a'
- case 'b':
- return '\b'
- case 'f':
- return '\f'
- case 'n':
- return '\n'
- case 'r':
- return '\r'
- case 't':
- return '\t'
- case 'v':
- return '\v'
- case '\\':
- return '\\'
- case '\'':
- return '\''
- case '"':
- return '"'
- }
- return '?'
-}
-
-// raw must begin with a valid quoted string. Only the first quoted string is
-// parsed and is unquoted in result. eaten is the number of bytes parsed, or -1
-// upon failure.
-func httpUnquote(raw string) (eaten int, result string) {
- buf := make([]byte, len(raw))
- if raw[0] != '"' {
- return -1, ""
- }
- eaten = 1
- j := 0 // # of bytes written in buf
- for i := 1; i < len(raw); i++ {
- switch b := raw[i]; b {
- case '"':
- eaten++
- buf = buf[0:j]
- return i + 1, string(buf)
- case '\\':
- if len(raw) < i+2 {
- return -1, ""
- }
- buf[j] = httpUnquotePair(raw[i+1])
- eaten += 2
- j++
- i++
- default:
- if isQdText(b) {
- buf[j] = b
- } else {
- buf[j] = '?'
- }
- eaten++
- j++
- }
- }
- return -1, ""
-}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go b/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go
deleted file mode 100644
index ae38a317db..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/object.go
+++ /dev/null
@@ -1,398 +0,0 @@
-/**
- * Copyright 2015 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package cacheobject
-
-import (
- "net/http"
- "time"
-)
-
-// LOW LEVEL API: Represents a potentially cachable HTTP object.
-//
-// This struct is designed to be serialized efficiently, so in a high
-// performance caching server, things like Date-Strings don't need to be
-// parsed for every use of a cached object.
-type Object struct {
- CacheIsPrivate bool
-
- RespDirectives *ResponseCacheDirectives
- RespHeaders http.Header
- RespStatusCode int
- RespExpiresHeader time.Time
- RespDateHeader time.Time
- RespLastModifiedHeader time.Time
-
- ReqDirectives *RequestCacheDirectives
- ReqHeaders http.Header
- ReqMethod string
-
- NowUTC time.Time
-}
-
-// LOW LEVEL API: Represents the results of examining an Object with
-// CachableObject and ExpirationObject.
-//
-// TODO(pquerna): decide if this is a good idea or bad
-type ObjectResults struct {
- OutReasons []Reason
- OutWarnings []Warning
- OutExpirationTime time.Time
- OutErr error
-}
-
-// LOW LEVEL API: Check if a request is cacheable.
-// This function doesn't reset the passed ObjectResults.
-func CachableRequestObject(obj *Object, rv *ObjectResults) {
- switch obj.ReqMethod {
- case "GET":
- break
- case "HEAD":
- break
- case "POST":
- // Responses to POST requests can be cacheable if they include explicit freshness information
- break
-
- case "PUT":
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPUT)
-
- case "DELETE":
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodDELETE)
-
- case "CONNECT":
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodCONNECT)
-
- case "OPTIONS":
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodOPTIONS)
-
- case "TRACE":
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodTRACE)
-
- // HTTP Extension Methods: http://www.iana.org/assignments/http-methods/http-methods.xhtml
- //
- // To my knowledge, none of them are cachable. Please open a ticket if this is not the case!
- //
- default:
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodUnknown)
- }
-
- if obj.ReqDirectives != nil && obj.ReqDirectives.NoStore {
- rv.OutReasons = append(rv.OutReasons, ReasonRequestNoStore)
- }
-}
-
-// LOW LEVEL API: Check if a response is cacheable.
-// This function doesn't reset the passed ObjectResults.
-func CachableResponseObject(obj *Object, rv *ObjectResults) {
- /**
- POST: http://tools.ietf.org/html/rfc7231#section-4.3.3
-
- Responses to POST requests are only cacheable when they include
- explicit freshness information (see Section 4.2.1 of [RFC7234]).
- However, POST caching is not widely implemented. For cases where an
- origin server wishes the client to be able to cache the result of a
- POST in a way that can be reused by a later GET, the origin server
- MAY send a 200 (OK) response containing the result and a
- Content-Location header field that has the same value as the POST's
- effective request URI (Section 3.1.4.2).
- */
- if obj.ReqMethod == http.MethodPost && !hasFreshness(obj.RespDirectives, obj.RespHeaders, obj.RespExpiresHeader, obj.CacheIsPrivate) {
- rv.OutReasons = append(rv.OutReasons, ReasonRequestMethodPOST)
- }
-
- // Storing Responses to Authenticated Requests: http://tools.ietf.org/html/rfc7234#section-3.2
- if obj.ReqHeaders.Get("Authorization") != "" {
- if obj.RespDirectives.MustRevalidate ||
- obj.RespDirectives.Public ||
- obj.RespDirectives.SMaxAge != -1 {
- // Expires of some kind present, this is potentially OK.
- } else {
- rv.OutReasons = append(rv.OutReasons, ReasonRequestAuthorizationHeader)
- }
- }
-
- if obj.RespDirectives.PrivatePresent && !obj.CacheIsPrivate {
- rv.OutReasons = append(rv.OutReasons, ReasonResponsePrivate)
- }
-
- if obj.RespDirectives.NoStore {
- rv.OutReasons = append(rv.OutReasons, ReasonResponseNoStore)
- }
-
- /*
- the response either:
-
- * contains an Expires header field (see Section 5.3), or
-
- * contains a max-age response directive (see Section 5.2.2.8), or
-
- * contains a s-maxage response directive (see Section 5.2.2.9)
- and the cache is shared, or
-
- * contains a Cache Control Extension (see Section 5.2.3) that
- allows it to be cached, or
-
- * has a status code that is defined as cacheable by default (see
- Section 4.2.2), or
-
- * contains a public response directive (see Section 5.2.2.5).
- */
-
- if obj.RespHeaders.Get("Expires") != "" ||
- obj.RespDirectives.MaxAge != -1 ||
- (obj.RespDirectives.SMaxAge != -1 && !obj.CacheIsPrivate) ||
- cachableStatusCode(obj.RespStatusCode) ||
- obj.RespDirectives.Public {
- /* cachable by default, at least one of the above conditions was true */
- return
- }
-
- rv.OutReasons = append(rv.OutReasons, ReasonResponseUncachableByDefault)
-}
-
-// LOW LEVEL API: Check if a object is cachable.
-func CachableObject(obj *Object, rv *ObjectResults) {
- rv.OutReasons = nil
- rv.OutWarnings = nil
- rv.OutErr = nil
-
- CachableRequestObject(obj, rv)
- CachableResponseObject(obj, rv)
-}
-
-var twentyFourHours = time.Duration(24 * time.Hour)
-
-const debug = false
-
-// LOW LEVEL API: Update an objects expiration time.
-func ExpirationObject(obj *Object, rv *ObjectResults) {
- /**
- * Okay, lets calculate Freshness/Expiration now. woo:
- * http://tools.ietf.org/html/rfc7234#section-4.2
- */
-
- /*
- o If the cache is shared and the s-maxage response directive
- (Section 5.2.2.9) is present, use its value, or
-
- o If the max-age response directive (Section 5.2.2.8) is present,
- use its value, or
-
- o If the Expires response header field (Section 5.3) is present, use
- its value minus the value of the Date response header field, or
-
- o Otherwise, no explicit expiration time is present in the response.
- A heuristic freshness lifetime might be applicable; see
- Section 4.2.2.
- */
-
- var expiresTime time.Time
-
- if obj.RespDirectives.SMaxAge != -1 && !obj.CacheIsPrivate {
- expiresTime = obj.NowUTC.Add(time.Second * time.Duration(obj.RespDirectives.SMaxAge))
- } else if obj.RespDirectives.MaxAge != -1 {
- expiresTime = obj.NowUTC.UTC().Add(time.Second * time.Duration(obj.RespDirectives.MaxAge))
- } else if !obj.RespExpiresHeader.IsZero() {
- serverDate := obj.RespDateHeader
- if serverDate.IsZero() {
- // common enough case when a Date: header has not yet been added to an
- // active response.
- serverDate = obj.NowUTC
- }
- expiresTime = obj.NowUTC.Add(obj.RespExpiresHeader.Sub(serverDate))
- } else if !obj.RespLastModifiedHeader.IsZero() {
- // heuristic freshness lifetime
- rv.OutWarnings = append(rv.OutWarnings, WarningHeuristicExpiration)
-
- // http://httpd.apache.org/docs/2.4/mod/mod_cache.html#cachelastmodifiedfactor
- // CacheMaxExpire defaults to 24 hours
- // CacheLastModifiedFactor: is 0.1
- //
- // expiry-period = MIN(time-since-last-modified-date * factor, 24 hours)
- //
- // obj.NowUTC
-
- since := obj.RespLastModifiedHeader.Sub(obj.NowUTC)
- since = time.Duration(float64(since) * -0.1)
-
- if since > twentyFourHours {
- expiresTime = obj.NowUTC.Add(twentyFourHours)
- } else {
- expiresTime = obj.NowUTC.Add(since)
- }
-
- if debug {
- println("Now UTC: ", obj.NowUTC.String())
- println("Last-Modified: ", obj.RespLastModifiedHeader.String())
- println("Since: ", since.String())
- println("TwentyFourHours: ", twentyFourHours.String())
- println("Expiration: ", expiresTime.String())
- }
- } else {
- // TODO(pquerna): what should the default behavior be for expiration time?
- }
-
- rv.OutExpirationTime = expiresTime
-}
-
-// Evaluate cachability based on an HTTP request, and parts of the response.
-func UsingRequestResponse(req *http.Request,
- statusCode int,
- respHeaders http.Header,
- privateCache bool) ([]Reason, time.Time, error) {
- reasons, time, _, _, err := UsingRequestResponseWithObject(req, statusCode, respHeaders, privateCache)
- return reasons, time, err
-}
-
-// Evaluate cachability based on an HTTP request, and parts of the response.
-// Returns the parsed Object as well.
-func UsingRequestResponseWithObject(req *http.Request,
- statusCode int,
- respHeaders http.Header,
- privateCache bool) ([]Reason, time.Time, []Warning, *Object, error) {
- var reqHeaders http.Header
- var reqMethod string
-
- var reqDir *RequestCacheDirectives = nil
- respDir, err := ParseResponseCacheControl(respHeaders.Get("Cache-Control"))
- if err != nil {
- return nil, time.Time{}, nil, nil, err
- }
-
- if req != nil {
- reqDir, err = ParseRequestCacheControl(req.Header.Get("Cache-Control"))
- if err != nil {
- return nil, time.Time{}, nil, nil, err
- }
- reqHeaders = req.Header
- reqMethod = req.Method
- }
-
- var expiresHeader time.Time
- var dateHeader time.Time
- var lastModifiedHeader time.Time
-
- if respHeaders.Get("Expires") != "" {
- expiresHeader, err = http.ParseTime(respHeaders.Get("Expires"))
- if err != nil {
- // sometimes servers will return `Expires: 0` or `Expires: -1` to
- // indicate expired content
- expiresHeader = time.Time{}
- }
- expiresHeader = expiresHeader.UTC()
- }
-
- if respHeaders.Get("Date") != "" {
- dateHeader, err = http.ParseTime(respHeaders.Get("Date"))
- if err != nil {
- return nil, time.Time{}, nil, nil, err
- }
- dateHeader = dateHeader.UTC()
- }
-
- if respHeaders.Get("Last-Modified") != "" {
- lastModifiedHeader, err = http.ParseTime(respHeaders.Get("Last-Modified"))
- if err != nil {
- return nil, time.Time{}, nil, nil, err
- }
- lastModifiedHeader = lastModifiedHeader.UTC()
- }
-
- obj := Object{
- CacheIsPrivate: privateCache,
-
- RespDirectives: respDir,
- RespHeaders: respHeaders,
- RespStatusCode: statusCode,
- RespExpiresHeader: expiresHeader,
- RespDateHeader: dateHeader,
- RespLastModifiedHeader: lastModifiedHeader,
-
- ReqDirectives: reqDir,
- ReqHeaders: reqHeaders,
- ReqMethod: reqMethod,
-
- NowUTC: time.Now().UTC(),
- }
- rv := ObjectResults{}
-
- CachableObject(&obj, &rv)
- if rv.OutErr != nil {
- return nil, time.Time{}, nil, nil, rv.OutErr
- }
-
- ExpirationObject(&obj, &rv)
- if rv.OutErr != nil {
- return nil, time.Time{}, nil, nil, rv.OutErr
- }
-
- return rv.OutReasons, rv.OutExpirationTime, rv.OutWarnings, &obj, nil
-}
-
-// calculate if a freshness directive is present: http://tools.ietf.org/html/rfc7234#section-4.2.1
-func hasFreshness(respDir *ResponseCacheDirectives, respHeaders http.Header, respExpires time.Time, privateCache bool) bool {
- if !privateCache && respDir.SMaxAge != -1 {
- return true
- }
-
- if respDir.MaxAge != -1 {
- return true
- }
-
- if !respExpires.IsZero() || respHeaders.Get("Expires") != "" {
- return true
- }
-
- return false
-}
-
-func cachableStatusCode(statusCode int) bool {
- /*
- Responses with status codes that are defined as cacheable by default
- (e.g., 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, and 501 in
- this specification) can be reused by a cache with heuristic
- expiration unless otherwise indicated by the method definition or
- explicit cache controls [RFC7234]; all other status codes are not
- cacheable by default.
- */
- switch statusCode {
- case 200:
- return true
- case 203:
- return true
- case 204:
- return true
- case 206:
- return true
- case 300:
- return true
- case 301:
- return true
- case 404:
- return true
- case 405:
- return true
- case 410:
- return true
- case 414:
- return true
- case 501:
- return true
- default:
- return false
- }
-}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/reasons.go b/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/reasons.go
deleted file mode 100644
index 2e75ae72ed..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/reasons.go
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Copyright 2015 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package cacheobject
-
-// Repersents a potential Reason to not cache an object.
-//
-// Applications may wish to ignore specific reasons, which will make them non-RFC
-// compliant, but this type gives them specific cases they can choose to ignore,
-// making them compliant in as many cases as they can.
-type Reason int
-
-const (
-
- // The request method was POST and an Expiration header was not supplied.
- ReasonRequestMethodPOST Reason = iota
-
- // The request method was PUT and PUTs are not cachable.
- ReasonRequestMethodPUT
-
- // The request method was DELETE and DELETEs are not cachable.
- ReasonRequestMethodDELETE
-
- // The request method was CONNECT and CONNECTs are not cachable.
- ReasonRequestMethodCONNECT
-
- // The request method was OPTIONS and OPTIONS are not cachable.
- ReasonRequestMethodOPTIONS
-
- // The request method was TRACE and TRACEs are not cachable.
- ReasonRequestMethodTRACE
-
- // The request method was not recognized by cachecontrol, and should not be cached.
- ReasonRequestMethodUnknown
-
- // The request included an Cache-Control: no-store header
- ReasonRequestNoStore
-
- // The request included an Authorization header without an explicit Public or Expiration time: http://tools.ietf.org/html/rfc7234#section-3.2
- ReasonRequestAuthorizationHeader
-
- // The response included an Cache-Control: no-store header
- ReasonResponseNoStore
-
- // The response included an Cache-Control: private header and this is not a Private cache
- ReasonResponsePrivate
-
- // The response failed to meet at least one of the conditions specified in RFC 7234 section 3: http://tools.ietf.org/html/rfc7234#section-3
- ReasonResponseUncachableByDefault
-)
-
-func (r Reason) String() string {
- switch r {
- case ReasonRequestMethodPOST:
- return "ReasonRequestMethodPOST"
- case ReasonRequestMethodPUT:
- return "ReasonRequestMethodPUT"
- case ReasonRequestMethodDELETE:
- return "ReasonRequestMethodDELETE"
- case ReasonRequestMethodCONNECT:
- return "ReasonRequestMethodCONNECT"
- case ReasonRequestMethodOPTIONS:
- return "ReasonRequestMethodOPTIONS"
- case ReasonRequestMethodTRACE:
- return "ReasonRequestMethodTRACE"
- case ReasonRequestMethodUnknown:
- return "ReasonRequestMethodUnkown"
- case ReasonRequestNoStore:
- return "ReasonRequestNoStore"
- case ReasonRequestAuthorizationHeader:
- return "ReasonRequestAuthorizationHeader"
- case ReasonResponseNoStore:
- return "ReasonResponseNoStore"
- case ReasonResponsePrivate:
- return "ReasonResponsePrivate"
- case ReasonResponseUncachableByDefault:
- return "ReasonResponseUncachableByDefault"
- }
-
- panic(r)
-}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/warning.go b/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/warning.go
deleted file mode 100644
index 82f8941303..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/cacheobject/warning.go
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Copyright 2015 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package cacheobject
-
-import (
- "fmt"
- "net/http"
- "time"
-)
-
-// Repersents an HTTP Warning: http://tools.ietf.org/html/rfc7234#section-5.5
-type Warning int
-
-const (
- // Response is Stale
- // A cache SHOULD generate this whenever the sent response is stale.
- WarningResponseIsStale Warning = 110
-
- // Revalidation Failed
- // A cache SHOULD generate this when sending a stale
- // response because an attempt to validate the response failed, due to an
- // inability to reach the server.
- WarningRevalidationFailed Warning = 111
-
- // Disconnected Operation
- // A cache SHOULD generate this if it is intentionally disconnected from
- // the rest of the network for a period of time.
- WarningDisconnectedOperation Warning = 112
-
- // Heuristic Expiration
- //
- // A cache SHOULD generate this if it heuristically chose a freshness
- // lifetime greater than 24 hours and the response's age is greater than
- // 24 hours.
- WarningHeuristicExpiration Warning = 113
-
- // Miscellaneous Warning
- //
- // The warning text can include arbitrary information to be presented to
- // a human user or logged. A system receiving this warning MUST NOT
- // take any automated action, besides presenting the warning to the
- // user.
- WarningMiscellaneousWarning Warning = 199
-
- // Transformation Applied
- //
- // This Warning code MUST be added by a proxy if it applies any
- // transformation to the representation, such as changing the
- // content-coding, media-type, or modifying the representation data,
- // unless this Warning code already appears in the response.
- WarningTransformationApplied Warning = 214
-
- // Miscellaneous Persistent Warning
- //
- // The warning text can include arbitrary information to be presented to
- // a human user or logged. A system receiving this warning MUST NOT
- // take any automated action.
- WarningMiscellaneousPersistentWarning Warning = 299
-)
-
-func (w Warning) HeaderString(agent string, date time.Time) string {
- if agent == "" {
- agent = "-"
- } else {
- // TODO(pquerna): this doesn't escape agent if it contains bad things.
- agent = `"` + agent + `"`
- }
- return fmt.Sprintf(`%d %s "%s" %s`, w, agent, w.String(), date.Format(http.TimeFormat))
-}
-
-func (w Warning) String() string {
- switch w {
- case WarningResponseIsStale:
- return "Response is Stale"
- case WarningRevalidationFailed:
- return "Revalidation Failed"
- case WarningDisconnectedOperation:
- return "Disconnected Operation"
- case WarningHeuristicExpiration:
- return "Heuristic Expiration"
- case WarningMiscellaneousWarning:
- // TODO(pquerna): ideally had a better way to override this one code.
- return "Miscellaneous Warning"
- case WarningTransformationApplied:
- return "Transformation Applied"
- case WarningMiscellaneousPersistentWarning:
- // TODO(pquerna): same as WarningMiscellaneousWarning
- return "Miscellaneous Persistent Warning"
- }
-
- panic(w)
-}
diff --git a/etcd/vendor/github.com/pquerna/cachecontrol/doc.go b/etcd/vendor/github.com/pquerna/cachecontrol/doc.go
deleted file mode 100644
index 3fe7ed0d68..0000000000
--- a/etcd/vendor/github.com/pquerna/cachecontrol/doc.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Copyright 2015 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package cachecontrol implements the logic for HTTP Caching
-//
-// Deciding if an HTTP Response can be cached is often harder
-// and more bug prone than an actual cache storage backend.
-// cachecontrol provides a simple interface to determine if
-// request and response pairs are cachable as defined under
-// RFC 7234 http://tools.ietf.org/html/rfc7234
-package cachecontrol
diff --git a/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go
deleted file mode 100644
index 8d2f05500b..0000000000
--- a/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "fmt"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/testutil/promlint"
-)
-
-// CollectAndLint registers the provided Collector with a newly created pedantic
-// Registry. It then calls GatherAndLint with that Registry and with the
-// provided metricNames.
-func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) {
- reg := prometheus.NewPedanticRegistry()
- if err := reg.Register(c); err != nil {
- return nil, fmt.Errorf("registering collector failed: %w", err)
- }
- return GatherAndLint(reg, metricNames...)
-}
-
-// GatherAndLint gathers all metrics from the provided Gatherer and checks them
-// with the linter in the promlint package. If any metricNames are provided,
-// only metrics with those names are checked.
-func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) {
- got, err := g.Gather()
- if err != nil {
- return nil, fmt.Errorf("gathering metrics failed: %w", err)
- }
- if metricNames != nil {
- got = filterMetrics(got, metricNames)
- }
- return promlint.NewWithMetricFamilies(got).Lint()
-}
diff --git a/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
deleted file mode 100644
index a20f159b78..0000000000
--- a/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promlint provides a linter for Prometheus metrics.
-package promlint
-
-import (
- "errors"
- "fmt"
- "io"
- "regexp"
- "sort"
- "strings"
-
- "github.com/prometheus/common/expfmt"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// A Linter is a Prometheus metrics linter. It identifies issues with metric
-// names, types, and metadata, and reports them to the caller.
-type Linter struct {
- // The linter will read metrics in the Prometheus text format from r and
- // then lint it, _and_ it will lint the metrics provided directly as
- // MetricFamily proto messages in mfs. Note, however, that the current
- // constructor functions New and NewWithMetricFamilies only ever set one
- // of them.
- r io.Reader
- mfs []*dto.MetricFamily
-}
-
-// A Problem is an issue detected by a Linter.
-type Problem struct {
- // The name of the metric indicated by this Problem.
- Metric string
-
- // A description of the issue for this Problem.
- Text string
-}
-
-// newProblem is helper function to create a Problem.
-func newProblem(mf *dto.MetricFamily, text string) Problem {
- return Problem{
- Metric: mf.GetName(),
- Text: text,
- }
-}
-
-// New creates a new Linter that reads an input stream of Prometheus metrics in
-// the Prometheus text exposition format.
-func New(r io.Reader) *Linter {
- return &Linter{
- r: r,
- }
-}
-
-// NewWithMetricFamilies creates a new Linter that reads from a slice of
-// MetricFamily protobuf messages.
-func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter {
- return &Linter{
- mfs: mfs,
- }
-}
-
-// Lint performs a linting pass, returning a slice of Problems indicating any
-// issues found in the metrics stream. The slice is sorted by metric name
-// and issue description.
-func (l *Linter) Lint() ([]Problem, error) {
- var problems []Problem
-
- if l.r != nil {
- d := expfmt.NewDecoder(l.r, expfmt.FmtText)
-
- mf := &dto.MetricFamily{}
- for {
- if err := d.Decode(mf); err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return nil, err
- }
-
- problems = append(problems, lint(mf)...)
- }
- }
- for _, mf := range l.mfs {
- problems = append(problems, lint(mf)...)
- }
-
- // Ensure deterministic output.
- sort.SliceStable(problems, func(i, j int) bool {
- if problems[i].Metric == problems[j].Metric {
- return problems[i].Text < problems[j].Text
- }
- return problems[i].Metric < problems[j].Metric
- })
-
- return problems, nil
-}
-
-// lint is the entry point for linting a single metric.
-func lint(mf *dto.MetricFamily) []Problem {
- fns := []func(mf *dto.MetricFamily) []Problem{
- lintHelp,
- lintMetricUnits,
- lintCounter,
- lintHistogramSummaryReserved,
- lintMetricTypeInName,
- lintReservedChars,
- lintCamelCase,
- lintUnitAbbreviations,
- }
-
- var problems []Problem
- for _, fn := range fns {
- problems = append(problems, fn(mf)...)
- }
-
- // TODO(mdlayher): lint rules for specific metrics types.
- return problems
-}
-
-// lintHelp detects issues related to the help text for a metric.
-func lintHelp(mf *dto.MetricFamily) []Problem {
- var problems []Problem
-
- // Expect all metrics to have help text available.
- if mf.Help == nil {
- problems = append(problems, newProblem(mf, "no help text"))
- }
-
- return problems
-}
-
-// lintMetricUnits detects issues with metric unit names.
-func lintMetricUnits(mf *dto.MetricFamily) []Problem {
- var problems []Problem
-
- unit, base, ok := metricUnits(*mf.Name)
- if !ok {
- // No known units detected.
- return nil
- }
-
- // Unit is already a base unit.
- if unit == base {
- return nil
- }
-
- problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit)))
-
- return problems
-}
-
-// lintCounter detects issues specific to counters, as well as patterns that should
-// only be used with counters.
-func lintCounter(mf *dto.MetricFamily) []Problem {
- var problems []Problem
-
- isCounter := mf.GetType() == dto.MetricType_COUNTER
- isUntyped := mf.GetType() == dto.MetricType_UNTYPED
- hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total")
-
- switch {
- case isCounter && !hasTotalSuffix:
- problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`))
- case !isUntyped && !isCounter && hasTotalSuffix:
- problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`))
- }
-
- return problems
-}
-
-// lintHistogramSummaryReserved detects when other types of metrics use names or labels
-// reserved for use by histograms and/or summaries.
-func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem {
- // These rules do not apply to untyped metrics.
- t := mf.GetType()
- if t == dto.MetricType_UNTYPED {
- return nil
- }
-
- var problems []Problem
-
- isHistogram := t == dto.MetricType_HISTOGRAM
- isSummary := t == dto.MetricType_SUMMARY
-
- n := mf.GetName()
-
- if !isHistogram && strings.HasSuffix(n, "_bucket") {
- problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`))
- }
- if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") {
- problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`))
- }
- if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") {
- problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`))
- }
-
- for _, m := range mf.GetMetric() {
- for _, l := range m.GetLabel() {
- ln := l.GetName()
-
- if !isHistogram && ln == "le" {
- problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`))
- }
- if !isSummary && ln == "quantile" {
- problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`))
- }
- }
- }
-
- return problems
-}
-
-// lintMetricTypeInName detects when metric types are included in the metric name.
-func lintMetricTypeInName(mf *dto.MetricFamily) []Problem {
- var problems []Problem
- n := strings.ToLower(mf.GetName())
-
- for i, t := range dto.MetricType_name {
- if i == int32(dto.MetricType_UNTYPED) {
- continue
- }
-
- typename := strings.ToLower(t)
- if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) {
- problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename)))
- }
- }
- return problems
-}
-
-// lintReservedChars detects colons in metric names.
-func lintReservedChars(mf *dto.MetricFamily) []Problem {
- var problems []Problem
- if strings.Contains(mf.GetName(), ":") {
- problems = append(problems, newProblem(mf, "metric names should not contain ':'"))
- }
- return problems
-}
-
-var camelCase = regexp.MustCompile(`[a-z][A-Z]`)
-
-// lintCamelCase detects metric names and label names written in camelCase.
-func lintCamelCase(mf *dto.MetricFamily) []Problem {
- var problems []Problem
- if camelCase.FindString(mf.GetName()) != "" {
- problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'"))
- }
-
- for _, m := range mf.GetMetric() {
- for _, l := range m.GetLabel() {
- if camelCase.FindString(l.GetName()) != "" {
- problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'"))
- }
- }
- }
- return problems
-}
-
-// lintUnitAbbreviations detects abbreviated units in the metric name.
-func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
- var problems []Problem
- n := strings.ToLower(mf.GetName())
- for _, s := range unitAbbreviations {
- if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) {
- problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units"))
- }
- }
- return problems
-}
-
-// metricUnits attempts to detect known unit types used as part of a metric name,
-// e.g. "foo_bytes_total" or "bar_baz_milligrams".
-func metricUnits(m string) (unit, base string, ok bool) {
- ss := strings.Split(m, "_")
-
- for unit, base := range units {
- // Also check for "no prefix".
- for _, p := range append(unitPrefixes, "") {
- for _, s := range ss {
- // Attempt to explicitly match a known unit with a known prefix,
- // as some words may look like "units" when matching suffix.
- //
- // As an example, "thermometers" should not match "meters", but
- // "kilometers" should.
- if s == p+unit {
- return p + unit, base, true
- }
- }
- }
- }
-
- return "", "", false
-}
-
-// Units and their possible prefixes recognized by this library. More can be
-// added over time as needed.
-var (
- // map a unit to the appropriate base unit.
- units = map[string]string{
- // Base units.
- "amperes": "amperes",
- "bytes": "bytes",
- "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases.
- "grams": "grams",
- "joules": "joules",
- "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements).
- "meters": "meters", // Both American and international spelling permitted.
- "metres": "metres",
- "seconds": "seconds",
- "volts": "volts",
-
- // Non base units.
- // Time.
- "minutes": "seconds",
- "hours": "seconds",
- "days": "seconds",
- "weeks": "seconds",
- // Temperature.
- "kelvins": "kelvin",
- "fahrenheit": "celsius",
- "rankine": "celsius",
- // Length.
- "inches": "meters",
- "yards": "meters",
- "miles": "meters",
- // Bytes.
- "bits": "bytes",
- // Energy.
- "calories": "joules",
- // Mass.
- "pounds": "grams",
- "ounces": "grams",
- }
-
- unitPrefixes = []string{
- "pico",
- "nano",
- "micro",
- "milli",
- "centi",
- "deci",
- "deca",
- "hecto",
- "kilo",
- "kibi",
- "mega",
- "mibi",
- "giga",
- "gibi",
- "tera",
- "tebi",
- "peta",
- "pebi",
- }
-
- // Common abbreviations that we'd like to discourage.
- unitAbbreviations = []string{
- "s",
- "ms",
- "us",
- "ns",
- "sec",
- "b",
- "kb",
- "mb",
- "gb",
- "tb",
- "pb",
- "m",
- "h",
- "d",
- }
-)
diff --git a/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
deleted file mode 100644
index 91b83b5285..0000000000
--- a/etcd/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil provides helpers to test code using the prometheus package
-// of client_golang.
-//
-// While writing unit tests to verify correct instrumentation of your code, it's
-// a common mistake to mostly test the instrumentation library instead of your
-// own code. Rather than verifying that a prometheus.Counter's value has changed
-// as expected or that it shows up in the exposition after registration, it is
-// in general more robust and more faithful to the concept of unit tests to use
-// mock implementations of the prometheus.Counter and prometheus.Registerer
-// interfaces that simply assert that the Add or Register methods have been
-// called with the expected arguments. However, this might be overkill in simple
-// scenarios. The ToFloat64 function is provided for simple inspection of a
-// single-value metric, but it has to be used with caution.
-//
-// End-to-end tests to verify all or larger parts of the metrics exposition can
-// be implemented with the CollectAndCompare or GatherAndCompare functions. The
-// most appropriate use is not so much testing instrumentation of your code, but
-// testing custom prometheus.Collector implementations and in particular whole
-// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
-// and convert it into Prometheus metrics.
-//
-// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect
-// metrics that have issues with their name, type, or metadata without being
-// necessarily invalid, e.g. a counter with a name missing the “_total” suffix.
-package testutil
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "reflect"
-
- "github.com/davecgh/go-spew/spew"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/expfmt"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/internal"
-)
-
-// ToFloat64 collects all Metrics from the provided Collector. It expects that
-// this results in exactly one Metric being collected, which must be a Gauge,
-// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns
-// the value of the collected Metric.
-//
-// The Collector provided is typically a simple instance of Gauge or Counter, or
-// – less commonly – a GaugeVec or CounterVec with exactly one element. But any
-// Collector fulfilling the prerequisites described above will do.
-//
-// Use this function with caution. It is computationally very expensive and thus
-// not suited at all to read values from Metrics in regular code. This is really
-// only for testing purposes, and even for testing, other approaches are often
-// more appropriate (see this package's documentation).
-//
-// A clear anti-pattern would be to use a metric type from the prometheus
-// package to track values that are also needed for something else than the
-// exposition of Prometheus metrics. For example, you would like to track the
-// number of items in a queue because your code should reject queuing further
-// items if a certain limit is reached. It is tempting to track the number of
-// items in a prometheus.Gauge, as it is then easily available as a metric for
-// exposition, too. However, then you would need to call ToFloat64 in your
-// regular code, potentially quite often. The recommended way is to track the
-// number of items conventionally (in the way you would have done it without
-// considering Prometheus metrics) and then expose the number with a
-// prometheus.GaugeFunc.
-func ToFloat64(c prometheus.Collector) float64 {
- var (
- m prometheus.Metric
- mCount int
- mChan = make(chan prometheus.Metric)
- done = make(chan struct{})
- )
-
- go func() {
- for m = range mChan {
- mCount++
- }
- close(done)
- }()
-
- c.Collect(mChan)
- close(mChan)
- <-done
-
- if mCount != 1 {
- panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount))
- }
-
- pb := &dto.Metric{}
- if err := m.Write(pb); err != nil {
- panic(fmt.Errorf("error happened while collecting metrics: %w", err))
- }
- if pb.Gauge != nil {
- return pb.Gauge.GetValue()
- }
- if pb.Counter != nil {
- return pb.Counter.GetValue()
- }
- if pb.Untyped != nil {
- return pb.Untyped.GetValue()
- }
- panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
-}
-
-// CollectAndCount registers the provided Collector with a newly created
-// pedantic Registry. It then calls GatherAndCount with that Registry and with
-// the provided metricNames. In the unlikely case that the registration or the
-// gathering fails, this function panics. (This is inconsistent with the other
-// CollectAnd… functions in this package and has historical reasons. Changing
-// the function signature would be a breaking change and will therefore only
-// happen with the next major version bump.)
-func CollectAndCount(c prometheus.Collector, metricNames ...string) int {
- reg := prometheus.NewPedanticRegistry()
- if err := reg.Register(c); err != nil {
- panic(fmt.Errorf("registering collector failed: %w", err))
- }
- result, err := GatherAndCount(reg, metricNames...)
- if err != nil {
- panic(err)
- }
- return result
-}
-
-// GatherAndCount gathers all metrics from the provided Gatherer and counts
-// them. It returns the number of metric children in all gathered metric
-// families together. If any metricNames are provided, only metrics with those
-// names are counted.
-func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) {
- got, err := g.Gather()
- if err != nil {
- return 0, fmt.Errorf("gathering metrics failed: %w", err)
- }
- if metricNames != nil {
- got = filterMetrics(got, metricNames)
- }
-
- result := 0
- for _, mf := range got {
- result += len(mf.GetMetric())
- }
- return result, nil
-}
-
-// ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in
-// plain text format. Then it compares it with the results that the `expected` would return.
-// If the `metricNames` is not empty it would filter the comparison only to the given metric names.
-func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error {
- resp, err := http.Get(url)
- if err != nil {
- return fmt.Errorf("scraping metrics failed: %w", err)
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("the scraping target returned a status code other than 200: %d",
- resp.StatusCode)
- }
-
- scraped, err := convertReaderToMetricFamily(resp.Body)
- if err != nil {
- return err
- }
-
- wanted, err := convertReaderToMetricFamily(expected)
- if err != nil {
- return err
- }
-
- return compareMetricFamilies(scraped, wanted, metricNames...)
-}
-
-// CollectAndCompare registers the provided Collector with a newly created
-// pedantic Registry. It then calls GatherAndCompare with that Registry and with
-// the provided metricNames.
-func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
- reg := prometheus.NewPedanticRegistry()
- if err := reg.Register(c); err != nil {
- return fmt.Errorf("registering collector failed: %w", err)
- }
- return GatherAndCompare(reg, expected, metricNames...)
-}
-
-// GatherAndCompare gathers all metrics from the provided Gatherer and compares
-// it to an expected output read from the provided Reader in the Prometheus text
-// exposition format. If any metricNames are provided, only metrics with those
-// names are compared.
-func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
- return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...)
-}
-
-// TransactionalGatherAndCompare gathers all metrics from the provided Gatherer and compares
-// it to an expected output read from the provided Reader in the Prometheus text
-// exposition format. If any metricNames are provided, only metrics with those
-// names are compared.
-func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error {
- got, done, err := g.Gather()
- defer done()
- if err != nil {
- return fmt.Errorf("gathering metrics failed: %w", err)
- }
-
- wanted, err := convertReaderToMetricFamily(expected)
- if err != nil {
- return err
- }
-
- return compareMetricFamilies(got, wanted, metricNames...)
-}
-
-// convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of
-// dto.MetricFamily.
-func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) {
- var tp expfmt.TextParser
- notNormalized, err := tp.TextToMetricFamilies(reader)
- if err != nil {
- return nil, fmt.Errorf("converting reader to metric families failed: %w", err)
- }
-
- return internal.NormalizeMetricFamilies(notNormalized), nil
-}
-
-// compareMetricFamilies would compare 2 slices of metric families, and optionally filters both of
-// them to the `metricNames` provided.
-func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error {
- if metricNames != nil {
- got = filterMetrics(got, metricNames)
- }
-
- return compare(got, expected)
-}
-
-// compare encodes both provided slices of metric families into the text format,
-// compares their string message, and returns an error if they do not match.
-// The error contains the encoded text of both the desired and the actual
-// result.
-func compare(got, want []*dto.MetricFamily) error {
- var gotBuf, wantBuf bytes.Buffer
- enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText)
- for _, mf := range got {
- if err := enc.Encode(mf); err != nil {
- return fmt.Errorf("encoding gathered metrics failed: %w", err)
- }
- }
- enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText)
- for _, mf := range want {
- if err := enc.Encode(mf); err != nil {
- return fmt.Errorf("encoding expected metrics failed: %w", err)
- }
- }
- if diffErr := diff(wantBuf, gotBuf); diffErr != "" {
- return fmt.Errorf(diffErr)
- }
- return nil
-}
-
-// diff returns a diff of both values as long as both are of the same type and
-// are a struct, map, slice, array or string. Otherwise it returns an empty string.
-func diff(expected, actual interface{}) string {
- if expected == nil || actual == nil {
- return ""
- }
-
- et, ek := typeAndKind(expected)
- at, _ := typeAndKind(actual)
- if et != at {
- return ""
- }
-
- if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
- return ""
- }
-
- var e, a string
- c := spew.ConfigState{
- Indent: " ",
- DisablePointerAddresses: true,
- DisableCapacities: true,
- SortKeys: true,
- }
- if et != reflect.TypeOf("") {
- e = c.Sdump(expected)
- a = c.Sdump(actual)
- } else {
- e = reflect.ValueOf(expected).String()
- a = reflect.ValueOf(actual).String()
- }
-
- diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{
- A: internal.SplitLines(e),
- B: internal.SplitLines(a),
- FromFile: "metric output does not match expectation; want",
- FromDate: "",
- ToFile: "got:",
- ToDate: "",
- Context: 1,
- })
-
- if diff == "" {
- return ""
- }
-
- return "\n\nDiff:\n" + diff
-}
-
-// typeAndKind returns the type and kind of the given interface{}
-func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
- t := reflect.TypeOf(v)
- k := t.Kind()
-
- if k == reflect.Ptr {
- t = t.Elem()
- k = t.Kind()
- }
- return t, k
-}
-
-func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
- var filtered []*dto.MetricFamily
- for _, m := range metrics {
- for _, name := range names {
- if m.GetName() == name {
- filtered = append(filtered, m)
- break
- }
- }
- }
- return filtered
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/.gitignore b/etcd/vendor/github.com/robfig/cron/v3/.gitignore
deleted file mode 100644
index 00268614f0..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
diff --git a/etcd/vendor/github.com/robfig/cron/v3/.travis.yml b/etcd/vendor/github.com/robfig/cron/v3/.travis.yml
deleted file mode 100644
index 4f2ee4d973..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/etcd/vendor/github.com/robfig/cron/v3/LICENSE b/etcd/vendor/github.com/robfig/cron/v3/LICENSE
deleted file mode 100644
index 3a0f627ffe..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (C) 2012 Rob Figueiredo
-All Rights Reserved.
-
-MIT LICENSE
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/etcd/vendor/github.com/robfig/cron/v3/README.md b/etcd/vendor/github.com/robfig/cron/v3/README.md
deleted file mode 100644
index 984c537c01..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/README.md
+++ /dev/null
@@ -1,125 +0,0 @@
-[](http://godoc.org/github.com/robfig/cron)
-[](https://travis-ci.org/robfig/cron)
-
-# cron
-
-Cron V3 has been released!
-
-To download the specific tagged release, run:
-
- go get github.com/robfig/cron/v3@v3.0.0
-
-Import it in your program as:
-
- import "github.com/robfig/cron/v3"
-
-It requires Go 1.11 or later due to usage of Go Modules.
-
-Refer to the documentation here:
-http://godoc.org/github.com/robfig/cron
-
-The rest of this document describes the the advances in v3 and a list of
-breaking changes for users that wish to upgrade from an earlier version.
-
-## Upgrading to v3 (June 2019)
-
-cron v3 is a major upgrade to the library that addresses all outstanding bugs,
-feature requests, and rough edges. It is based on a merge of master which
-contains various fixes to issues found over the years and the v2 branch which
-contains some backwards-incompatible features like the ability to remove cron
-jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
-the timezone support, and fixes a number of bugs.
-
-New features:
-
-- Support for Go modules. Callers must now import this library as
- `github.com/robfig/cron/v3`, instead of `gopkg.in/...`
-
-- Fixed bugs:
- - 0f01e6b parser: fix combining of Dow and Dom (#70)
- - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
- - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
- - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
- - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)
-
-- Standard cron spec parsing by default (first field is "minute"), with an easy
- way to opt into the seconds field (quartz-compatible). Although, note that the
- year field (optional in Quartz) is not supported.
-
-- Extensible, key/value logging via an interface that complies with
- the https://github.com/go-logr/logr project.
-
-- The new Chain & JobWrapper types allow you to install "interceptors" to add
- cross-cutting behavior like the following:
- - Recover any panics from jobs
- - Delay a job's execution if the previous run hasn't completed yet
- - Skip a job's execution if the previous run hasn't completed yet
- - Log each job's invocations
- - Notification when jobs are completed
-
-It is backwards incompatible with both v1 and v2. These updates are required:
-
-- The v1 branch accepted an optional seconds field at the beginning of the cron
- spec. This is non-standard and has led to a lot of confusion. The new default
- parser conforms to the standard as described by [the Cron wikipedia page].
-
- UPDATING: To retain the old behavior, construct your Cron with a custom
- parser:
-
- // Seconds field, required
- cron.New(cron.WithSeconds())
-
- // Seconds field, optional
- cron.New(
- cron.WithParser(
- cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))
-
-- The Cron type now accepts functional options on construction rather than the
- previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).
-
- UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
- updated to provide those values on construction.
-
-- CRON_TZ is now the recommended way to specify the timezone of a single
- schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
- will continue to be supported since it is unambiguous and easy to do so.
-
- UPDATING: No update is required.
-
-- By default, cron will no longer recover panics in jobs that it runs.
- Recovering can be surprising (see issue #192) and seems to be at odds with
- typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
- has been removed to accommodate the more general JobWrapper type.
-
- UPDATING: To opt into panic recovery and configure the panic logger:
-
- cron.New(cron.WithChain(
- cron.Recover(logger), // or use cron.DefaultLogger
- ))
-
-- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
- removed, since it is duplicative with the leveled logging.
-
- UPDATING: Callers should use `WithLogger` and specify a logger that does not
- discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
-
- cron.New(
- cron.WithLogger(cron.VerbosePrintfLogger(logger)))
-
-
-### Background - Cron spec format
-
-There are two cron spec formats in common usage:
-
-- The "standard" cron format, described on [the Cron wikipedia page] and used by
- the cron Linux system utility.
-
-- The cron format used by [the Quartz Scheduler], commonly used for scheduled
- jobs in Java software
-
-[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
-[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html
-
-The original version of this package included an optional "seconds" field, which
-made it incompatible with both of these formats. Now, the "standard" format is
-the default format accepted, and the Quartz format is opt-in.
diff --git a/etcd/vendor/github.com/robfig/cron/v3/chain.go b/etcd/vendor/github.com/robfig/cron/v3/chain.go
deleted file mode 100644
index 9565b418e0..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/chain.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package cron
-
-import (
- "fmt"
- "runtime"
- "sync"
- "time"
-)
-
-// JobWrapper decorates the given Job with some behavior.
-type JobWrapper func(Job) Job
-
-// Chain is a sequence of JobWrappers that decorates submitted jobs with
-// cross-cutting behaviors like logging or synchronization.
-type Chain struct {
- wrappers []JobWrapper
-}
-
-// NewChain returns a Chain consisting of the given JobWrappers.
-func NewChain(c ...JobWrapper) Chain {
- return Chain{c}
-}
-
-// Then decorates the given job with all JobWrappers in the chain.
-//
-// This:
-// NewChain(m1, m2, m3).Then(job)
-// is equivalent to:
-// m1(m2(m3(job)))
-func (c Chain) Then(j Job) Job {
- for i := range c.wrappers {
- j = c.wrappers[len(c.wrappers)-i-1](j)
- }
- return j
-}
-
-// Recover panics in wrapped jobs and log them with the provided logger.
-func Recover(logger Logger) JobWrapper {
- return func(j Job) Job {
- return FuncJob(func() {
- defer func() {
- if r := recover(); r != nil {
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- err, ok := r.(error)
- if !ok {
- err = fmt.Errorf("%v", r)
- }
- logger.Error(err, "panic", "stack", "...\n"+string(buf))
- }
- }()
- j.Run()
- })
- }
-}
-
-// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
-// previous one is complete. Jobs running after a delay of more than a minute
-// have the delay logged at Info.
-func DelayIfStillRunning(logger Logger) JobWrapper {
- return func(j Job) Job {
- var mu sync.Mutex
- return FuncJob(func() {
- start := time.Now()
- mu.Lock()
- defer mu.Unlock()
- if dur := time.Since(start); dur > time.Minute {
- logger.Info("delay", "duration", dur)
- }
- j.Run()
- })
- }
-}
-
-// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
-// still running. It logs skips to the given logger at Info level.
-func SkipIfStillRunning(logger Logger) JobWrapper {
- return func(j Job) Job {
- var ch = make(chan struct{}, 1)
- ch <- struct{}{}
- return FuncJob(func() {
- select {
- case v := <-ch:
- j.Run()
- ch <- v
- default:
- logger.Info("skip")
- }
- })
- }
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/constantdelay.go b/etcd/vendor/github.com/robfig/cron/v3/constantdelay.go
deleted file mode 100644
index cd6e7b1be9..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/constantdelay.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package cron
-
-import "time"
-
-// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
-// It does not support jobs more frequent than once a second.
-type ConstantDelaySchedule struct {
- Delay time.Duration
-}
-
-// Every returns a crontab Schedule that activates once every duration.
-// Delays of less than a second are not supported (will round up to 1 second).
-// Any fields less than a Second are truncated.
-func Every(duration time.Duration) ConstantDelaySchedule {
- if duration < time.Second {
- duration = time.Second
- }
- return ConstantDelaySchedule{
- Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
- }
-}
-
-// Next returns the next time this should be run.
-// This rounds so that the next activation time will be on the second.
-func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
- return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/cron.go b/etcd/vendor/github.com/robfig/cron/v3/cron.go
deleted file mode 100644
index c7e9176658..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/cron.go
+++ /dev/null
@@ -1,355 +0,0 @@
-package cron
-
-import (
- "context"
- "sort"
- "sync"
- "time"
-)
-
-// Cron keeps track of any number of entries, invoking the associated func as
-// specified by the schedule. It may be started, stopped, and the entries may
-// be inspected while running.
-type Cron struct {
- entries []*Entry
- chain Chain
- stop chan struct{}
- add chan *Entry
- remove chan EntryID
- snapshot chan chan []Entry
- running bool
- logger Logger
- runningMu sync.Mutex
- location *time.Location
- parser ScheduleParser
- nextID EntryID
- jobWaiter sync.WaitGroup
-}
-
-// ScheduleParser is an interface for schedule spec parsers that return a Schedule
-type ScheduleParser interface {
- Parse(spec string) (Schedule, error)
-}
-
-// Job is an interface for submitted cron jobs.
-type Job interface {
- Run()
-}
-
-// Schedule describes a job's duty cycle.
-type Schedule interface {
- // Next returns the next activation time, later than the given time.
- // Next is invoked initially, and then each time the job is run.
- Next(time.Time) time.Time
-}
-
-// EntryID identifies an entry within a Cron instance
-type EntryID int
-
-// Entry consists of a schedule and the func to execute on that schedule.
-type Entry struct {
- // ID is the cron-assigned ID of this entry, which may be used to look up a
- // snapshot or remove it.
- ID EntryID
-
- // Schedule on which this job should be run.
- Schedule Schedule
-
- // Next time the job will run, or the zero time if Cron has not been
- // started or this entry's schedule is unsatisfiable
- Next time.Time
-
- // Prev is the last time this job was run, or the zero time if never.
- Prev time.Time
-
- // WrappedJob is the thing to run when the Schedule is activated.
- WrappedJob Job
-
- // Job is the thing that was submitted to cron.
- // It is kept around so that user code that needs to get at the job later,
- // e.g. via Entries() can do so.
- Job Job
-}
-
-// Valid returns true if this is not the zero entry.
-func (e Entry) Valid() bool { return e.ID != 0 }
-
-// byTime is a wrapper for sorting the entry array by time
-// (with zero time at the end).
-type byTime []*Entry
-
-func (s byTime) Len() int { return len(s) }
-func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byTime) Less(i, j int) bool {
- // Two zero times should return false.
- // Otherwise, zero is "greater" than any other time.
- // (To sort it at the end of the list.)
- if s[i].Next.IsZero() {
- return false
- }
- if s[j].Next.IsZero() {
- return true
- }
- return s[i].Next.Before(s[j].Next)
-}
-
-// New returns a new Cron job runner, modified by the given options.
-//
-// Available Settings
-//
-// Time Zone
-// Description: The time zone in which schedules are interpreted
-// Default: time.Local
-//
-// Parser
-// Description: Parser converts cron spec strings into cron.Schedules.
-// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron
-//
-// Chain
-// Description: Wrap submitted jobs to customize behavior.
-// Default: A chain that recovers panics and logs them to stderr.
-//
-// See "cron.With*" to modify the default behavior.
-func New(opts ...Option) *Cron {
- c := &Cron{
- entries: nil,
- chain: NewChain(),
- add: make(chan *Entry),
- stop: make(chan struct{}),
- snapshot: make(chan chan []Entry),
- remove: make(chan EntryID),
- running: false,
- runningMu: sync.Mutex{},
- logger: DefaultLogger,
- location: time.Local,
- parser: standardParser,
- }
- for _, opt := range opts {
- opt(c)
- }
- return c
-}
-
-// FuncJob is a wrapper that turns a func() into a cron.Job
-type FuncJob func()
-
-func (f FuncJob) Run() { f() }
-
-// AddFunc adds a func to the Cron to be run on the given schedule.
-// The spec is parsed using the time zone of this Cron instance as the default.
-// An opaque ID is returned that can be used to later remove it.
-func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
- return c.AddJob(spec, FuncJob(cmd))
-}
-
-// AddJob adds a Job to the Cron to be run on the given schedule.
-// The spec is parsed using the time zone of this Cron instance as the default.
-// An opaque ID is returned that can be used to later remove it.
-func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
- schedule, err := c.parser.Parse(spec)
- if err != nil {
- return 0, err
- }
- return c.Schedule(schedule, cmd), nil
-}
-
-// Schedule adds a Job to the Cron to be run on the given schedule.
-// The job is wrapped with the configured Chain.
-func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
- c.runningMu.Lock()
- defer c.runningMu.Unlock()
- c.nextID++
- entry := &Entry{
- ID: c.nextID,
- Schedule: schedule,
- WrappedJob: c.chain.Then(cmd),
- Job: cmd,
- }
- if !c.running {
- c.entries = append(c.entries, entry)
- } else {
- c.add <- entry
- }
- return entry.ID
-}
-
-// Entries returns a snapshot of the cron entries.
-func (c *Cron) Entries() []Entry {
- c.runningMu.Lock()
- defer c.runningMu.Unlock()
- if c.running {
- replyChan := make(chan []Entry, 1)
- c.snapshot <- replyChan
- return <-replyChan
- }
- return c.entrySnapshot()
-}
-
-// Location gets the time zone location
-func (c *Cron) Location() *time.Location {
- return c.location
-}
-
-// Entry returns a snapshot of the given entry, or nil if it couldn't be found.
-func (c *Cron) Entry(id EntryID) Entry {
- for _, entry := range c.Entries() {
- if id == entry.ID {
- return entry
- }
- }
- return Entry{}
-}
-
-// Remove an entry from being run in the future.
-func (c *Cron) Remove(id EntryID) {
- c.runningMu.Lock()
- defer c.runningMu.Unlock()
- if c.running {
- c.remove <- id
- } else {
- c.removeEntry(id)
- }
-}
-
-// Start the cron scheduler in its own goroutine, or no-op if already started.
-func (c *Cron) Start() {
- c.runningMu.Lock()
- defer c.runningMu.Unlock()
- if c.running {
- return
- }
- c.running = true
- go c.run()
-}
-
-// Run the cron scheduler, or no-op if already running.
-func (c *Cron) Run() {
- c.runningMu.Lock()
- if c.running {
- c.runningMu.Unlock()
- return
- }
- c.running = true
- c.runningMu.Unlock()
- c.run()
-}
-
-// run the scheduler.. this is private just due to the need to synchronize
-// access to the 'running' state variable.
-func (c *Cron) run() {
- c.logger.Info("start")
-
- // Figure out the next activation times for each entry.
- now := c.now()
- for _, entry := range c.entries {
- entry.Next = entry.Schedule.Next(now)
- c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
- }
-
- for {
- // Determine the next entry to run.
- sort.Sort(byTime(c.entries))
-
- var timer *time.Timer
- if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
- // If there are no entries yet, just sleep - it still handles new entries
- // and stop requests.
- timer = time.NewTimer(100000 * time.Hour)
- } else {
- timer = time.NewTimer(c.entries[0].Next.Sub(now))
- }
-
- for {
- select {
- case now = <-timer.C:
- now = now.In(c.location)
- c.logger.Info("wake", "now", now)
-
- // Run every entry whose next time was less than now
- for _, e := range c.entries {
- if e.Next.After(now) || e.Next.IsZero() {
- break
- }
- c.startJob(e.WrappedJob)
- e.Prev = e.Next
- e.Next = e.Schedule.Next(now)
- c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
- }
-
- case newEntry := <-c.add:
- timer.Stop()
- now = c.now()
- newEntry.Next = newEntry.Schedule.Next(now)
- c.entries = append(c.entries, newEntry)
- c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)
-
- case replyChan := <-c.snapshot:
- replyChan <- c.entrySnapshot()
- continue
-
- case <-c.stop:
- timer.Stop()
- c.logger.Info("stop")
- return
-
- case id := <-c.remove:
- timer.Stop()
- now = c.now()
- c.removeEntry(id)
- c.logger.Info("removed", "entry", id)
- }
-
- break
- }
- }
-}
-
-// startJob runs the given job in a new goroutine.
-func (c *Cron) startJob(j Job) {
- c.jobWaiter.Add(1)
- go func() {
- defer c.jobWaiter.Done()
- j.Run()
- }()
-}
-
-// now returns current time in c location
-func (c *Cron) now() time.Time {
- return time.Now().In(c.location)
-}
-
-// Stop stops the cron scheduler if it is running; otherwise it does nothing.
-// A context is returned so the caller can wait for running jobs to complete.
-func (c *Cron) Stop() context.Context {
- c.runningMu.Lock()
- defer c.runningMu.Unlock()
- if c.running {
- c.stop <- struct{}{}
- c.running = false
- }
- ctx, cancel := context.WithCancel(context.Background())
- go func() {
- c.jobWaiter.Wait()
- cancel()
- }()
- return ctx
-}
-
-// entrySnapshot returns a copy of the current cron entry list.
-func (c *Cron) entrySnapshot() []Entry {
- var entries = make([]Entry, len(c.entries))
- for i, e := range c.entries {
- entries[i] = *e
- }
- return entries
-}
-
-func (c *Cron) removeEntry(id EntryID) {
- var entries []*Entry
- for _, e := range c.entries {
- if e.ID != id {
- entries = append(entries, e)
- }
- }
- c.entries = entries
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/doc.go b/etcd/vendor/github.com/robfig/cron/v3/doc.go
deleted file mode 100644
index fa5d08b4db..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/doc.go
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
-Package cron implements a cron spec parser and job runner.
-
-Installation
-
-To download the specific tagged release, run:
-
- go get github.com/robfig/cron/v3@v3.0.0
-
-Import it in your program as:
-
- import "github.com/robfig/cron/v3"
-
-It requires Go 1.11 or later due to usage of Go Modules.
-
-Usage
-
-Callers may register Funcs to be invoked on a given schedule. Cron will run
-them in their own goroutines.
-
- c := cron.New()
- c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
- c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
- c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
- c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
- c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
- c.Start()
- ..
- // Funcs are invoked in their own goroutine, asynchronously.
- ...
- // Funcs may also be added to a running Cron
- c.AddFunc("@daily", func() { fmt.Println("Every day") })
- ..
- // Inspect the cron job entries' next and previous run times.
- inspect(c.Entries())
- ..
- c.Stop() // Stop the scheduler (does not stop any jobs already running).
-
-CRON Expression Format
-
-A cron expression represents a set of times, using 5 space-separated fields.
-
- Field name | Mandatory? | Allowed values | Allowed special characters
- ---------- | ---------- | -------------- | --------------------------
- Minutes | Yes | 0-59 | * / , -
- Hours | Yes | 0-23 | * / , -
- Day of month | Yes | 1-31 | * / , - ?
- Month | Yes | 1-12 or JAN-DEC | * / , -
- Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
-
-Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
-"sun" are equally accepted.
-
-The specific interpretation of the format is based on the Cron Wikipedia page:
-https://en.wikipedia.org/wiki/Cron
-
-Alternative Formats
-
-Alternative Cron expression formats support other fields like seconds. You can
-implement that by creating a custom Parser as follows.
-
- cron.New(
- cron.WithParser(
- cron.NewParser(
- cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
-
-Since adding Seconds is the most common modification to the standard cron spec,
-cron provides a builtin function to do that, which is equivalent to the custom
-parser you saw earlier, except that its seconds field is REQUIRED:
-
- cron.New(cron.WithSeconds())
-
-That emulates Quartz, the most popular alternative Cron schedule format:
-http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
-
-Special Characters
-
-Asterisk ( * )
-
-The asterisk indicates that the cron expression will match for all values of the
-field; e.g., using an asterisk in the 5th field (month) would indicate every
-month.
-
-Slash ( / )
-
-Slashes are used to describe increments of ranges. For example 3-59/15 in the
-1st field (minutes) would indicate the 3rd minute of the hour and every 15
-minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
-that is, an increment over the largest possible range of the field. The form
-"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
-increment until the end of that specific range. It does not wrap around.
-
-Comma ( , )
-
-Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
-the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
-
-Hyphen ( - )
-
-Hyphens are used to define ranges. For example, 9-17 would indicate every
-hour between 9am and 5pm inclusive.
-
-Question mark ( ? )
-
-Question mark may be used instead of '*' for leaving either day-of-month or
-day-of-week blank.
-
-Predefined schedules
-
-You may use one of several pre-defined schedules in place of a cron expression.
-
- Entry | Description | Equivalent To
- ----- | ----------- | -------------
- @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
- @monthly | Run once a month, midnight, first of month | 0 0 1 * *
- @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
- @daily (or @midnight) | Run once a day, midnight | 0 0 * * *
- @hourly | Run once an hour, beginning of hour | 0 * * * *
-
-Intervals
-
-You may also schedule a job to execute at fixed intervals, starting at the time it's added
-or cron is run. This is supported by formatting the cron spec like this:
-
- @every
-
-where "duration" is a string accepted by time.ParseDuration
-(http://golang.org/pkg/time/#ParseDuration).
-
-For example, "@every 1h30m10s" would indicate a schedule that activates after
-1 hour, 30 minutes, 10 seconds, and then every interval after that.
-
-Note: The interval does not take the job runtime into account. For example,
-if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
-it will have only 2 minutes of idle time between each run.
-
-Time zones
-
-By default, all interpretation and scheduling is done in the machine's local
-time zone (time.Local). You can specify a different time zone on construction:
-
- cron.New(
- cron.WithLocation(time.UTC))
-
-Individual cron schedules may also override the time zone they are to be
-interpreted in by providing an additional space-separated field at the beginning
-of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
-
-For example:
-
- # Runs at 6am in time.Local
- cron.New().AddFunc("0 6 * * ?", ...)
-
- # Runs at 6am in America/New_York
- nyc, _ := time.LoadLocation("America/New_York")
- c := cron.New(cron.WithLocation(nyc))
- c.AddFunc("0 6 * * ?", ...)
-
- # Runs at 6am in Asia/Tokyo
- cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
-
- # Runs at 6am in Asia/Tokyo
- c := cron.New(cron.WithLocation(nyc))
- c.SetLocation("America/New_York")
- c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
-
-The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
-
-Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
-not be run!
-
-Job Wrappers
-
-A Cron runner may be configured with a chain of job wrappers to add
-cross-cutting functionality to all submitted jobs. For example, they may be used
-to achieve the following effects:
-
- - Recover any panics from jobs (activated by default)
- - Delay a job's execution if the previous run hasn't completed yet
- - Skip a job's execution if the previous run hasn't completed yet
- - Log each job's invocations
-
-Install wrappers for all jobs added to a cron using the `cron.WithChain` option:
-
- cron.New(cron.WithChain(
- cron.SkipIfStillRunning(logger),
- ))
-
-Install wrappers for individual jobs by explicitly wrapping them:
-
- job = cron.NewChain(
- cron.SkipIfStillRunning(logger),
- ).Then(job)
-
-Thread safety
-
-Since the Cron service runs concurrently with the calling code, some amount of
-care must be taken to ensure proper synchronization.
-
-All cron methods are designed to be correctly synchronized as long as the caller
-ensures that invocations have a clear happens-before ordering between them.
-
-Logging
-
-Cron defines a Logger interface that is a subset of the one defined in
-github.com/go-logr/logr. It has two logging levels (Info and Error), and
-parameters are key/value pairs. This makes it possible for cron logging to plug
-into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
-to wrap the standard library *log.Logger.
-
-For additional insight into Cron operations, verbose logging may be activated
-which will record job runs, scheduling decisions, and added or removed jobs.
-Activate it with a one-off logger as follows:
-
- cron.New(
- cron.WithLogger(
- cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))
-
-
-Implementation
-
-Cron entries are stored in an array, sorted by their next activation time. Cron
-sleeps until the next job is due to be run.
-
-Upon waking:
- - it runs each entry that is active on that second
- - it calculates the next run times for the jobs that were run
- - it re-sorts the array of entries by next activation time.
- - it goes to sleep until the soonest job.
-*/
-package cron
diff --git a/etcd/vendor/github.com/robfig/cron/v3/logger.go b/etcd/vendor/github.com/robfig/cron/v3/logger.go
deleted file mode 100644
index b4efcc0535..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/logger.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package cron
-
-import (
- "io/ioutil"
- "log"
- "os"
- "strings"
- "time"
-)
-
-// DefaultLogger is used by Cron if none is specified.
-var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
-
-// DiscardLogger can be used by callers to discard all log messages.
-var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))
-
-// Logger is the interface used in this package for logging, so that any backend
-// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
-type Logger interface {
- // Info logs routine messages about cron's operation.
- Info(msg string, keysAndValues ...interface{})
- // Error logs an error condition.
- Error(err error, msg string, keysAndValues ...interface{})
-}
-
-// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
-// into an implementation of the Logger interface which logs errors only.
-func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
- return printfLogger{l, false}
-}
-
-// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
-// "log") into an implementation of the Logger interface which logs everything.
-func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
- return printfLogger{l, true}
-}
-
-type printfLogger struct {
- logger interface{ Printf(string, ...interface{}) }
- logInfo bool
-}
-
-func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
- if pl.logInfo {
- keysAndValues = formatTimes(keysAndValues)
- pl.logger.Printf(
- formatString(len(keysAndValues)),
- append([]interface{}{msg}, keysAndValues...)...)
- }
-}
-
-func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
- keysAndValues = formatTimes(keysAndValues)
- pl.logger.Printf(
- formatString(len(keysAndValues)+2),
- append([]interface{}{msg, "error", err}, keysAndValues...)...)
-}
-
-// formatString returns a logfmt-like format string for the number of
-// key/values.
-func formatString(numKeysAndValues int) string {
- var sb strings.Builder
- sb.WriteString("%s")
- if numKeysAndValues > 0 {
- sb.WriteString(", ")
- }
- for i := 0; i < numKeysAndValues/2; i++ {
- if i > 0 {
- sb.WriteString(", ")
- }
- sb.WriteString("%v=%v")
- }
- return sb.String()
-}
-
-// formatTimes formats any time.Time values as RFC3339.
-func formatTimes(keysAndValues []interface{}) []interface{} {
- var formattedArgs []interface{}
- for _, arg := range keysAndValues {
- if t, ok := arg.(time.Time); ok {
- arg = t.Format(time.RFC3339)
- }
- formattedArgs = append(formattedArgs, arg)
- }
- return formattedArgs
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/option.go b/etcd/vendor/github.com/robfig/cron/v3/option.go
deleted file mode 100644
index 09e4278e77..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/option.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package cron
-
-import (
- "time"
-)
-
-// Option represents a modification to the default behavior of a Cron.
-type Option func(*Cron)
-
-// WithLocation overrides the timezone of the cron instance.
-func WithLocation(loc *time.Location) Option {
- return func(c *Cron) {
- c.location = loc
- }
-}
-
-// WithSeconds overrides the parser used for interpreting job schedules to
-// include a seconds field as the first one.
-func WithSeconds() Option {
- return WithParser(NewParser(
- Second | Minute | Hour | Dom | Month | Dow | Descriptor,
- ))
-}
-
-// WithParser overrides the parser used for interpreting job schedules.
-func WithParser(p ScheduleParser) Option {
- return func(c *Cron) {
- c.parser = p
- }
-}
-
-// WithChain specifies Job wrappers to apply to all jobs added to this cron.
-// Refer to the Chain* functions in this package for provided wrappers.
-func WithChain(wrappers ...JobWrapper) Option {
- return func(c *Cron) {
- c.chain = NewChain(wrappers...)
- }
-}
-
-// WithLogger uses the provided logger.
-func WithLogger(logger Logger) Option {
- return func(c *Cron) {
- c.logger = logger
- }
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/parser.go b/etcd/vendor/github.com/robfig/cron/v3/parser.go
deleted file mode 100644
index 3cf8879f7e..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/parser.go
+++ /dev/null
@@ -1,434 +0,0 @@
-package cron
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-// Configuration options for creating a parser. Most options specify which
-// fields should be included, while others enable features. If a field is not
-// included the parser will assume a default value. These options do not change
-// the order fields are parse in.
-type ParseOption int
-
-const (
- Second ParseOption = 1 << iota // Seconds field, default 0
- SecondOptional // Optional seconds field, default 0
- Minute // Minutes field, default 0
- Hour // Hours field, default 0
- Dom // Day of month field, default *
- Month // Month field, default *
- Dow // Day of week field, default *
- DowOptional // Optional day of week field, default *
- Descriptor // Allow descriptors such as @monthly, @weekly, etc.
-)
-
-var places = []ParseOption{
- Second,
- Minute,
- Hour,
- Dom,
- Month,
- Dow,
-}
-
-var defaults = []string{
- "0",
- "0",
- "0",
- "*",
- "*",
- "*",
-}
-
-// A custom Parser that can be configured.
-type Parser struct {
- options ParseOption
-}
-
-// NewParser creates a Parser with custom options.
-//
-// It panics if more than one Optional is given, since it would be impossible to
-// correctly infer which optional is provided or missing in general.
-//
-// Examples
-//
-// // Standard parser without descriptors
-// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
-// sched, err := specParser.Parse("0 0 15 */3 *")
-//
-// // Same as above, just excludes time fields
-// subsParser := NewParser(Dom | Month | Dow)
-// sched, err := specParser.Parse("15 */3 *")
-//
-// // Same as above, just makes Dow optional
-// subsParser := NewParser(Dom | Month | DowOptional)
-// sched, err := specParser.Parse("15 */3")
-//
-func NewParser(options ParseOption) Parser {
- optionals := 0
- if options&DowOptional > 0 {
- optionals++
- }
- if options&SecondOptional > 0 {
- optionals++
- }
- if optionals > 1 {
- panic("multiple optionals may not be configured")
- }
- return Parser{options}
-}
-
-// Parse returns a new crontab schedule representing the given spec.
-// It returns a descriptive error if the spec is not valid.
-// It accepts crontab specs and features configured by NewParser.
-func (p Parser) Parse(spec string) (Schedule, error) {
- if len(spec) == 0 {
- return nil, fmt.Errorf("empty spec string")
- }
-
- // Extract timezone if present
- var loc = time.Local
- if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
- var err error
- i := strings.Index(spec, " ")
- eq := strings.Index(spec, "=")
- if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
- return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
- }
- spec = strings.TrimSpace(spec[i:])
- }
-
- // Handle named schedules (descriptors), if configured
- if strings.HasPrefix(spec, "@") {
- if p.options&Descriptor == 0 {
- return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
- }
- return parseDescriptor(spec, loc)
- }
-
- // Split on whitespace.
- fields := strings.Fields(spec)
-
- // Validate & fill in any omitted or optional fields
- var err error
- fields, err = normalizeFields(fields, p.options)
- if err != nil {
- return nil, err
- }
-
- field := func(field string, r bounds) uint64 {
- if err != nil {
- return 0
- }
- var bits uint64
- bits, err = getField(field, r)
- return bits
- }
-
- var (
- second = field(fields[0], seconds)
- minute = field(fields[1], minutes)
- hour = field(fields[2], hours)
- dayofmonth = field(fields[3], dom)
- month = field(fields[4], months)
- dayofweek = field(fields[5], dow)
- )
- if err != nil {
- return nil, err
- }
-
- return &SpecSchedule{
- Second: second,
- Minute: minute,
- Hour: hour,
- Dom: dayofmonth,
- Month: month,
- Dow: dayofweek,
- Location: loc,
- }, nil
-}
-
-// normalizeFields takes a subset set of the time fields and returns the full set
-// with defaults (zeroes) populated for unset fields.
-//
-// As part of performing this function, it also validates that the provided
-// fields are compatible with the configured options.
-func normalizeFields(fields []string, options ParseOption) ([]string, error) {
- // Validate optionals & add their field to options
- optionals := 0
- if options&SecondOptional > 0 {
- options |= Second
- optionals++
- }
- if options&DowOptional > 0 {
- options |= Dow
- optionals++
- }
- if optionals > 1 {
- return nil, fmt.Errorf("multiple optionals may not be configured")
- }
-
- // Figure out how many fields we need
- max := 0
- for _, place := range places {
- if options&place > 0 {
- max++
- }
- }
- min := max - optionals
-
- // Validate number of fields
- if count := len(fields); count < min || count > max {
- if min == max {
- return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
- }
- return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
- }
-
- // Populate the optional field if not provided
- if min < max && len(fields) == min {
- switch {
- case options&DowOptional > 0:
- fields = append(fields, defaults[5]) // TODO: improve access to default
- case options&SecondOptional > 0:
- fields = append([]string{defaults[0]}, fields...)
- default:
- return nil, fmt.Errorf("unknown optional field")
- }
- }
-
- // Populate all fields not part of options with their defaults
- n := 0
- expandedFields := make([]string, len(places))
- copy(expandedFields, defaults)
- for i, place := range places {
- if options&place > 0 {
- expandedFields[i] = fields[n]
- n++
- }
- }
- return expandedFields, nil
-}
-
-var standardParser = NewParser(
- Minute | Hour | Dom | Month | Dow | Descriptor,
-)
-
-// ParseStandard returns a new crontab schedule representing the given
-// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
-// representing: minute, hour, day of month, month and day of week, in that
-// order. It returns a descriptive error if the spec is not valid.
-//
-// It accepts
-// - Standard crontab specs, e.g. "* * * * ?"
-// - Descriptors, e.g. "@midnight", "@every 1h30m"
-func ParseStandard(standardSpec string) (Schedule, error) {
- return standardParser.Parse(standardSpec)
-}
-
-// getField returns an Int with the bits set representing all of the times that
-// the field represents or error parsing field value. A "field" is a comma-separated
-// list of "ranges".
-func getField(field string, r bounds) (uint64, error) {
- var bits uint64
- ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
- for _, expr := range ranges {
- bit, err := getRange(expr, r)
- if err != nil {
- return bits, err
- }
- bits |= bit
- }
- return bits, nil
-}
-
-// getRange returns the bits indicated by the given expression:
-// number | number "-" number [ "/" number ]
-// or error parsing range.
-func getRange(expr string, r bounds) (uint64, error) {
- var (
- start, end, step uint
- rangeAndStep = strings.Split(expr, "/")
- lowAndHigh = strings.Split(rangeAndStep[0], "-")
- singleDigit = len(lowAndHigh) == 1
- err error
- )
-
- var extra uint64
- if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
- start = r.min
- end = r.max
- extra = starBit
- } else {
- start, err = parseIntOrName(lowAndHigh[0], r.names)
- if err != nil {
- return 0, err
- }
- switch len(lowAndHigh) {
- case 1:
- end = start
- case 2:
- end, err = parseIntOrName(lowAndHigh[1], r.names)
- if err != nil {
- return 0, err
- }
- default:
- return 0, fmt.Errorf("too many hyphens: %s", expr)
- }
- }
-
- switch len(rangeAndStep) {
- case 1:
- step = 1
- case 2:
- step, err = mustParseInt(rangeAndStep[1])
- if err != nil {
- return 0, err
- }
-
- // Special handling: "N/step" means "N-max/step".
- if singleDigit {
- end = r.max
- }
- if step > 1 {
- extra = 0
- }
- default:
- return 0, fmt.Errorf("too many slashes: %s", expr)
- }
-
- if start < r.min {
- return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
- }
- if end > r.max {
- return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
- }
- if start > end {
- return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
- }
- if step == 0 {
- return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
- }
-
- return getBits(start, end, step) | extra, nil
-}
-
-// parseIntOrName returns the (possibly-named) integer contained in expr.
-func parseIntOrName(expr string, names map[string]uint) (uint, error) {
- if names != nil {
- if namedInt, ok := names[strings.ToLower(expr)]; ok {
- return namedInt, nil
- }
- }
- return mustParseInt(expr)
-}
-
-// mustParseInt parses the given expression as an int or returns an error.
-func mustParseInt(expr string) (uint, error) {
- num, err := strconv.Atoi(expr)
- if err != nil {
- return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
- }
- if num < 0 {
- return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr)
- }
-
- return uint(num), nil
-}
-
-// getBits sets all bits in the range [min, max], modulo the given step size.
-func getBits(min, max, step uint) uint64 {
- var bits uint64
-
- // If step is 1, use shifts.
- if step == 1 {
- return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
- }
-
- // Else, use a simple loop.
- for i := min; i <= max; i += step {
- bits |= 1 << i
- }
- return bits
-}
-
-// all returns all bits within the given bounds. (plus the star bit)
-func all(r bounds) uint64 {
- return getBits(r.min, r.max, 1) | starBit
-}
-
-// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
-func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
- switch descriptor {
- case "@yearly", "@annually":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: 1 << dom.min,
- Month: 1 << months.min,
- Dow: all(dow),
- Location: loc,
- }, nil
-
- case "@monthly":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: 1 << dom.min,
- Month: all(months),
- Dow: all(dow),
- Location: loc,
- }, nil
-
- case "@weekly":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: all(dom),
- Month: all(months),
- Dow: 1 << dow.min,
- Location: loc,
- }, nil
-
- case "@daily", "@midnight":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: all(dom),
- Month: all(months),
- Dow: all(dow),
- Location: loc,
- }, nil
-
- case "@hourly":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: all(hours),
- Dom: all(dom),
- Month: all(months),
- Dow: all(dow),
- Location: loc,
- }, nil
-
- }
-
- const every = "@every "
- if strings.HasPrefix(descriptor, every) {
- duration, err := time.ParseDuration(descriptor[len(every):])
- if err != nil {
- return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
- }
- return Every(duration), nil
- }
-
- return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
-}
diff --git a/etcd/vendor/github.com/robfig/cron/v3/spec.go b/etcd/vendor/github.com/robfig/cron/v3/spec.go
deleted file mode 100644
index fa1e241e5f..0000000000
--- a/etcd/vendor/github.com/robfig/cron/v3/spec.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package cron
-
-import "time"
-
-// SpecSchedule specifies a duty cycle (to the second granularity), based on a
-// traditional crontab specification. It is computed initially and stored as bit sets.
-type SpecSchedule struct {
- Second, Minute, Hour, Dom, Month, Dow uint64
-
- // Override location for this schedule.
- Location *time.Location
-}
-
-// bounds provides a range of acceptable values (plus a map of name to value).
-type bounds struct {
- min, max uint
- names map[string]uint
-}
-
-// The bounds for each field.
-var (
- seconds = bounds{0, 59, nil}
- minutes = bounds{0, 59, nil}
- hours = bounds{0, 23, nil}
- dom = bounds{1, 31, nil}
- months = bounds{1, 12, map[string]uint{
- "jan": 1,
- "feb": 2,
- "mar": 3,
- "apr": 4,
- "may": 5,
- "jun": 6,
- "jul": 7,
- "aug": 8,
- "sep": 9,
- "oct": 10,
- "nov": 11,
- "dec": 12,
- }}
- dow = bounds{0, 6, map[string]uint{
- "sun": 0,
- "mon": 1,
- "tue": 2,
- "wed": 3,
- "thu": 4,
- "fri": 5,
- "sat": 6,
- }}
-)
-
-const (
- // Set the top bit if a star was included in the expression.
- starBit = 1 << 63
-)
-
-// Next returns the next time this schedule is activated, greater than the given
-// time. If no time can be found to satisfy the schedule, return the zero time.
-func (s *SpecSchedule) Next(t time.Time) time.Time {
- // General approach
- //
- // For Month, Day, Hour, Minute, Second:
- // Check if the time value matches. If yes, continue to the next field.
- // If the field doesn't match the schedule, then increment the field until it matches.
- // While incrementing the field, a wrap-around brings it back to the beginning
- // of the field list (since it is necessary to re-verify previous field
- // values)
-
- // Convert the given time into the schedule's timezone, if one is specified.
- // Save the original timezone so we can convert back after we find a time.
- // Note that schedules without a time zone specified (time.Local) are treated
- // as local to the time provided.
- origLocation := t.Location()
- loc := s.Location
- if loc == time.Local {
- loc = t.Location()
- }
- if s.Location != time.Local {
- t = t.In(s.Location)
- }
-
- // Start at the earliest possible time (the upcoming second).
- t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
-
- // This flag indicates whether a field has been incremented.
- added := false
-
- // If no time is found within five years, return zero.
- yearLimit := t.Year() + 5
-
-WRAP:
- if t.Year() > yearLimit {
- return time.Time{}
- }
-
- // Find the first applicable month.
- // If it's this month, then do nothing.
- for 1< 12 {
- t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
- } else {
- t = t.Add(time.Duration(-t.Hour()) * time.Hour)
- }
- }
-
- if t.Day() == 1 {
- goto WRAP
- }
- }
-
- for 1< 0
- dowMatch bool = 1< 0
- )
- if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
- return domMatch && dowMatch
- }
- return domMatch || dowMatch
-}
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/.gitignore b/etcd/vendor/github.com/stoewer/go-strcase/.gitignore
deleted file mode 100644
index db5247b944..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/.gitignore
+++ /dev/null
@@ -1,17 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-vendor
-doc
-
-# Temporary files
-*~
-*.swp
-
-# Editor and IDE config
-.idea
-*.iml
-.vscode
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/.golangci.yml b/etcd/vendor/github.com/stoewer/go-strcase/.golangci.yml
deleted file mode 100644
index 7f98d55c42..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/.golangci.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-run:
- deadline: 10m
-
-linters:
- enable:
- - dupl
- - goconst
- - gocyclo
- - godox
- - gosec
- - interfacer
- - lll
- - maligned
- - misspell
- - prealloc
- - stylecheck
- - unconvert
- - unparam
- - errcheck
- - golint
- - gofmt
- disable: []
- fast: false
-
-issues:
- exclude-use-default: false
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/LICENSE b/etcd/vendor/github.com/stoewer/go-strcase/LICENSE
deleted file mode 100644
index a105a3819a..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017, Adrian Stoewer
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/README.md b/etcd/vendor/github.com/stoewer/go-strcase/README.md
deleted file mode 100644
index 0e8635d801..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-[](https://circleci.com/gh/stoewer/go-strcase/tree/master)
-[](https://codecov.io/gh/stoewer/go-strcase)
-[](https://pkg.go.dev/github.com/stoewer/go-strcase)
----
-
-Go strcase
-==========
-
-The package `strcase` converts between different kinds of naming formats such as camel case
-(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`).
-The package is designed to work only with strings consisting of standard ASCII letters.
-Unicode is currently not supported.
-
-Versioning and stability
-------------------------
-
-Although the master branch is supposed to remain always backward compatible, the repository
-contains version tags in order to support vendoring tools.
-The tag names follow semantic versioning conventions and have the following format `v1.0.0`.
-This package supports Go modules introduced with version 1.11.
-
-Example
--------
-
-```go
-import "github.com/stoewer/go-strcase"
-
-var snake = strcase.SnakeCase("CamelCase")
-```
-
-Dependencies
-------------
-
-### Build dependencies
-
-* none
-
-### Test dependencies
-
-* `github.com/stretchr/testify`
-
-Run linters and unit tests
---------------------------
-
-To run the static code analysis, linters and tests use the following commands:
-
-```
-golangci-lint run --config .golangci.yml ./...
-go test ./...
-```
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/camel.go b/etcd/vendor/github.com/stoewer/go-strcase/camel.go
deleted file mode 100644
index 5c233cc8f1..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/camel.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2017, A. Stoewer
-// All rights reserved.
-
-package strcase
-
-import (
- "strings"
-)
-
-// UpperCamelCase converts a string into camel case starting with a upper case letter.
-func UpperCamelCase(s string) string {
- return camelCase(s, true)
-}
-
-// LowerCamelCase converts a string into camel case starting with a lower case letter.
-func LowerCamelCase(s string) string {
- return camelCase(s, false)
-}
-
-func camelCase(s string, upper bool) string {
- s = strings.TrimSpace(s)
- buffer := make([]rune, 0, len(s))
-
- stringIter(s, func(prev, curr, next rune) {
- if !isDelimiter(curr) {
- if isDelimiter(prev) || (upper && prev == 0) {
- buffer = append(buffer, toUpper(curr))
- } else if isLower(prev) {
- buffer = append(buffer, curr)
- } else {
- buffer = append(buffer, toLower(curr))
- }
- }
- })
-
- return string(buffer)
-}
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/doc.go b/etcd/vendor/github.com/stoewer/go-strcase/doc.go
deleted file mode 100644
index 3e441ca3ef..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (c) 2017, A. Stoewer
-// All rights reserved.
-
-// Package strcase converts between different kinds of naming formats such as camel case
-// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed
-// to work only with strings consisting of standard ASCII letters. Unicode is currently not
-// supported.
-package strcase
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/helper.go b/etcd/vendor/github.com/stoewer/go-strcase/helper.go
deleted file mode 100644
index ecad589143..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/helper.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2017, A. Stoewer
-// All rights reserved.
-
-package strcase
-
-// isLower checks if a character is lower case. More precisely it evaluates if it is
-// in the range of ASCII character 'a' to 'z'.
-func isLower(ch rune) bool {
- return ch >= 'a' && ch <= 'z'
-}
-
-// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower
-// case counterpart. Other characters remain the same.
-func toLower(ch rune) rune {
- if ch >= 'A' && ch <= 'Z' {
- return ch + 32
- }
- return ch
-}
-
-// isLower checks if a character is upper case. More precisely it evaluates if it is
-// in the range of ASCII characters 'A' to 'Z'.
-func isUpper(ch rune) bool {
- return ch >= 'A' && ch <= 'Z'
-}
-
-// toLower converts a character in the range of ASCII characters 'a' to 'z' to its lower
-// case counterpart. Other characters remain the same.
-func toUpper(ch rune) rune {
- if ch >= 'a' && ch <= 'z' {
- return ch - 32
- }
- return ch
-}
-
-// isSpace checks if a character is some kind of whitespace.
-func isSpace(ch rune) bool {
- return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
-}
-
-// isDelimiter checks if a character is some kind of whitespace or '_' or '-'.
-func isDelimiter(ch rune) bool {
- return ch == '-' || ch == '_' || isSpace(ch)
-}
-
-// iterFunc is a callback that is called fro a specific position in a string. Its arguments are the
-// rune at the respective string position as well as the previous and the next rune. If curr is at the
-// first position of the string prev is zero. If curr is at the end of the string next is zero.
-type iterFunc func(prev, curr, next rune)
-
-// stringIter iterates over a string, invoking the callback for every single rune in the string.
-func stringIter(s string, callback iterFunc) {
- var prev rune
- var curr rune
- for _, next := range s {
- if curr == 0 {
- prev = curr
- curr = next
- continue
- }
-
- callback(prev, curr, next)
-
- prev = curr
- curr = next
- }
-
- if len(s) > 0 {
- callback(prev, curr, 0)
- }
-}
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/kebab.go b/etcd/vendor/github.com/stoewer/go-strcase/kebab.go
deleted file mode 100644
index e9a6487579..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/kebab.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) 2017, A. Stoewer
-// All rights reserved.
-
-package strcase
-
-// KebabCase converts a string into kebab case.
-func KebabCase(s string) string {
- return delimiterCase(s, '-', false)
-}
-
-// UpperKebabCase converts a string into kebab case with capital letters.
-func UpperKebabCase(s string) string {
- return delimiterCase(s, '-', true)
-}
diff --git a/etcd/vendor/github.com/stoewer/go-strcase/snake.go b/etcd/vendor/github.com/stoewer/go-strcase/snake.go
deleted file mode 100644
index 1b216e20cf..0000000000
--- a/etcd/vendor/github.com/stoewer/go-strcase/snake.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2017, A. Stoewer
-// All rights reserved.
-
-package strcase
-
-import (
- "strings"
-)
-
-// SnakeCase converts a string into snake case.
-func SnakeCase(s string) string {
- return delimiterCase(s, '_', false)
-}
-
-// UpperSnakeCase converts a string into snake case with capital letters.
-func UpperSnakeCase(s string) string {
- return delimiterCase(s, '_', true)
-}
-
-// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed
-// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE.
-func delimiterCase(s string, delimiter rune, upperCase bool) string {
- s = strings.TrimSpace(s)
- buffer := make([]rune, 0, len(s)+3)
-
- adjustCase := toLower
- if upperCase {
- adjustCase = toUpper
- }
-
- var prev rune
- var curr rune
- for _, next := range s {
- if isDelimiter(curr) {
- if !isDelimiter(prev) {
- buffer = append(buffer, delimiter)
- }
- } else if isUpper(curr) {
- if isLower(prev) || (isUpper(prev) && isLower(next)) {
- buffer = append(buffer, delimiter)
- }
- buffer = append(buffer, adjustCase(curr))
- } else if curr != 0 {
- buffer = append(buffer, adjustCase(curr))
- }
- prev = curr
- curr = next
- }
-
- if len(s) > 0 {
- if isUpper(curr) && isLower(prev) && prev != 0 {
- buffer = append(buffer, delimiter)
- }
- buffer = append(buffer, adjustCase(curr))
- }
-
- return string(buffer)
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
deleted file mode 100644
index 92b8cf73c9..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
- "strings"
-)
-
-// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
-// Please be careful of intitialization order - for example, if you change
-// the global propagator, the DefaultClient might still be using the old one.
-var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
-
-// Get is a convenient replacement for http.Get that adds a span around the request.
-func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
- if err != nil {
- return nil, err
- }
- return DefaultClient.Do(req)
-}
-
-// Head is a convenient replacement for http.Head that adds a span around the request.
-func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
- if err != nil {
- return nil, err
- }
- return DefaultClient.Do(req)
-}
-
-// Post is a convenient replacement for http.Post that adds a span around the request.
-func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", contentType)
- return DefaultClient.Do(req)
-}
-
-// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
-func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
- return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
deleted file mode 100644
index 728be09d0e..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Attribute keys that can be added to a span.
-const (
- ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
- ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded)
- WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
- WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
-)
-
-// Server HTTP metrics.
-const (
- RequestCount = "http.server.request_count" // Incoming request count total
- RequestContentLength = "http.server.request_content_length" // Incoming request bytes total
- ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total
- ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds
-)
-
-// Filter is a predicate used to determine whether a given http.request should
-// be traced. A Filter must return true if the request should be traced.
-type Filter func(*http.Request) bool
-
-func newTracer(tp trace.TracerProvider) trace.Tracer {
- return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(SemVersion()))
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
deleted file mode 100644
index d0337f3a5e..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "net/http"
- "net/http/httptrace"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/global"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-const (
- instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-)
-
-// config represents the configuration options available for the http.Handler
-// and http.Transport types.
-type config struct {
- Tracer trace.Tracer
- Meter metric.Meter
- Propagators propagation.TextMapPropagator
- SpanStartOptions []trace.SpanStartOption
- PublicEndpoint bool
- PublicEndpointFn func(*http.Request) bool
- ReadEvent bool
- WriteEvent bool
- Filters []Filter
- SpanNameFormatter func(string, *http.Request) string
- ClientTrace func(context.Context) *httptrace.ClientTrace
-
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
-}
-
-// Option interface used for setting optional config properties.
-type Option interface {
- apply(*config)
-}
-
-type optionFunc func(*config)
-
-func (o optionFunc) apply(c *config) {
- o(c)
-}
-
-// newConfig creates a new config struct and applies opts to it.
-func newConfig(opts ...Option) *config {
- c := &config{
- Propagators: otel.GetTextMapPropagator(),
- MeterProvider: global.MeterProvider(),
- }
- for _, opt := range opts {
- opt.apply(c)
- }
-
- // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context.
- if c.TracerProvider != nil {
- c.Tracer = newTracer(c.TracerProvider)
- }
-
- c.Meter = c.MeterProvider.Meter(
- instrumentationName,
- metric.WithInstrumentationVersion(SemVersion()),
- )
-
- return c
-}
-
-// WithTracerProvider specifies a tracer provider to use for creating a tracer.
-// If none is specified, the global provider is used.
-func WithTracerProvider(provider trace.TracerProvider) Option {
- return optionFunc(func(cfg *config) {
- if provider != nil {
- cfg.TracerProvider = provider
- }
- })
-}
-
-// WithMeterProvider specifies a meter provider to use for creating a meter.
-// If none is specified, the global provider is used.
-func WithMeterProvider(provider metric.MeterProvider) Option {
- return optionFunc(func(cfg *config) {
- if provider != nil {
- cfg.MeterProvider = provider
- }
- })
-}
-
-// WithPublicEndpoint configures the Handler to link the span with an incoming
-// span context. If this option is not provided, then the association is a child
-// association instead of a link.
-func WithPublicEndpoint() Option {
- return optionFunc(func(c *config) {
- c.PublicEndpoint = true
- })
-}
-
-// WithPublicEndpointFn runs with every request, and allows conditionnally
-// configuring the Handler to link the span with an incoming span context. If
-// this option is not provided or returns false, then the association is a
-// child association instead of a link.
-// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
-func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
- return optionFunc(func(c *config) {
- c.PublicEndpointFn = fn
- })
-}
-
-// WithPropagators configures specific propagators. If this
-// option isn't specified, then the global TextMapPropagator is used.
-func WithPropagators(ps propagation.TextMapPropagator) Option {
- return optionFunc(func(c *config) {
- if ps != nil {
- c.Propagators = ps
- }
- })
-}
-
-// WithSpanOptions configures an additional set of
-// trace.SpanOptions, which are applied to each new span.
-func WithSpanOptions(opts ...trace.SpanStartOption) Option {
- return optionFunc(func(c *config) {
- c.SpanStartOptions = append(c.SpanStartOptions, opts...)
- })
-}
-
-// WithFilter adds a filter to the list of filters used by the handler.
-// If any filter indicates to exclude a request then the request will not be
-// traced. All filters must allow a request to be traced for a Span to be created.
-// If no filters are provided then all requests are traced.
-// Filters will be invoked for each processed request, it is advised to make them
-// simple and fast.
-func WithFilter(f Filter) Option {
- return optionFunc(func(c *config) {
- c.Filters = append(c.Filters, f)
- })
-}
-
-type event int
-
-// Different types of events that can be recorded, see WithMessageEvents.
-const (
- ReadEvents event = iota
- WriteEvents
-)
-
-// WithMessageEvents configures the Handler to record the specified events
-// (span.AddEvent) on spans. By default only summary attributes are added at the
-// end of the request.
-//
-// Valid events are:
-// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
-// using the ReadBytesKey
-// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write
-// using the WriteBytesKey
-func WithMessageEvents(events ...event) Option {
- return optionFunc(func(c *config) {
- for _, e := range events {
- switch e {
- case ReadEvents:
- c.ReadEvent = true
- case WriteEvents:
- c.WriteEvent = true
- }
- }
- })
-}
-
-// WithSpanNameFormatter takes a function that will be called on every
-// request and the returned string will become the Span Name.
-func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
- return optionFunc(func(c *config) {
- c.SpanNameFormatter = f
- })
-}
-
-// WithClientTrace takes a function that returns client trace instance that will be
-// applied to the requests sent through the otelhttp Transport.
-func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
- return optionFunc(func(c *config) {
- c.ClientTrace = f
- })
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
deleted file mode 100644
index 38c7f01c71..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otelhttp provides an http.Handler and functions that are intended
-// to be used to add tracing by wrapping existing handlers (with Handler) and
-// routes WithRouteTag.
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
deleted file mode 100644
index 4c037f1d8e..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "io"
- "net/http"
- "time"
-
- "github.com/felixge/httpsnoop"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
- "go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-var _ http.Handler = &Handler{}
-
-// Handler is http middleware that corresponds to the http.Handler interface and
-// is designed to wrap a http.Mux (or equivalent), while individual routes on
-// the mux are wrapped with WithRouteTag. A Handler will add various attributes
-// to the span using the attribute.Keys defined in this package.
-type Handler struct {
- operation string
- handler http.Handler
-
- tracer trace.Tracer
- meter metric.Meter
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- readEvent bool
- writeEvent bool
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- counters map[string]syncint64.Counter
- valueRecorders map[string]syncfloat64.Histogram
- publicEndpoint bool
- publicEndpointFn func(*http.Request) bool
-}
-
-func defaultHandlerFormatter(operation string, _ *http.Request) string {
- return operation
-}
-
-// NewHandler wraps the passed handler, functioning like middleware, in a span
-// named after the operation and with any provided Options.
-func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
- h := Handler{
- handler: handler,
- operation: operation,
- }
-
- defaultOpts := []Option{
- WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
- WithSpanNameFormatter(defaultHandlerFormatter),
- }
-
- c := newConfig(append(defaultOpts, opts...)...)
- h.configure(c)
- h.createMeasures()
-
- return &h
-}
-
-func (h *Handler) configure(c *config) {
- h.tracer = c.Tracer
- h.meter = c.Meter
- h.propagators = c.Propagators
- h.spanStartOptions = c.SpanStartOptions
- h.readEvent = c.ReadEvent
- h.writeEvent = c.WriteEvent
- h.filters = c.Filters
- h.spanNameFormatter = c.SpanNameFormatter
- h.publicEndpoint = c.PublicEndpoint
- h.publicEndpointFn = c.PublicEndpointFn
-}
-
-func handleErr(err error) {
- if err != nil {
- otel.Handle(err)
- }
-}
-
-func (h *Handler) createMeasures() {
- h.counters = make(map[string]syncint64.Counter)
- h.valueRecorders = make(map[string]syncfloat64.Histogram)
-
- requestBytesCounter, err := h.meter.SyncInt64().Counter(RequestContentLength)
- handleErr(err)
-
- responseBytesCounter, err := h.meter.SyncInt64().Counter(ResponseContentLength)
- handleErr(err)
-
- serverLatencyMeasure, err := h.meter.SyncFloat64().Histogram(ServerLatency)
- handleErr(err)
-
- h.counters[RequestContentLength] = requestBytesCounter
- h.counters[ResponseContentLength] = responseBytesCounter
- h.valueRecorders[ServerLatency] = serverLatencyMeasure
-}
-
-// ServeHTTP serves HTTP requests (http.Handler).
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- requestStartTime := time.Now()
- for _, f := range h.filters {
- if !f(r) {
- // Simply pass through to the handler if a filter rejects the request
- h.handler.ServeHTTP(w, r)
- return
- }
- }
-
- ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
- opts := h.spanStartOptions
- if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
- opts = append(opts, trace.WithNewRoot())
- // Linking incoming span context if any for public endpoint.
- if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
- opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
- }
- }
-
- opts = append([]trace.SpanStartOption{
- trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...),
- trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...),
- trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...),
- }, opts...) // start with the configured options
-
- tracer := h.tracer
-
- if tracer == nil {
- if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
- tracer = newTracer(span.TracerProvider())
- } else {
- tracer = newTracer(otel.GetTracerProvider())
- }
- }
-
- ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
- defer span.End()
-
- readRecordFunc := func(int64) {}
- if h.readEvent {
- readRecordFunc = func(n int64) {
- span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
- }
- }
-
- var bw bodyWrapper
- // if request body is nil we don't want to mutate the body as it will affect
- // the identity of it in an unforeseeable way because we assert ReadCloser
- // fulfills a certain interface and it is indeed nil.
- if r.Body != nil {
- bw.ReadCloser = r.Body
- bw.record = readRecordFunc
- r.Body = &bw
- }
-
- writeRecordFunc := func(int64) {}
- if h.writeEvent {
- writeRecordFunc = func(n int64) {
- span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
- }
- }
-
- rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators}
-
- // Wrap w to use our ResponseWriter methods while also exposing
- // other interfaces that w may implement (http.CloseNotifier,
- // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
-
- w = httpsnoop.Wrap(w, httpsnoop.Hooks{
- Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
- return rww.Header
- },
- Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
- return rww.Write
- },
- WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
- return rww.WriteHeader
- },
- })
-
- labeler := &Labeler{}
- ctx = injectLabeler(ctx, labeler)
-
- h.handler.ServeHTTP(w, r.WithContext(ctx))
-
- setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
-
- // Add metrics
- attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...)
- h.counters[RequestContentLength].Add(ctx, bw.read, attributes...)
- h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-
- h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, attributes...)
-}
-
-func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
- attributes := []attribute.KeyValue{}
-
- // TODO: Consider adding an event after each read and write, possibly as an
- // option (defaulting to off), so as to not create needlessly verbose spans.
- if read > 0 {
- attributes = append(attributes, ReadBytesKey.Int64(read))
- }
- if rerr != nil && rerr != io.EOF {
- attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
- }
- if wrote > 0 {
- attributes = append(attributes, WroteBytesKey.Int64(wrote))
- }
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...)
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer))
- }
- if werr != nil && werr != io.EOF {
- attributes = append(attributes, WriteErrorKey.String(werr.Error()))
- }
- span.SetAttributes(attributes...)
-}
-
-// WithRouteTag annotates a span with the provided route name using the
-// RouteKey Tag.
-func WithRouteTag(route string, h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- span := trace.SpanFromContext(r.Context())
- span.SetAttributes(semconv.HTTPRouteKey.String(route))
- h.ServeHTTP(w, r)
- })
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
deleted file mode 100644
index 26a51a1805..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
-// the metrics recorded by the net/http instrumentation.
-type Labeler struct {
- mu sync.Mutex
- attributes []attribute.KeyValue
-}
-
-// Add attributes to a Labeler.
-func (l *Labeler) Add(ls ...attribute.KeyValue) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.attributes = append(l.attributes, ls...)
-}
-
-// Get returns a copy of the attributes added to the Labeler.
-func (l *Labeler) Get() []attribute.KeyValue {
- l.mu.Lock()
- defer l.mu.Unlock()
- ret := make([]attribute.KeyValue, len(l.attributes))
- copy(ret, l.attributes)
- return ret
-}
-
-type labelerContextKeyType int
-
-const lablelerContextKey labelerContextKeyType = 0
-
-func injectLabeler(ctx context.Context, l *Labeler) context.Context {
- return context.WithValue(ctx, lablelerContextKey, l)
-}
-
-// LabelerFromContext retrieves a Labeler instance from the provided context if
-// one is available. If no Labeler was found in the provided context a new, empty
-// Labeler is returned and the second return value is false. In this case it is
-// safe to use the Labeler but any attributes added to it will not be used.
-func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
- l, ok := ctx.Value(lablelerContextKey).(*Labeler)
- if !ok {
- l = &Labeler{}
- }
- return l, ok
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
deleted file mode 100644
index fd5e1e9bc7..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/http/httptrace"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Transport implements the http.RoundTripper interface and wraps
-// outbound HTTP(S) requests with a span.
-type Transport struct {
- rt http.RoundTripper
-
- tracer trace.Tracer
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- clientTrace func(context.Context) *httptrace.ClientTrace
-}
-
-var _ http.RoundTripper = &Transport{}
-
-// NewTransport wraps the provided http.RoundTripper with one that
-// starts a span and injects the span context into the outbound request headers.
-//
-// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
-// as the base http.RoundTripper.
-func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
- if base == nil {
- base = http.DefaultTransport
- }
-
- t := Transport{
- rt: base,
- }
-
- defaultOpts := []Option{
- WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
- WithSpanNameFormatter(defaultTransportFormatter),
- }
-
- c := newConfig(append(defaultOpts, opts...)...)
- t.applyConfig(c)
-
- return &t
-}
-
-func (t *Transport) applyConfig(c *config) {
- t.tracer = c.Tracer
- t.propagators = c.Propagators
- t.spanStartOptions = c.SpanStartOptions
- t.filters = c.Filters
- t.spanNameFormatter = c.SpanNameFormatter
- t.clientTrace = c.ClientTrace
-}
-
-func defaultTransportFormatter(_ string, r *http.Request) string {
- return "HTTP " + r.Method
-}
-
-// RoundTrip creates a Span and propagates its context via the provided request's headers
-// before handing the request to the configured base RoundTripper. The created span will
-// end when the response body is closed or when a read from the body returns io.EOF.
-func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
- for _, f := range t.filters {
- if !f(r) {
- // Simply pass through to the base RoundTripper if a filter rejects the request
- return t.rt.RoundTrip(r)
- }
- }
-
- tracer := t.tracer
-
- if tracer == nil {
- if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
- tracer = newTracer(span.TracerProvider())
- } else {
- tracer = newTracer(otel.GetTracerProvider())
- }
- }
-
- opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
-
- ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
-
- if t.clientTrace != nil {
- ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
- }
-
- r = r.WithContext(ctx)
- span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...)
- t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
-
- res, err := t.rt.RoundTrip(r)
- if err != nil {
- span.RecordError(err)
- span.SetStatus(codes.Error, err.Error())
- span.End()
- return res, err
- }
-
- span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...)
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode))
- res.Body = newWrappedBody(span, res.Body)
-
- return res, err
-}
-
-// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
-// io.ReadCloser. If the passed body implements io.Writer, the returned value
-// will implement io.ReadWriteCloser.
-func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
- // The successful protocol switch responses will have a body that
- // implement an io.ReadWriteCloser. Ensure this interface type continues
- // to be satisfied if that is the case.
- if _, ok := body.(io.ReadWriteCloser); ok {
- return &wrappedBody{span: span, body: body}
- }
-
- // Remove the implementation of the io.ReadWriteCloser and only implement
- // the io.ReadCloser.
- return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
-}
-
-// wrappedBody is the response body type returned by the transport
-// instrumentation to complete a span. Errors encountered when using the
-// response body are recorded in span tracking the response.
-//
-// The span tracking the response is ended when this body is closed.
-//
-// If the response body implements the io.Writer interface (i.e. for
-// successful protocol switches), the wrapped body also will.
-type wrappedBody struct {
- span trace.Span
- body io.ReadCloser
-}
-
-var _ io.ReadWriteCloser = &wrappedBody{}
-
-func (wb *wrappedBody) Write(p []byte) (int, error) {
- // This will not panic given the guard in newWrappedBody.
- n, err := wb.body.(io.Writer).Write(p)
- if err != nil {
- wb.span.RecordError(err)
- wb.span.SetStatus(codes.Error, err.Error())
- }
- return n, err
-}
-
-func (wb *wrappedBody) Read(b []byte) (int, error) {
- n, err := wb.body.Read(b)
-
- switch err {
- case nil:
- // nothing to do here but fall through to the return
- case io.EOF:
- wb.span.End()
- default:
- wb.span.RecordError(err)
- wb.span.SetStatus(codes.Error, err.Error())
- }
- return n, err
-}
-
-func (wb *wrappedBody) Close() error {
- wb.span.End()
- if wb.body != nil {
- return wb.body.Close()
- }
- return nil
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
deleted file mode 100644
index 56e4736062..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-// Version is the current release version of the otelhttp instrumentation.
-func Version() string {
- return "0.35.0"
- // This string is updated by the pre_release.sh script during release
-}
-
-// SemVersion is the semantic version to be supplied to tracer/meter creation.
-func SemVersion() string {
- return "semver:" + Version()
-}
diff --git a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
deleted file mode 100644
index da6468c4e5..0000000000
--- a/etcd/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
-
- "go.opentelemetry.io/otel/propagation"
-)
-
-var _ io.ReadCloser = &bodyWrapper{}
-
-// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
-// of bytes read and the last error.
-type bodyWrapper struct {
- io.ReadCloser
- record func(n int64) // must not be nil
-
- read int64
- err error
-}
-
-func (w *bodyWrapper) Read(b []byte) (int, error) {
- n, err := w.ReadCloser.Read(b)
- n1 := int64(n)
- w.read += n1
- w.err = err
- w.record(n1)
- return n, err
-}
-
-func (w *bodyWrapper) Close() error {
- return w.ReadCloser.Close()
-}
-
-var _ http.ResponseWriter = &respWriterWrapper{}
-
-// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
-// bytes written, the last error, and to catch the returned statusCode
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
-// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
-// that may be useful when using it in real life situations.
-type respWriterWrapper struct {
- http.ResponseWriter
- record func(n int64) // must not be nil
-
- // used to inject the header
- ctx context.Context
-
- props propagation.TextMapPropagator
-
- written int64
- statusCode int
- err error
- wroteHeader bool
-}
-
-func (w *respWriterWrapper) Header() http.Header {
- return w.ResponseWriter.Header()
-}
-
-func (w *respWriterWrapper) Write(p []byte) (int, error) {
- if !w.wroteHeader {
- w.WriteHeader(http.StatusOK)
- }
- n, err := w.ResponseWriter.Write(p)
- n1 := int64(n)
- w.record(n1)
- w.written += n1
- w.err = err
- return n, err
-}
-
-func (w *respWriterWrapper) WriteHeader(statusCode int) {
- if w.wroteHeader {
- return
- }
- w.wroteHeader = true
- w.statusCode = statusCode
- w.ResponseWriter.WriteHeader(statusCode)
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/LICENSE b/etcd/vendor/go.opentelemetry.io/otel/metric/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/config.go b/etcd/vendor/go.opentelemetry.io/otel/metric/config.go
deleted file mode 100644
index 621e4c5fcb..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-// MeterConfig contains options for Meters.
-type MeterConfig struct {
- instrumentationVersion string
- schemaURL string
-}
-
-// InstrumentationVersion is the version of the library providing instrumentation.
-func (cfg MeterConfig) InstrumentationVersion() string {
- return cfg.instrumentationVersion
-}
-
-// SchemaURL is the schema_url of the library providing instrumentation.
-func (cfg MeterConfig) SchemaURL() string {
- return cfg.schemaURL
-}
-
-// MeterOption is an interface for applying Meter options.
-type MeterOption interface {
- // applyMeter is used to set a MeterOption value of a MeterConfig.
- applyMeter(MeterConfig) MeterConfig
-}
-
-// NewMeterConfig creates a new MeterConfig and applies
-// all the given options.
-func NewMeterConfig(opts ...MeterOption) MeterConfig {
- var config MeterConfig
- for _, o := range opts {
- config = o.applyMeter(config)
- }
- return config
-}
-
-type meterOptionFunc func(MeterConfig) MeterConfig
-
-func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
- return fn(cfg)
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.instrumentationVersion = version
- return config
- })
-}
-
-// WithSchemaURL sets the schema URL.
-func WithSchemaURL(schemaURL string) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.schemaURL = schemaURL
- return config
- })
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/doc.go b/etcd/vendor/go.opentelemetry.io/otel/metric/doc.go
deleted file mode 100644
index bd6f434372..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package metric provides an implementation of the metrics part of the
-OpenTelemetry API.
-
-This package is currently in a pre-GA phase. Backwards incompatible changes
-may be introduced in subsequent minor version releases as we work to track the
-evolving OpenTelemetry specification and user feedback.
-*/
-package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/global/global.go b/etcd/vendor/go.opentelemetry.io/otel/metric/global/global.go
deleted file mode 100644
index 05a67c2e99..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/global/global.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/metric/global"
-
-import (
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/internal/global"
-)
-
-// Meter returns a Meter from the global MeterProvider. The
-// instrumentationName must be the name of the library providing
-// instrumentation. This name may be the same as the instrumented code only if
-// that code provides built-in instrumentation. If the instrumentationName is
-// empty, then a implementation defined default name will be used instead.
-//
-// This is short for MeterProvider().Meter(name).
-func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
- return MeterProvider().Meter(instrumentationName, opts...)
-}
-
-// MeterProvider returns the registered global trace provider.
-// If none is registered then a No-op MeterProvider is returned.
-func MeterProvider() metric.MeterProvider {
- return global.MeterProvider()
-}
-
-// SetMeterProvider registers `mp` as the global meter provider.
-func SetMeterProvider(mp metric.MeterProvider) {
- global.SetMeterProvider(mp)
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
deleted file mode 100644
index 370715f694..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
-
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
-
- // Gauge creates an instrument for recording the current value.
- Gauge(name string, opts ...instrument.Option) (Gauge, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// Gauge is an instrument that records independent readings.
-type Gauge interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
deleted file mode 100644
index 41a561bc4a..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
-
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
-
- // Gauge creates an instrument for recording the current value.
- Gauge(name string, opts ...instrument.Option) (Gauge, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
-
-// Gauge is an instrument that records independent readings.
-type Gauge interface {
- // Observe records the state of the instrument.
- //
- // It is only valid to call this within a callback. If called outside of the
- // registered callback it should have no effect on the instrument, and an
- // error will be reported via the error handler.
- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue)
-
- instrument.Asynchronous
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/config.go b/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/config.go
deleted file mode 100644
index 8778bce161..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package instrument // import "go.opentelemetry.io/otel/metric/instrument"
-
-import "go.opentelemetry.io/otel/metric/unit"
-
-// Config contains options for metric instrument descriptors.
-type Config struct {
- description string
- unit unit.Unit
-}
-
-// Description describes the instrument in human-readable terms.
-func (cfg Config) Description() string {
- return cfg.description
-}
-
-// Unit describes the measurement unit for an instrument.
-func (cfg Config) Unit() unit.Unit {
- return cfg.unit
-}
-
-// Option is an interface for applying metric instrument options.
-type Option interface {
- applyInstrument(Config) Config
-}
-
-// NewConfig creates a new Config and applies all the given options.
-func NewConfig(opts ...Option) Config {
- var config Config
- for _, o := range opts {
- config = o.applyInstrument(config)
- }
- return config
-}
-
-type optionFunc func(Config) Config
-
-func (fn optionFunc) applyInstrument(cfg Config) Config {
- return fn(cfg)
-}
-
-// WithDescription applies provided description.
-func WithDescription(desc string) Option {
- return optionFunc(func(cfg Config) Config {
- cfg.description = desc
- return cfg
- })
-}
-
-// WithUnit applies provided unit.
-func WithUnit(u unit.Unit) Option {
- return optionFunc(func(cfg Config) Config {
- cfg.unit = u
- return cfg
- })
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
deleted file mode 100644
index e1bbb850d7..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package instrument // import "go.opentelemetry.io/otel/metric/instrument"
-
-// Asynchronous instruments are instruments that are updated within a Callback.
-// If an instrument is observed outside of it's callback it should be an error.
-//
-// This interface is used as a grouping mechanism.
-type Asynchronous interface {
- asynchronous()
-}
-
-// Synchronous instruments are updated in line with application code.
-//
-// This interface is used as a grouping mechanism.
-type Synchronous interface {
- synchronous()
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
deleted file mode 100644
index 435db1127b..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
- // Histogram creates an instrument for recording a distribution of values.
- Histogram(name string, opts ...instrument.Option) (Histogram, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// Histogram is an instrument that records a distribution of values.
-type Histogram interface {
- // Record adds an additional value to the distribution.
- Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
deleted file mode 100644
index c77a467286..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
-)
-
-// InstrumentProvider provides access to individual instruments.
-type InstrumentProvider interface {
- // Counter creates an instrument for recording increasing values.
- Counter(name string, opts ...instrument.Option) (Counter, error)
- // UpDownCounter creates an instrument for recording changes of a value.
- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error)
- // Histogram creates an instrument for recording a distribution of values.
- Histogram(name string, opts ...instrument.Option) (Histogram, error)
-}
-
-// Counter is an instrument that records increasing values.
-type Counter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// UpDownCounter is an instrument that records increasing or decreasing values.
-type UpDownCounter interface {
- // Add records a change to the counter.
- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
-
-// Histogram is an instrument that records a distribution of values.
-type Histogram interface {
- // Record adds an additional value to the distribution.
- Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
-
- instrument.Synchronous
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
deleted file mode 100644
index aed8b6660a..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/metric/internal/global"
-
-import (
- "context"
- "sync/atomic"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
-)
-
-type afCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //asyncfloat64.Counter
-
- instrument.Asynchronous
-}
-
-func (i *afCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *afCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncfloat64.Counter)
- }
- return nil
-}
-
-type afUpDownCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //asyncfloat64.UpDownCounter
-
- instrument.Asynchronous
-}
-
-func (i *afUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *afUpDownCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncfloat64.UpDownCounter)
- }
- return nil
-}
-
-type afGauge struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //asyncfloat64.Gauge
-
- instrument.Asynchronous
-}
-
-func (i *afGauge) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *afGauge) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncfloat64.Gauge)
- }
- return nil
-}
-
-type aiCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //asyncint64.Counter
-
- instrument.Asynchronous
-}
-
-func (i *aiCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncInt64().Counter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncint64.Counter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *aiCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncint64.Counter)
- }
- return nil
-}
-
-type aiUpDownCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //asyncint64.UpDownCounter
-
- instrument.Asynchronous
-}
-
-func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *aiUpDownCounter) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncint64.UpDownCounter)
- }
- return nil
-}
-
-type aiGauge struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //asyncint64.Gauge
-
- instrument.Asynchronous
-}
-
-func (i *aiGauge) setDelegate(m metric.Meter) {
- ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...)
- }
-}
-
-func (i *aiGauge) unwrap() instrument.Asynchronous {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(asyncint64.Gauge)
- }
- return nil
-}
-
-//Sync Instruments.
-type sfCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //syncfloat64.Counter
-
- instrument.Synchronous
-}
-
-func (i *sfCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncFloat64().Counter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...)
- }
-}
-
-type sfUpDownCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //syncfloat64.UpDownCounter
-
- instrument.Synchronous
-}
-
-func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...)
- }
-}
-
-type sfHistogram struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //syncfloat64.Histogram
-
- instrument.Synchronous
-}
-
-func (i *sfHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...)
- }
-}
-
-type siCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //syncint64.Counter
-
- instrument.Synchronous
-}
-
-func (i *siCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncInt64().Counter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncint64.Counter).Add(ctx, x, attrs...)
- }
-}
-
-type siUpDownCounter struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //syncint64.UpDownCounter
-
- instrument.Synchronous
-}
-
-func (i *siUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...)
- }
-}
-
-type siHistogram struct {
- name string
- opts []instrument.Option
-
- delegate atomic.Value //syncint64.Histogram
-
- instrument.Synchronous
-}
-
-func (i *siHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.SyncInt64().Histogram(i.name, i.opts...)
- if err != nil {
- otel.Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(syncint64.Histogram).Record(ctx, x, attrs...)
- }
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
deleted file mode 100644
index 0fa924f397..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/metric/internal/global"
-
-import (
- "context"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
-)
-
-// meterProvider is a placeholder for a configured SDK MeterProvider.
-//
-// All MeterProvider functionality is forwarded to a delegate once
-// configured.
-type meterProvider struct {
- mtx sync.Mutex
- meters map[il]*meter
-
- delegate metric.MeterProvider
-}
-
-type il struct {
- name string
- version string
-}
-
-// setDelegate configures p to delegate all MeterProvider functionality to
-// provider.
-//
-// All Meters provided prior to this function call are switched out to be
-// Meters provided by provider. All instruments and callbacks are recreated and
-// delegated.
-//
-// It is guaranteed by the caller that this happens only once.
-func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- p.delegate = provider
-
- if len(p.meters) == 0 {
- return
- }
-
- for _, meter := range p.meters {
- meter.setDelegate(provider)
- }
-
- p.meters = nil
-}
-
-// Meter implements MeterProvider.
-func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Meter(name, opts...)
- }
-
- // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map.
-
- c := metric.NewMeterConfig(opts...)
- key := il{
- name: name,
- version: c.InstrumentationVersion(),
- }
-
- if p.meters == nil {
- p.meters = make(map[il]*meter)
- }
-
- if val, ok := p.meters[key]; ok {
- return val
- }
-
- t := &meter{name: name, opts: opts}
- p.meters[key] = t
- return t
-}
-
-// meter is a placeholder for a metric.Meter.
-//
-// All Meter functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopMeter.
-type meter struct {
- name string
- opts []metric.MeterOption
-
- mtx sync.Mutex
- instruments []delegatedInstrument
- callbacks []delegatedCallback
-
- delegate atomic.Value // metric.Meter
-}
-
-type delegatedInstrument interface {
- setDelegate(metric.Meter)
-}
-
-// setDelegate configures m to delegate all Meter functionality to Meters
-// created by provider.
-//
-// All subsequent calls to the Meter methods will be passed to the delegate.
-//
-// It is guaranteed by the caller that this happens only once.
-func (m *meter) setDelegate(provider metric.MeterProvider) {
- meter := provider.Meter(m.name, m.opts...)
- m.delegate.Store(meter)
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for _, inst := range m.instruments {
- inst.setDelegate(meter)
- }
-
- for _, callback := range m.callbacks {
- callback.setDelegate(meter)
- }
-
- m.instruments = nil
- m.callbacks = nil
-}
-
-// AsyncInt64 is the namespace for the Asynchronous Integer instruments.
-//
-// To Observe data with instruments it must be registered in a callback.
-func (m *meter) AsyncInt64() asyncint64.InstrumentProvider {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.AsyncInt64()
- }
- return (*aiInstProvider)(m)
-}
-
-// AsyncFloat64 is the namespace for the Asynchronous Float instruments.
-//
-// To Observe data with instruments it must be registered in a callback.
-func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.AsyncFloat64()
- }
- return (*afInstProvider)(m)
-}
-
-// RegisterCallback captures the function that will be called during Collect.
-//
-// It is only valid to call Observe within the scope of the passed function,
-// and only on the instruments that were registered with this call.
-func (m *meter) RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- insts = unwrapInstruments(insts)
- return del.RegisterCallback(insts, function)
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- m.callbacks = append(m.callbacks, delegatedCallback{
- instruments: insts,
- function: function,
- })
-
- return nil
-}
-
-type wrapped interface {
- unwrap() instrument.Asynchronous
-}
-
-func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous {
- out := make([]instrument.Asynchronous, 0, len(instruments))
-
- for _, inst := range instruments {
- if in, ok := inst.(wrapped); ok {
- out = append(out, in.unwrap())
- } else {
- out = append(out, inst)
- }
- }
-
- return out
-}
-
-// SyncInt64 is the namespace for the Synchronous Integer instruments.
-func (m *meter) SyncInt64() syncint64.InstrumentProvider {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.SyncInt64()
- }
- return (*siInstProvider)(m)
-}
-
-// SyncFloat64 is the namespace for the Synchronous Float instruments.
-func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.SyncFloat64()
- }
- return (*sfInstProvider)(m)
-}
-
-type delegatedCallback struct {
- instruments []instrument.Asynchronous
- function func(context.Context)
-}
-
-func (c *delegatedCallback) setDelegate(m metric.Meter) {
- insts := unwrapInstruments(c.instruments)
- err := m.RegisterCallback(insts, c.function)
- if err != nil {
- otel.Handle(err)
- }
-}
-
-type afInstProvider meter
-
-// Counter creates an instrument for recording increasing values.
-func (ip *afInstProvider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &afCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *afInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &afUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// Gauge creates an instrument for recording the current value.
-func (ip *afInstProvider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &afGauge{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-type aiInstProvider meter
-
-// Counter creates an instrument for recording increasing values.
-func (ip *aiInstProvider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &aiCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *aiInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &aiUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// Gauge creates an instrument for recording the current value.
-func (ip *aiInstProvider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &aiGauge{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-type sfInstProvider meter
-
-// Counter creates an instrument for recording increasing values.
-func (ip *sfInstProvider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &sfCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *sfInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &sfUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// Histogram creates an instrument for recording a distribution of values.
-func (ip *sfInstProvider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &sfHistogram{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-type siInstProvider meter
-
-// Counter creates an instrument for recording increasing values.
-func (ip *siInstProvider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &siCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// UpDownCounter creates an instrument for recording changes of a value.
-func (ip *siInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &siUpDownCounter{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
-
-// Histogram creates an instrument for recording a distribution of values.
-func (ip *siInstProvider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
- ip.mtx.Lock()
- defer ip.mtx.Unlock()
- ctr := &siHistogram{name: name, opts: opts}
- ip.instruments = append(ip.instruments, ctr)
- return ctr, nil
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go b/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
deleted file mode 100644
index 47c0d787d8..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// htmp://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/metric/internal/global"
-
-import (
- "errors"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
-)
-
-var (
- globalMeterProvider = defaultMeterProvider()
-
- delegateMeterOnce sync.Once
-)
-
-type meterProviderHolder struct {
- mp metric.MeterProvider
-}
-
-// MeterProvider is the internal implementation for global.MeterProvider.
-func MeterProvider() metric.MeterProvider {
- return globalMeterProvider.Load().(meterProviderHolder).mp
-}
-
-// SetMeterProvider is the internal implementation for global.SetMeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
- current := MeterProvider()
- if _, cOk := current.(*meterProvider); cOk {
- if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
- // Do not assign the default delegating MeterProvider to delegate
- // to itself.
- global.Error(
- errors.New("no delegate configured in meter provider"),
- "Setting meter provider to it's current value. No delegate will be configured",
- )
- return
- }
- }
-
- delegateMeterOnce.Do(func() {
- if def, ok := current.(*meterProvider); ok {
- def.setDelegate(mp)
- }
- })
- globalMeterProvider.Store(meterProviderHolder{mp: mp})
-}
-
-func defaultMeterProvider() *atomic.Value {
- v := &atomic.Value{}
- v.Store(meterProviderHolder{mp: &meterProvider{}})
- return v
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/meter.go b/etcd/vendor/go.opentelemetry.io/otel/metric/meter.go
deleted file mode 100644
index 21fc1c499f..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
-)
-
-// MeterProvider provides access to named Meter instances, for instrumenting
-// an application or library.
-type MeterProvider interface {
- // Meter creates an instance of a `Meter` interface. The instrumentationName
- // must be the name of the library providing instrumentation. This name may
- // be the same as the instrumented code only if that code provides built-in
- // instrumentation. If the instrumentationName is empty, then a
- // implementation defined default name will be used instead.
- Meter(instrumentationName string, opts ...MeterOption) Meter
-}
-
-// Meter provides access to instrument instances for recording metrics.
-type Meter interface {
- // AsyncInt64 is the namespace for the Asynchronous Integer instruments.
- //
- // To Observe data with instruments it must be registered in a callback.
- AsyncInt64() asyncint64.InstrumentProvider
-
- // AsyncFloat64 is the namespace for the Asynchronous Float instruments
- //
- // To Observe data with instruments it must be registered in a callback.
- AsyncFloat64() asyncfloat64.InstrumentProvider
-
- // RegisterCallback captures the function that will be called during Collect.
- //
- // It is only valid to call Observe within the scope of the passed function,
- // and only on the instruments that were registered with this call.
- RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error
-
- // SyncInt64 is the namespace for the Synchronous Integer instruments
- SyncInt64() syncint64.InstrumentProvider
- // SyncFloat64 is the namespace for the Synchronous Float instruments
- SyncFloat64() syncfloat64.InstrumentProvider
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/noop.go b/etcd/vendor/go.opentelemetry.io/otel/metric/noop.go
deleted file mode 100644
index e8b9a9a145..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/noop.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric/instrument"
- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/asyncint64"
- "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
- "go.opentelemetry.io/otel/metric/instrument/syncint64"
-)
-
-// NewNoopMeterProvider creates a MeterProvider that does not record any metrics.
-func NewNoopMeterProvider() MeterProvider {
- return noopMeterProvider{}
-}
-
-type noopMeterProvider struct{}
-
-func (noopMeterProvider) Meter(string, ...MeterOption) Meter {
- return noopMeter{}
-}
-
-// NewNoopMeter creates a Meter that does not record any metrics.
-func NewNoopMeter() Meter {
- return noopMeter{}
-}
-
-type noopMeter struct{}
-
-// AsyncInt64 creates an instrument that does not record any metrics.
-func (noopMeter) AsyncInt64() asyncint64.InstrumentProvider {
- return nonrecordingAsyncInt64Instrument{}
-}
-
-// AsyncFloat64 creates an instrument that does not record any metrics.
-func (noopMeter) AsyncFloat64() asyncfloat64.InstrumentProvider {
- return nonrecordingAsyncFloat64Instrument{}
-}
-
-// SyncInt64 creates an instrument that does not record any metrics.
-func (noopMeter) SyncInt64() syncint64.InstrumentProvider {
- return nonrecordingSyncInt64Instrument{}
-}
-
-// SyncFloat64 creates an instrument that does not record any metrics.
-func (noopMeter) SyncFloat64() syncfloat64.InstrumentProvider {
- return nonrecordingSyncFloat64Instrument{}
-}
-
-// RegisterCallback creates a register callback that does not record any metrics.
-func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error {
- return nil
-}
-
-type nonrecordingAsyncFloat64Instrument struct {
- instrument.Asynchronous
-}
-
-var (
- _ asyncfloat64.InstrumentProvider = nonrecordingAsyncFloat64Instrument{}
- _ asyncfloat64.Counter = nonrecordingAsyncFloat64Instrument{}
- _ asyncfloat64.UpDownCounter = nonrecordingAsyncFloat64Instrument{}
- _ asyncfloat64.Gauge = nonrecordingAsyncFloat64Instrument{}
-)
-
-func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Option) (asyncfloat64.Counter, error) {
- return n, nil
-}
-
-func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
- return n, nil
-}
-
-func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Option) (asyncfloat64.Gauge, error) {
- return n, nil
-}
-
-func (nonrecordingAsyncFloat64Instrument) Observe(context.Context, float64, ...attribute.KeyValue) {
-
-}
-
-type nonrecordingAsyncInt64Instrument struct {
- instrument.Asynchronous
-}
-
-var (
- _ asyncint64.InstrumentProvider = nonrecordingAsyncInt64Instrument{}
- _ asyncint64.Counter = nonrecordingAsyncInt64Instrument{}
- _ asyncint64.UpDownCounter = nonrecordingAsyncInt64Instrument{}
- _ asyncint64.Gauge = nonrecordingAsyncInt64Instrument{}
-)
-
-func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Option) (asyncint64.Counter, error) {
- return n, nil
-}
-
-func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (asyncint64.UpDownCounter, error) {
- return n, nil
-}
-
-func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Option) (asyncint64.Gauge, error) {
- return n, nil
-}
-
-func (nonrecordingAsyncInt64Instrument) Observe(context.Context, int64, ...attribute.KeyValue) {
-}
-
-type nonrecordingSyncFloat64Instrument struct {
- instrument.Synchronous
-}
-
-var (
- _ syncfloat64.InstrumentProvider = nonrecordingSyncFloat64Instrument{}
- _ syncfloat64.Counter = nonrecordingSyncFloat64Instrument{}
- _ syncfloat64.UpDownCounter = nonrecordingSyncFloat64Instrument{}
- _ syncfloat64.Histogram = nonrecordingSyncFloat64Instrument{}
-)
-
-func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Option) (syncfloat64.Counter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (syncfloat64.UpDownCounter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Option) (syncfloat64.Histogram, error) {
- return n, nil
-}
-
-func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {
-
-}
-
-func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {
-
-}
-
-type nonrecordingSyncInt64Instrument struct {
- instrument.Synchronous
-}
-
-var (
- _ syncint64.InstrumentProvider = nonrecordingSyncInt64Instrument{}
- _ syncint64.Counter = nonrecordingSyncInt64Instrument{}
- _ syncint64.UpDownCounter = nonrecordingSyncInt64Instrument{}
- _ syncint64.Histogram = nonrecordingSyncInt64Instrument{}
-)
-
-func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Option) (syncint64.Counter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) {
- return n, nil
-}
-
-func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) {
- return n, nil
-}
-
-func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {
-}
-func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {
-}
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/etcd/vendor/go.opentelemetry.io/otel/metric/unit/doc.go
deleted file mode 100644
index f8e723593e..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/unit/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package unit provides units.
-//
-// This package is currently in a pre-GA phase. Backwards incompatible changes
-// may be introduced in subsequent minor version releases as we work to track
-// the evolving OpenTelemetry specification and user feedback.
-package unit // import "go.opentelemetry.io/otel/metric/unit"
diff --git a/etcd/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/etcd/vendor/go.opentelemetry.io/otel/metric/unit/unit.go
deleted file mode 100644
index 647d77302d..0000000000
--- a/etcd/vendor/go.opentelemetry.io/otel/metric/unit/unit.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package unit // import "go.opentelemetry.io/otel/metric/unit"
-
-// Unit is a determinate standard quantity of measurement.
-type Unit string
-
-// Units defined by OpenTelemetry.
-const (
- Dimensionless Unit = "1"
- Bytes Unit = "By"
- Milliseconds Unit = "ms"
-)
diff --git a/etcd/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/etcd/vendor/golang.org/x/crypto/cryptobyte/asn1.go
deleted file mode 100644
index 3a1674a1e5..0000000000
--- a/etcd/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ /dev/null
@@ -1,809 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cryptobyte
-
-import (
- encoding_asn1 "encoding/asn1"
- "fmt"
- "math/big"
- "reflect"
- "time"
-
- "golang.org/x/crypto/cryptobyte/asn1"
-)
-
-// This file contains ASN.1-related methods for String and Builder.
-
-// Builder
-
-// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER.
-func (b *Builder) AddASN1Int64(v int64) {
- b.addASN1Signed(asn1.INTEGER, v)
-}
-
-// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the
-// given tag.
-func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) {
- b.addASN1Signed(tag, v)
-}
-
-// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION.
-func (b *Builder) AddASN1Enum(v int64) {
- b.addASN1Signed(asn1.ENUM, v)
-}
-
-func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) {
- b.AddASN1(tag, func(c *Builder) {
- length := 1
- for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
- length++
- }
-
- for ; length > 0; length-- {
- i := v >> uint((length-1)*8) & 0xff
- c.AddUint8(uint8(i))
- }
- })
-}
-
-// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER.
-func (b *Builder) AddASN1Uint64(v uint64) {
- b.AddASN1(asn1.INTEGER, func(c *Builder) {
- length := 1
- for i := v; i >= 0x80; i >>= 8 {
- length++
- }
-
- for ; length > 0; length-- {
- i := v >> uint((length-1)*8) & 0xff
- c.AddUint8(uint8(i))
- }
- })
-}
-
-// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER.
-func (b *Builder) AddASN1BigInt(n *big.Int) {
- if b.err != nil {
- return
- }
-
- b.AddASN1(asn1.INTEGER, func(c *Builder) {
- if n.Sign() < 0 {
- // A negative number has to be converted to two's-complement form. So we
- // invert and subtract 1. If the most-significant-bit isn't set then
- // we'll need to pad the beginning with 0xff in order to keep the number
- // negative.
- nMinus1 := new(big.Int).Neg(n)
- nMinus1.Sub(nMinus1, bigOne)
- bytes := nMinus1.Bytes()
- for i := range bytes {
- bytes[i] ^= 0xff
- }
- if len(bytes) == 0 || bytes[0]&0x80 == 0 {
- c.add(0xff)
- }
- c.add(bytes...)
- } else if n.Sign() == 0 {
- c.add(0)
- } else {
- bytes := n.Bytes()
- if bytes[0]&0x80 != 0 {
- c.add(0)
- }
- c.add(bytes...)
- }
- })
-}
-
-// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING.
-func (b *Builder) AddASN1OctetString(bytes []byte) {
- b.AddASN1(asn1.OCTET_STRING, func(c *Builder) {
- c.AddBytes(bytes)
- })
-}
-
-const generalizedTimeFormatStr = "20060102150405Z0700"
-
-// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME.
-func (b *Builder) AddASN1GeneralizedTime(t time.Time) {
- if t.Year() < 0 || t.Year() > 9999 {
- b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t)
- return
- }
- b.AddASN1(asn1.GeneralizedTime, func(c *Builder) {
- c.AddBytes([]byte(t.Format(generalizedTimeFormatStr)))
- })
-}
-
-// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime.
-func (b *Builder) AddASN1UTCTime(t time.Time) {
- b.AddASN1(asn1.UTCTime, func(c *Builder) {
- // As utilized by the X.509 profile, UTCTime can only
- // represent the years 1950 through 2049.
- if t.Year() < 1950 || t.Year() >= 2050 {
- b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t)
- return
- }
- c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr)))
- })
-}
-
-// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not
-// support BIT STRINGs that are not a whole number of bytes.
-func (b *Builder) AddASN1BitString(data []byte) {
- b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
- b.AddUint8(0)
- b.AddBytes(data)
- })
-}
-
-func (b *Builder) addBase128Int(n int64) {
- var length int
- if n == 0 {
- length = 1
- } else {
- for i := n; i > 0; i >>= 7 {
- length++
- }
- }
-
- for i := length - 1; i >= 0; i-- {
- o := byte(n >> uint(i*7))
- o &= 0x7f
- if i != 0 {
- o |= 0x80
- }
-
- b.add(o)
- }
-}
-
-func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
- if len(oid) < 2 {
- return false
- }
-
- if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
- return false
- }
-
- for _, v := range oid {
- if v < 0 {
- return false
- }
- }
-
- return true
-}
-
-func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
- b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
- if !isValidOID(oid) {
- b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
- return
- }
-
- b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
- for _, v := range oid[2:] {
- b.addBase128Int(int64(v))
- }
- })
-}
-
-func (b *Builder) AddASN1Boolean(v bool) {
- b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
- if v {
- b.AddUint8(0xff)
- } else {
- b.AddUint8(0)
- }
- })
-}
-
-func (b *Builder) AddASN1NULL() {
- b.add(uint8(asn1.NULL), 0)
-}
-
-// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
-// successful or records an error if one occurred.
-func (b *Builder) MarshalASN1(v interface{}) {
- // NOTE(martinkr): This is somewhat of a hack to allow propagation of
- // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
- // value embedded into a struct, its tag information is lost.
- if b.err != nil {
- return
- }
- bytes, err := encoding_asn1.Marshal(v)
- if err != nil {
- b.err = err
- return
- }
- b.AddBytes(bytes)
-}
-
-// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
-// Tags greater than 30 are not supported and result in an error (i.e.
-// low-tag-number form only). The child builder passed to the
-// BuilderContinuation can be used to build the content of the ASN.1 object.
-func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
- if b.err != nil {
- return
- }
- // Identifiers with the low five bits set indicate high-tag-number format
- // (two or more octets), which we don't support.
- if tag&0x1f == 0x1f {
- b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag)
- return
- }
- b.AddUint8(uint8(tag))
- b.addLengthPrefixed(1, true, f)
-}
-
-// String
-
-// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean
-// representation into out and advances. It reports whether the read
-// was successful.
-func (s *String) ReadASN1Boolean(out *bool) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
- return false
- }
-
- switch bytes[0] {
- case 0:
- *out = false
- case 0xff:
- *out = true
- default:
- return false
- }
-
- return true
-}
-
-var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
-
-// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
-// not point to an integer or to a big.Int, it panics. It reports whether the
-// read was successful.
-func (s *String) ReadASN1Integer(out interface{}) bool {
- if reflect.TypeOf(out).Kind() != reflect.Ptr {
- panic("out is not a pointer")
- }
- switch reflect.ValueOf(out).Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- var i int64
- if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
- return false
- }
- reflect.ValueOf(out).Elem().SetInt(i)
- return true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- var u uint64
- if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
- return false
- }
- reflect.ValueOf(out).Elem().SetUint(u)
- return true
- case reflect.Struct:
- if reflect.TypeOf(out).Elem() == bigIntType {
- return s.readASN1BigInt(out.(*big.Int))
- }
- }
- panic("out does not point to an integer type")
-}
-
-func checkASN1Integer(bytes []byte) bool {
- if len(bytes) == 0 {
- // An INTEGER is encoded with at least one octet.
- return false
- }
- if len(bytes) == 1 {
- return true
- }
- if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 {
- // Value is not minimally encoded.
- return false
- }
- return true
-}
-
-var bigOne = big.NewInt(1)
-
-func (s *String) readASN1BigInt(out *big.Int) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
- return false
- }
- if bytes[0]&0x80 == 0x80 {
- // Negative number.
- neg := make([]byte, len(bytes))
- for i, b := range bytes {
- neg[i] = ^b
- }
- out.SetBytes(neg)
- out.Add(out, bigOne)
- out.Neg(out)
- } else {
- out.SetBytes(bytes)
- }
- return true
-}
-
-func (s *String) readASN1Int64(out *int64) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
- return false
- }
- return true
-}
-
-func asn1Signed(out *int64, n []byte) bool {
- length := len(n)
- if length > 8 {
- return false
- }
- for i := 0; i < length; i++ {
- *out <<= 8
- *out |= int64(n[i])
- }
- // Shift up and down in order to sign extend the result.
- *out <<= 64 - uint8(length)*8
- *out >>= 64 - uint8(length)*8
- return true
-}
-
-func (s *String) readASN1Uint64(out *uint64) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) {
- return false
- }
- return true
-}
-
-func asn1Unsigned(out *uint64, n []byte) bool {
- length := len(n)
- if length > 9 || length == 9 && n[0] != 0 {
- // Too large for uint64.
- return false
- }
- if n[0]&0x80 != 0 {
- // Negative number.
- return false
- }
- for i := 0; i < length; i++ {
- *out <<= 8
- *out |= uint64(n[i])
- }
- return true
-}
-
-// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out
-// and advances. It reports whether the read was successful and resulted in a
-// value that can be represented in an int64.
-func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool {
- var bytes String
- return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes)
-}
-
-// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports
-// whether the read was successful.
-func (s *String) ReadASN1Enum(out *int) bool {
- var bytes String
- var i int64
- if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) {
- return false
- }
- if int64(int(i)) != i {
- return false
- }
- *out = int(i)
- return true
-}
-
-func (s *String) readBase128Int(out *int) bool {
- ret := 0
- for i := 0; len(*s) > 0; i++ {
- if i == 5 {
- return false
- }
- // Avoid overflowing int on a 32-bit platform.
- // We don't want different behavior based on the architecture.
- if ret >= 1<<(31-7) {
- return false
- }
- ret <<= 7
- b := s.read(1)[0]
- ret |= int(b & 0x7f)
- if b&0x80 == 0 {
- *out = ret
- return true
- }
- }
- return false // truncated
-}
-
-// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and
-// advances. It reports whether the read was successful.
-func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 {
- return false
- }
-
- // In the worst case, we get two elements from the first byte (which is
- // encoded differently) and then every varint is a single byte long.
- components := make([]int, len(bytes)+1)
-
- // The first varint is 40*value1 + value2:
- // According to this packing, value1 can take the values 0, 1 and 2 only.
- // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
- // then there are no restrictions on value2.
- var v int
- if !bytes.readBase128Int(&v) {
- return false
- }
- if v < 80 {
- components[0] = v / 40
- components[1] = v % 40
- } else {
- components[0] = 2
- components[1] = v - 80
- }
-
- i := 2
- for ; len(bytes) > 0; i++ {
- if !bytes.readBase128Int(&v) {
- return false
- }
- components[i] = v
- }
- *out = components[:i]
- return true
-}
-
-// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
-// advances. It reports whether the read was successful.
-func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
- return false
- }
- t := string(bytes)
- res, err := time.Parse(generalizedTimeFormatStr, t)
- if err != nil {
- return false
- }
- if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
- return false
- }
- *out = res
- return true
-}
-
-const defaultUTCTimeFormatStr = "060102150405Z0700"
-
-// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
-// It reports whether the read was successful.
-func (s *String) ReadASN1UTCTime(out *time.Time) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.UTCTime) {
- return false
- }
- t := string(bytes)
-
- formatStr := defaultUTCTimeFormatStr
- var err error
- res, err := time.Parse(formatStr, t)
- if err != nil {
- // Fallback to minute precision if we can't parse second
- // precision. If we are following X.509 or X.690 we shouldn't
- // support this, but we do.
- formatStr = "0601021504Z0700"
- res, err = time.Parse(formatStr, t)
- }
- if err != nil {
- return false
- }
-
- if serialized := res.Format(formatStr); serialized != t {
- return false
- }
-
- if res.Year() >= 2050 {
- // UTCTime interprets the low order digits 50-99 as 1950-99.
- // This only applies to its use in the X.509 profile.
- // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
- res = res.AddDate(-100, 0, 0)
- }
- *out = res
- return true
-}
-
-// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
-// It reports whether the read was successful.
-func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
- var bytes String
- if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
- len(bytes)*8/8 != len(bytes) {
- return false
- }
-
- paddingBits := uint8(bytes[0])
- bytes = bytes[1:]
- if paddingBits > 7 ||
- len(bytes) == 0 && paddingBits != 0 ||
- len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) {
- return false
- }
-
- lenBytes := String((*s)[2 : 2+lenLen])
- if !lenBytes.readUnsigned(&len32, int(lenLen)) {
- return false
- }
-
- // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
- // with the minimum number of octets.
- if len32 < 128 {
- // Length should have used short-form encoding.
- return false
- }
- if len32>>((lenLen-1)*8) == 0 {
- // Leading octet is 0. Length should have been at least one byte shorter.
- return false
- }
-
- headerLen = 2 + uint32(lenLen)
- if headerLen+len32 < len32 {
- // Overflow.
- return false
- }
- length = headerLen + len32
- }
-
- if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
- return false
- }
- if skipHeader && !out.Skip(int(headerLen)) {
- panic("cryptobyte: internal error")
- }
-
- return true
-}
diff --git a/etcd/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/etcd/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
deleted file mode 100644
index cda8e3edfd..0000000000
--- a/etcd/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package asn1 contains supporting types for parsing and building ASN.1
-// messages with the cryptobyte package.
-package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
-
-// Tag represents an ASN.1 identifier octet, consisting of a tag number
-// (indicating a type) and class (such as context-specific or constructed).
-//
-// Methods in the cryptobyte package only support the low-tag-number form, i.e.
-// a single identifier octet with bits 7-8 encoding the class and bits 1-6
-// encoding the tag number.
-type Tag uint8
-
-const (
- classConstructed = 0x20
- classContextSpecific = 0x80
-)
-
-// Constructed returns t with the constructed class bit set.
-func (t Tag) Constructed() Tag { return t | classConstructed }
-
-// ContextSpecific returns t with the context-specific class bit set.
-func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
-
-// The following is a list of standard tag and class combinations.
-const (
- BOOLEAN = Tag(1)
- INTEGER = Tag(2)
- BIT_STRING = Tag(3)
- OCTET_STRING = Tag(4)
- NULL = Tag(5)
- OBJECT_IDENTIFIER = Tag(6)
- ENUM = Tag(10)
- UTF8String = Tag(12)
- SEQUENCE = Tag(16 | classConstructed)
- SET = Tag(17 | classConstructed)
- PrintableString = Tag(19)
- T61String = Tag(20)
- IA5String = Tag(22)
- UTCTime = Tag(23)
- GeneralizedTime = Tag(24)
- GeneralString = Tag(27)
-)
diff --git a/etcd/vendor/golang.org/x/crypto/cryptobyte/builder.go b/etcd/vendor/golang.org/x/crypto/cryptobyte/builder.go
deleted file mode 100644
index 2a90c592d7..0000000000
--- a/etcd/vendor/golang.org/x/crypto/cryptobyte/builder.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cryptobyte
-
-import (
- "errors"
- "fmt"
-)
-
-// A Builder builds byte strings from fixed-length and length-prefixed values.
-// Builders either allocate space as needed, or are ‘fixed’, which means that
-// they write into a given buffer and produce an error if it's exhausted.
-//
-// The zero value is a usable Builder that allocates space as needed.
-//
-// Simple values are marshaled and appended to a Builder using methods on the
-// Builder. Length-prefixed values are marshaled by providing a
-// BuilderContinuation, which is a function that writes the inner contents of
-// the value to a given Builder. See the documentation for BuilderContinuation
-// for details.
-type Builder struct {
- err error
- result []byte
- fixedSize bool
- child *Builder
- offset int
- pendingLenLen int
- pendingIsASN1 bool
- inContinuation *bool
-}
-
-// NewBuilder creates a Builder that appends its output to the given buffer.
-// Like append(), the slice will be reallocated if its capacity is exceeded.
-// Use Bytes to get the final buffer.
-func NewBuilder(buffer []byte) *Builder {
- return &Builder{
- result: buffer,
- }
-}
-
-// NewFixedBuilder creates a Builder that appends its output into the given
-// buffer. This builder does not reallocate the output buffer. Writes that
-// would exceed the buffer's capacity are treated as an error.
-func NewFixedBuilder(buffer []byte) *Builder {
- return &Builder{
- result: buffer,
- fixedSize: true,
- }
-}
-
-// SetError sets the value to be returned as the error from Bytes. Writes
-// performed after calling SetError are ignored.
-func (b *Builder) SetError(err error) {
- b.err = err
-}
-
-// Bytes returns the bytes written by the builder or an error if one has
-// occurred during building.
-func (b *Builder) Bytes() ([]byte, error) {
- if b.err != nil {
- return nil, b.err
- }
- return b.result[b.offset:], nil
-}
-
-// BytesOrPanic returns the bytes written by the builder or panics if an error
-// has occurred during building.
-func (b *Builder) BytesOrPanic() []byte {
- if b.err != nil {
- panic(b.err)
- }
- return b.result[b.offset:]
-}
-
-// AddUint8 appends an 8-bit value to the byte string.
-func (b *Builder) AddUint8(v uint8) {
- b.add(byte(v))
-}
-
-// AddUint16 appends a big-endian, 16-bit value to the byte string.
-func (b *Builder) AddUint16(v uint16) {
- b.add(byte(v>>8), byte(v))
-}
-
-// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
-// byte of the 32-bit input value is silently truncated.
-func (b *Builder) AddUint24(v uint32) {
- b.add(byte(v>>16), byte(v>>8), byte(v))
-}
-
-// AddUint32 appends a big-endian, 32-bit value to the byte string.
-func (b *Builder) AddUint32(v uint32) {
- b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-// AddUint64 appends a big-endian, 64-bit value to the byte string.
-func (b *Builder) AddUint64(v uint64) {
- b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-// AddBytes appends a sequence of bytes to the byte string.
-func (b *Builder) AddBytes(v []byte) {
- b.add(v...)
-}
-
-// BuilderContinuation is a continuation-passing interface for building
-// length-prefixed byte sequences. Builder methods for length-prefixed
-// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
-// supplied to them. The child builder passed to the continuation can be used
-// to build the content of the length-prefixed sequence. For example:
-//
-// parent := cryptobyte.NewBuilder()
-// parent.AddUint8LengthPrefixed(func (child *Builder) {
-// child.AddUint8(42)
-// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
-// grandchild.AddUint8(5)
-// })
-// })
-//
-// It is an error to write more bytes to the child than allowed by the reserved
-// length prefix. After the continuation returns, the child must be considered
-// invalid, i.e. users must not store any copies or references of the child
-// that outlive the continuation.
-//
-// If the continuation panics with a value of type BuildError then the inner
-// error will be returned as the error from Bytes. If the child panics
-// otherwise then Bytes will repanic with the same value.
-type BuilderContinuation func(child *Builder)
-
-// BuildError wraps an error. If a BuilderContinuation panics with this value,
-// the panic will be recovered and the inner error will be returned from
-// Builder.Bytes.
-type BuildError struct {
- Err error
-}
-
-// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence.
-func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(1, false, f)
-}
-
-// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
-func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(2, false, f)
-}
-
-// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
-func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(3, false, f)
-}
-
-// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
-func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
- b.addLengthPrefixed(4, false, f)
-}
-
-func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
- if !*b.inContinuation {
- *b.inContinuation = true
-
- defer func() {
- *b.inContinuation = false
-
- r := recover()
- if r == nil {
- return
- }
-
- if buildError, ok := r.(BuildError); ok {
- b.err = buildError.Err
- } else {
- panic(r)
- }
- }()
- }
-
- f(arg)
-}
-
-func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
- // Subsequent writes can be ignored if the builder has encountered an error.
- if b.err != nil {
- return
- }
-
- offset := len(b.result)
- b.add(make([]byte, lenLen)...)
-
- if b.inContinuation == nil {
- b.inContinuation = new(bool)
- }
-
- b.child = &Builder{
- result: b.result,
- fixedSize: b.fixedSize,
- offset: offset,
- pendingLenLen: lenLen,
- pendingIsASN1: isASN1,
- inContinuation: b.inContinuation,
- }
-
- b.callContinuation(f, b.child)
- b.flushChild()
- if b.child != nil {
- panic("cryptobyte: internal error")
- }
-}
-
-func (b *Builder) flushChild() {
- if b.child == nil {
- return
- }
- b.child.flushChild()
- child := b.child
- b.child = nil
-
- if child.err != nil {
- b.err = child.err
- return
- }
-
- length := len(child.result) - child.pendingLenLen - child.offset
-
- if length < 0 {
- panic("cryptobyte: internal error") // result unexpectedly shrunk
- }
-
- if child.pendingIsASN1 {
- // For ASN.1, we reserved a single byte for the length. If that turned out
- // to be incorrect, we have to move the contents along in order to make
- // space.
- if child.pendingLenLen != 1 {
- panic("cryptobyte: internal error")
- }
- var lenLen, lenByte uint8
- if int64(length) > 0xfffffffe {
- b.err = errors.New("pending ASN.1 child too long")
- return
- } else if length > 0xffffff {
- lenLen = 5
- lenByte = 0x80 | 4
- } else if length > 0xffff {
- lenLen = 4
- lenByte = 0x80 | 3
- } else if length > 0xff {
- lenLen = 3
- lenByte = 0x80 | 2
- } else if length > 0x7f {
- lenLen = 2
- lenByte = 0x80 | 1
- } else {
- lenLen = 1
- lenByte = uint8(length)
- length = 0
- }
-
- // Insert the initial length byte, make space for successive length bytes,
- // and adjust the offset.
- child.result[child.offset] = lenByte
- extraBytes := int(lenLen - 1)
- if extraBytes != 0 {
- child.add(make([]byte, extraBytes)...)
- childStart := child.offset + child.pendingLenLen
- copy(child.result[childStart+extraBytes:], child.result[childStart:])
- }
- child.offset++
- child.pendingLenLen = extraBytes
- }
-
- l := length
- for i := child.pendingLenLen - 1; i >= 0; i-- {
- child.result[child.offset+i] = uint8(l)
- l >>= 8
- }
- if l != 0 {
- b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
- return
- }
-
- if b.fixedSize && &b.result[0] != &child.result[0] {
- panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
- }
-
- b.result = child.result
-}
-
-func (b *Builder) add(bytes ...byte) {
- if b.err != nil {
- return
- }
- if b.child != nil {
- panic("cryptobyte: attempted write while child is pending")
- }
- if len(b.result)+len(bytes) < len(bytes) {
- b.err = errors.New("cryptobyte: length overflow")
- }
- if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
- b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
- return
- }
- b.result = append(b.result, bytes...)
-}
-
-// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
-// child builder passed to a continuation to unwrite bytes from its parent will
-// panic.
-func (b *Builder) Unwrite(n int) {
- if b.err != nil {
- return
- }
- if b.child != nil {
- panic("cryptobyte: attempted unwrite while child is pending")
- }
- length := len(b.result) - b.pendingLenLen - b.offset
- if length < 0 {
- panic("cryptobyte: internal error")
- }
- if n > length {
- panic("cryptobyte: attempted to unwrite more than was written")
- }
- b.result = b.result[:len(b.result)-n]
-}
-
-// A MarshalingValue marshals itself into a Builder.
-type MarshalingValue interface {
- // Marshal is called by Builder.AddValue. It receives a pointer to a builder
- // to marshal itself into. It may return an error that occurred during
- // marshaling, such as unset or invalid values.
- Marshal(b *Builder) error
-}
-
-// AddValue calls Marshal on v, passing a pointer to the builder to append to.
-// If Marshal returns an error, it is set on the Builder so that subsequent
-// appends don't have an effect.
-func (b *Builder) AddValue(v MarshalingValue) {
- err := v.Marshal(b)
- if err != nil {
- b.err = err
- }
-}
diff --git a/etcd/vendor/golang.org/x/crypto/cryptobyte/string.go b/etcd/vendor/golang.org/x/crypto/cryptobyte/string.go
deleted file mode 100644
index 0531a3d6f1..0000000000
--- a/etcd/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cryptobyte contains types that help with parsing and constructing
-// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
-// contains useful ASN.1 constants.)
-//
-// The String type is for parsing. It wraps a []byte slice and provides helper
-// functions for consuming structures, value by value.
-//
-// The Builder type is for constructing messages. It providers helper functions
-// for appending values and also for appending length-prefixed submessages –
-// without having to worry about calculating the length prefix ahead of time.
-//
-// See the documentation and examples for the Builder and String types to get
-// started.
-package cryptobyte // import "golang.org/x/crypto/cryptobyte"
-
-// String represents a string of bytes. It provides methods for parsing
-// fixed-length and length-prefixed values from it.
-type String []byte
-
-// read advances a String by n bytes and returns them. If less than n bytes
-// remain, it returns nil.
-func (s *String) read(n int) []byte {
- if len(*s) < n || n < 0 {
- return nil
- }
- v := (*s)[:n]
- *s = (*s)[n:]
- return v
-}
-
-// Skip advances the String by n byte and reports whether it was successful.
-func (s *String) Skip(n int) bool {
- return s.read(n) != nil
-}
-
-// ReadUint8 decodes an 8-bit value into out and advances over it.
-// It reports whether the read was successful.
-func (s *String) ReadUint8(out *uint8) bool {
- v := s.read(1)
- if v == nil {
- return false
- }
- *out = uint8(v[0])
- return true
-}
-
-// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
-// It reports whether the read was successful.
-func (s *String) ReadUint16(out *uint16) bool {
- v := s.read(2)
- if v == nil {
- return false
- }
- *out = uint16(v[0])<<8 | uint16(v[1])
- return true
-}
-
-// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
-// It reports whether the read was successful.
-func (s *String) ReadUint24(out *uint32) bool {
- v := s.read(3)
- if v == nil {
- return false
- }
- *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
- return true
-}
-
-// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
-// It reports whether the read was successful.
-func (s *String) ReadUint32(out *uint32) bool {
- v := s.read(4)
- if v == nil {
- return false
- }
- *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
- return true
-}
-
-// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
-// It reports whether the read was successful.
-func (s *String) ReadUint64(out *uint64) bool {
- v := s.read(8)
- if v == nil {
- return false
- }
- *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
- return true
-}
-
-func (s *String) readUnsigned(out *uint32, length int) bool {
- v := s.read(length)
- if v == nil {
- return false
- }
- var result uint32
- for i := 0; i < length; i++ {
- result <<= 8
- result |= uint32(v[i])
- }
- *out = result
- return true
-}
-
-func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
- lenBytes := s.read(lenLen)
- if lenBytes == nil {
- return false
- }
- var length uint32
- for _, b := range lenBytes {
- length = length << 8
- length = length | uint32(b)
- }
- v := s.read(int(length))
- if v == nil {
- return false
- }
- *outChild = v
- return true
-}
-
-// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
-// into out and advances over it. It reports whether the read was successful.
-func (s *String) ReadUint8LengthPrefixed(out *String) bool {
- return s.readLengthPrefixed(1, out)
-}
-
-// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
-// length-prefixed value into out and advances over it. It reports whether the
-// read was successful.
-func (s *String) ReadUint16LengthPrefixed(out *String) bool {
- return s.readLengthPrefixed(2, out)
-}
-
-// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
-// length-prefixed value into out and advances over it. It reports whether
-// the read was successful.
-func (s *String) ReadUint24LengthPrefixed(out *String) bool {
- return s.readLengthPrefixed(3, out)
-}
-
-// ReadBytes reads n bytes into out and advances over them. It reports
-// whether the read was successful.
-func (s *String) ReadBytes(out *[]byte, n int) bool {
- v := s.read(n)
- if v == nil {
- return false
- }
- *out = v
- return true
-}
-
-// CopyBytes copies len(out) bytes into out and advances over them. It reports
-// whether the copy operation was successful
-func (s *String) CopyBytes(out []byte) bool {
- n := len(out)
- v := s.read(n)
- if v == nil {
- return false
- }
- return copy(out, v) == n
-}
-
-// Empty reports whether the string does not contain any bytes.
-func (s String) Empty() bool {
- return len(s) == 0
-}
diff --git a/etcd/vendor/golang.org/x/crypto/ed25519/ed25519.go b/etcd/vendor/golang.org/x/crypto/ed25519/ed25519.go
deleted file mode 100644
index a7828345fc..0000000000
--- a/etcd/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-//
-// Beginning with Go 1.13, the functionality of this package was moved to the
-// standard library as crypto/ed25519. This package only acts as a compatibility
-// wrapper.
-package ed25519
-
-import (
- "crypto/ed25519"
- "io"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-//
-// This type is an alias for crypto/ed25519's PublicKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PublicKey = ed25519.PublicKey
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-//
-// This type is an alias for crypto/ed25519's PrivateKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PrivateKey = ed25519.PrivateKey
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- return ed25519.GenerateKey(rand)
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- return ed25519.NewKeyFromSeed(seed)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- return ed25519.Sign(privateKey, message)
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
- return ed25519.Verify(publicKey, message, sig)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/alias/alias.go b/etcd/vendor/golang.org/x/crypto/internal/alias/alias.go
deleted file mode 100644
index 69c17f822b..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/alias/alias.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego
-// +build !purego
-
-// Package alias implements memory aliasing tests.
-package alias
-
-import "unsafe"
-
-// AnyOverlap reports whether x and y share memory at any (not necessarily
-// corresponding) index. The memory beyond the slice length is ignored.
-func AnyOverlap(x, y []byte) bool {
- return len(x) > 0 && len(y) > 0 &&
- uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
- uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
-}
-
-// InexactOverlap reports whether x and y share memory at any non-corresponding
-// index. The memory beyond the slice length is ignored. Note that x and y can
-// have different lengths and still not have any inexact overlap.
-//
-// InexactOverlap can be used to implement the requirements of the crypto/cipher
-// AEAD, Block, BlockMode and Stream interfaces.
-func InexactOverlap(x, y []byte) bool {
- if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
- return false
- }
- return AnyOverlap(x, y)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/etcd/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
deleted file mode 100644
index 4775b0a438..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego
-// +build purego
-
-// Package alias implements memory aliasing tests.
-package alias
-
-// This is the Google App Engine standard variant based on reflect
-// because the unsafe package and cgo are disallowed.
-
-import "reflect"
-
-// AnyOverlap reports whether x and y share memory at any (not necessarily
-// corresponding) index. The memory beyond the slice length is ignored.
-func AnyOverlap(x, y []byte) bool {
- return len(x) > 0 && len(y) > 0 &&
- reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
- reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
-}
-
-// InexactOverlap reports whether x and y share memory at any non-corresponding
-// index. The memory beyond the slice length is ignored. Note that x and y can
-// have different lengths and still not have any inexact overlap.
-//
-// InexactOverlap can be used to implement the requirements of the crypto/cipher
-// AEAD, Block, BlockMode and Stream interfaces.
-func InexactOverlap(x, y []byte) bool {
- if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
- return false
- }
- return AnyOverlap(x, y)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
deleted file mode 100644
index 45b5c966b2..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-// +build !go1.13
-
-package poly1305
-
-// Generic fallbacks for the math/bits intrinsics, copied from
-// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
-// variable time fallbacks until Go 1.13.
-
-func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
- sum = x + y + carry
- carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
- return
-}
-
-func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
- diff = x - y - borrow
- borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
- return
-}
-
-func bitsMul64(x, y uint64) (hi, lo uint64) {
- const mask32 = 1<<32 - 1
- x0 := x & mask32
- x1 := x >> 32
- y0 := y & mask32
- y1 := y >> 32
- w0 := x0 * y0
- t := x1*y0 + w0>>32
- w1 := t & mask32
- w2 := t >> 32
- w1 += x0 * y1
- hi = x1*y1 + w2 + w1>>32
- lo = x * y
- return
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
deleted file mode 100644
index ed52b3418a..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-package poly1305
-
-import "math/bits"
-
-func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
- return bits.Add64(x, y, carry)
-}
-
-func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
- return bits.Sub64(x, y, borrow)
-}
-
-func bitsMul64(x, y uint64) (hi, lo uint64) {
- return bits.Mul64(x, y)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
deleted file mode 100644
index f184b67d98..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
-// +build !amd64,!ppc64le,!s390x !gc purego
-
-package poly1305
-
-type mac struct{ macGeneric }
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
deleted file mode 100644
index 4aaea810a2..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package poly1305 implements Poly1305 one-time message authentication code as
-// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
-//
-// Poly1305 is a fast, one-time authentication function. It is infeasible for an
-// attacker to generate an authenticator for a message without the key. However, a
-// key must only be used for a single message. Authenticating two different
-// messages with the same key allows an attacker to forge authenticators for other
-// messages with the same key.
-//
-// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
-// used with a fixed key in order to generate one-time keys from an nonce.
-// However, in this package AES isn't used and the one-time key is specified
-// directly.
-package poly1305
-
-import "crypto/subtle"
-
-// TagSize is the size, in bytes, of a poly1305 authenticator.
-const TagSize = 16
-
-// Sum generates an authenticator for msg using a one-time key and puts the
-// 16-byte result into out. Authenticating two different messages with the same
-// key allows an attacker to forge messages at will.
-func Sum(out *[16]byte, m []byte, key *[32]byte) {
- h := New(key)
- h.Write(m)
- h.Sum(out[:0])
-}
-
-// Verify returns true if mac is a valid authenticator for m with the given key.
-func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
- var tmp [16]byte
- Sum(&tmp, m, key)
- return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
-}
-
-// New returns a new MAC computing an authentication
-// tag of all data written to it with the given key.
-// This allows writing the message progressively instead
-// of passing it as a single slice. Common users should use
-// the Sum function instead.
-//
-// The key must be unique for each message, as authenticating
-// two different messages with the same key allows an attacker
-// to forge messages at will.
-func New(key *[32]byte) *MAC {
- m := &MAC{}
- initialize(key, &m.macState)
- return m
-}
-
-// MAC is an io.Writer computing an authentication tag
-// of the data written to it.
-//
-// MAC cannot be used like common hash.Hash implementations,
-// because using a poly1305 key twice breaks its security.
-// Therefore writing data to a running MAC after calling
-// Sum or Verify causes it to panic.
-type MAC struct {
- mac // platform-dependent implementation
-
- finalized bool
-}
-
-// Size returns the number of bytes Sum will return.
-func (h *MAC) Size() int { return TagSize }
-
-// Write adds more data to the running message authentication code.
-// It never returns an error.
-//
-// It must not be called after the first call of Sum or Verify.
-func (h *MAC) Write(p []byte) (n int, err error) {
- if h.finalized {
- panic("poly1305: write to MAC after Sum or Verify")
- }
- return h.mac.Write(p)
-}
-
-// Sum computes the authenticator of all data written to the
-// message authentication code.
-func (h *MAC) Sum(b []byte) []byte {
- var mac [TagSize]byte
- h.mac.Sum(&mac)
- h.finalized = true
- return append(b, mac[:]...)
-}
-
-// Verify returns whether the authenticator of all data written to
-// the message authentication code matches the expected value.
-func (h *MAC) Verify(expected []byte) bool {
- var mac [TagSize]byte
- h.mac.Sum(&mac)
- h.finalized = true
- return subtle.ConstantTimeCompare(expected, mac[:]) == 1
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
deleted file mode 100644
index 6d522333f2..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-package poly1305
-
-//go:noescape
-func update(state *macState, msg []byte)
-
-// mac is a wrapper for macGeneric that redirects calls that would have gone to
-// updateGeneric to update.
-//
-// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
-// using function pointers would carry a major performance cost.
-type mac struct{ macGeneric }
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- update(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- update(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-func (h *mac) Sum(out *[16]byte) {
- state := h.macState
- if h.offset > 0 {
- update(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
deleted file mode 100644
index 1d74f0f881..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-#include "textflag.h"
-
-#define POLY1305_ADD(msg, h0, h1, h2) \
- ADDQ 0(msg), h0; \
- ADCQ 8(msg), h1; \
- ADCQ $1, h2; \
- LEAQ 16(msg), msg
-
-#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
- MOVQ r0, AX; \
- MULQ h0; \
- MOVQ AX, t0; \
- MOVQ DX, t1; \
- MOVQ r0, AX; \
- MULQ h1; \
- ADDQ AX, t1; \
- ADCQ $0, DX; \
- MOVQ r0, t2; \
- IMULQ h2, t2; \
- ADDQ DX, t2; \
- \
- MOVQ r1, AX; \
- MULQ h0; \
- ADDQ AX, t1; \
- ADCQ $0, DX; \
- MOVQ DX, h0; \
- MOVQ r1, t3; \
- IMULQ h2, t3; \
- MOVQ r1, AX; \
- MULQ h1; \
- ADDQ AX, t2; \
- ADCQ DX, t3; \
- ADDQ h0, t2; \
- ADCQ $0, t3; \
- \
- MOVQ t0, h0; \
- MOVQ t1, h1; \
- MOVQ t2, h2; \
- ANDQ $3, h2; \
- MOVQ t2, t0; \
- ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
- ADDQ t0, h0; \
- ADCQ t3, h1; \
- ADCQ $0, h2; \
- SHRQ $2, t3, t2; \
- SHRQ $2, t3; \
- ADDQ t2, h0; \
- ADCQ t3, h1; \
- ADCQ $0, h2
-
-// func update(state *[7]uint64, msg []byte)
-TEXT ·update(SB), $0-32
- MOVQ state+0(FP), DI
- MOVQ msg_base+8(FP), SI
- MOVQ msg_len+16(FP), R15
-
- MOVQ 0(DI), R8 // h0
- MOVQ 8(DI), R9 // h1
- MOVQ 16(DI), R10 // h2
- MOVQ 24(DI), R11 // r0
- MOVQ 32(DI), R12 // r1
-
- CMPQ R15, $16
- JB bytes_between_0_and_15
-
-loop:
- POLY1305_ADD(SI, R8, R9, R10)
-
-multiply:
- POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
- SUBQ $16, R15
- CMPQ R15, $16
- JAE loop
-
-bytes_between_0_and_15:
- TESTQ R15, R15
- JZ done
- MOVQ $1, BX
- XORQ CX, CX
- XORQ R13, R13
- ADDQ R15, SI
-
-flush_buffer:
- SHLQ $8, BX, CX
- SHLQ $8, BX
- MOVB -1(SI), R13
- XORQ R13, BX
- DECQ SI
- DECQ R15
- JNZ flush_buffer
-
- ADDQ BX, R8
- ADCQ CX, R9
- ADCQ $0, R10
- MOVQ $16, R15
- JMP multiply
-
-done:
- MOVQ R8, 0(DI)
- MOVQ R9, 8(DI)
- MOVQ R10, 16(DI)
- RET
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
deleted file mode 100644
index e041da5ea3..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides the generic implementation of Sum and MAC. Other files
-// might provide optimized assembly implementations of some of this code.
-
-package poly1305
-
-import "encoding/binary"
-
-// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
-// for a 64 bytes message is approximately
-//
-// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
-//
-// for some secret r and s. It can be computed sequentially like
-//
-// for len(msg) > 0:
-// h += read(msg, 16)
-// h *= r
-// h %= 2¹³⁰ - 5
-// return h + s
-//
-// All the complexity is about doing performant constant-time math on numbers
-// larger than any available numeric type.
-
-func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
- h := newMACGeneric(key)
- h.Write(msg)
- h.Sum(out)
-}
-
-func newMACGeneric(key *[32]byte) macGeneric {
- m := macGeneric{}
- initialize(key, &m.macState)
- return m
-}
-
-// macState holds numbers in saturated 64-bit little-endian limbs. That is,
-// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
-type macState struct {
- // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
- // can grow larger during and after rounds. It must, however, remain below
- // 2 * (2¹³⁰ - 5).
- h [3]uint64
- // r and s are the private key components.
- r [2]uint64
- s [2]uint64
-}
-
-type macGeneric struct {
- macState
-
- buffer [TagSize]byte
- offset int
-}
-
-// Write splits the incoming message into TagSize chunks, and passes them to
-// update. It buffers incomplete chunks.
-func (h *macGeneric) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- updateGeneric(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- updateGeneric(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-// Sum flushes the last incomplete chunk from the buffer, if any, and generates
-// the MAC output. It does not modify its state, in order to allow for multiple
-// calls to Sum, even if no Write is allowed after Sum.
-func (h *macGeneric) Sum(out *[TagSize]byte) {
- state := h.macState
- if h.offset > 0 {
- updateGeneric(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
-
-// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
-// clears some bits of the secret coefficient to make it possible to implement
-// multiplication more efficiently.
-const (
- rMask0 = 0x0FFFFFFC0FFFFFFF
- rMask1 = 0x0FFFFFFC0FFFFFFC
-)
-
-// initialize loads the 256-bit key into the two 128-bit secret values r and s.
-func initialize(key *[32]byte, m *macState) {
- m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
- m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
- m.s[0] = binary.LittleEndian.Uint64(key[16:24])
- m.s[1] = binary.LittleEndian.Uint64(key[24:32])
-}
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-func mul64(a, b uint64) uint128 {
- hi, lo := bitsMul64(a, b)
- return uint128{lo, hi}
-}
-
-func add128(a, b uint128) uint128 {
- lo, c := bitsAdd64(a.lo, b.lo, 0)
- hi, c := bitsAdd64(a.hi, b.hi, c)
- if c != 0 {
- panic("poly1305: unexpected overflow")
- }
- return uint128{lo, hi}
-}
-
-func shiftRightBy2(a uint128) uint128 {
- a.lo = a.lo>>2 | (a.hi&3)<<62
- a.hi = a.hi >> 2
- return a
-}
-
-// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
-// 128 bits of message, it computes
-//
-// h₊ = (h + m) * r mod 2¹³⁰ - 5
-//
-// If the msg length is not a multiple of TagSize, it assumes the last
-// incomplete chunk is the final one.
-func updateGeneric(state *macState, msg []byte) {
- h0, h1, h2 := state.h[0], state.h[1], state.h[2]
- r0, r1 := state.r[0], state.r[1]
-
- for len(msg) > 0 {
- var c uint64
-
- // For the first step, h + m, we use a chain of bits.Add64 intrinsics.
- // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
- // reduced at the end of the multiplication below.
- //
- // The spec requires us to set a bit just above the message size, not to
- // hide leading zeroes. For full chunks, that's 1 << 128, so we can just
- // add 1 to the most significant (2¹²⁸) limb, h2.
- if len(msg) >= TagSize {
- h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
- h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
- h2 += c + 1
-
- msg = msg[TagSize:]
- } else {
- var buf [TagSize]byte
- copy(buf[:], msg)
- buf[len(msg)] = 1
-
- h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
- h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
- h2 += c
-
- msg = nil
- }
-
- // Multiplication of big number limbs is similar to elementary school
- // columnar multiplication. Instead of digits, there are 64-bit limbs.
- //
- // We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
- //
- // h2 h1 h0 x
- // r1 r0 =
- // ----------------
- // h2r0 h1r0 h0r0 <-- individual 128-bit products
- // + h2r1 h1r1 h0r1
- // ------------------------
- // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
- // ------------------------
- // m3.hi m2.hi m1.hi m0.hi <-- carry propagation
- // + m3.lo m2.lo m1.lo m0.lo
- // -------------------------------
- // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
- //
- // The main difference from pen-and-paper multiplication is that we do
- // carry propagation in a separate step, as if we wrote two digit sums
- // at first (the 128-bit limbs), and then carried the tens all at once.
-
- h0r0 := mul64(h0, r0)
- h1r0 := mul64(h1, r0)
- h2r0 := mul64(h2, r0)
- h0r1 := mul64(h0, r1)
- h1r1 := mul64(h1, r1)
- h2r1 := mul64(h2, r1)
-
- // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
- // top 4 bits cleared by rMask{0,1}, we know that their product is not going
- // to overflow 64 bits, so we can ignore the high part of the products.
- //
- // This also means that the product doesn't have a fifth limb (t4).
- if h2r0.hi != 0 {
- panic("poly1305: unexpected overflow")
- }
- if h2r1.hi != 0 {
- panic("poly1305: unexpected overflow")
- }
-
- m0 := h0r0
- m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
- m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
- m3 := h2r1
-
- t0 := m0.lo
- t1, c := bitsAdd64(m1.lo, m0.hi, 0)
- t2, c := bitsAdd64(m2.lo, m1.hi, c)
- t3, _ := bitsAdd64(m3.lo, m2.hi, c)
-
- // Now we have the result as 4 64-bit limbs, and we need to reduce it
- // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
- // a cheap partial reduction according to the reduction identity
- //
- // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
- //
- // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
- // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
- // assumptions we make about h in the rest of the code.
- //
- // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
-
- // We split the final result at the 2¹³⁰ mark into h and cc, the carry.
- // Note that the carry bits are effectively shifted left by 2, in other
- // words, cc = c * 4 for the c in the reduction identity.
- h0, h1, h2 = t0, t1, t2&maskLow2Bits
- cc := uint128{t2 & maskNotLow2Bits, t3}
-
- // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
-
- h0, c = bitsAdd64(h0, cc.lo, 0)
- h1, c = bitsAdd64(h1, cc.hi, c)
- h2 += c
-
- cc = shiftRightBy2(cc)
-
- h0, c = bitsAdd64(h0, cc.lo, 0)
- h1, c = bitsAdd64(h1, cc.hi, c)
- h2 += c
-
- // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
- //
- // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
- }
-
- state.h[0], state.h[1], state.h[2] = h0, h1, h2
-}
-
-const (
- maskLow2Bits uint64 = 0x0000000000000003
- maskNotLow2Bits uint64 = ^maskLow2Bits
-)
-
-// select64 returns x if v == 1 and y if v == 0, in constant time.
-func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
-
-// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
-const (
- p0 = 0xFFFFFFFFFFFFFFFB
- p1 = 0xFFFFFFFFFFFFFFFF
- p2 = 0x0000000000000003
-)
-
-// finalize completes the modular reduction of h and computes
-//
-// out = h + s mod 2¹²⁸
-func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
- h0, h1, h2 := h[0], h[1], h[2]
-
- // After the partial reduction in updateGeneric, h might be more than
- // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
- // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
- // result if the subtraction underflows, and t otherwise.
-
- hMinusP0, b := bitsSub64(h0, p0, 0)
- hMinusP1, b := bitsSub64(h1, p1, b)
- _, b = bitsSub64(h2, p2, b)
-
- // h = h if h < p else h - p
- h0 = select64(b, h0, hMinusP0)
- h1 = select64(b, h1, hMinusP1)
-
- // Finally, we compute the last Poly1305 step
- //
- // tag = h + s mod 2¹²⁸
- //
- // by just doing a wide addition with the 128 low bits of h and discarding
- // the overflow.
- h0, c := bitsAdd64(h0, s[0], 0)
- h1, _ = bitsAdd64(h1, s[1], c)
-
- binary.LittleEndian.PutUint64(out[0:8], h0)
- binary.LittleEndian.PutUint64(out[8:16], h1)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
deleted file mode 100644
index 4a069941a6..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-package poly1305
-
-//go:noescape
-func update(state *macState, msg []byte)
-
-// mac is a wrapper for macGeneric that redirects calls that would have gone to
-// updateGeneric to update.
-//
-// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
-// using function pointers would carry a major performance cost.
-type mac struct{ macGeneric }
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- update(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- update(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-func (h *mac) Sum(out *[16]byte) {
- state := h.macState
- if h.offset > 0 {
- update(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
deleted file mode 100644
index 58422aad23..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-#include "textflag.h"
-
-// This was ported from the amd64 implementation.
-
-#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
- MOVD (msg), t0; \
- MOVD 8(msg), t1; \
- MOVD $1, t2; \
- ADDC t0, h0, h0; \
- ADDE t1, h1, h1; \
- ADDE t2, h2; \
- ADD $16, msg
-
-#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
- MULLD r0, h0, t0; \
- MULLD r0, h1, t4; \
- MULHDU r0, h0, t1; \
- MULHDU r0, h1, t5; \
- ADDC t4, t1, t1; \
- MULLD r0, h2, t2; \
- ADDZE t5; \
- MULHDU r1, h0, t4; \
- MULLD r1, h0, h0; \
- ADD t5, t2, t2; \
- ADDC h0, t1, t1; \
- MULLD h2, r1, t3; \
- ADDZE t4, h0; \
- MULHDU r1, h1, t5; \
- MULLD r1, h1, t4; \
- ADDC t4, t2, t2; \
- ADDE t5, t3, t3; \
- ADDC h0, t2, t2; \
- MOVD $-4, t4; \
- MOVD t0, h0; \
- MOVD t1, h1; \
- ADDZE t3; \
- ANDCC $3, t2, h2; \
- AND t2, t4, t0; \
- ADDC t0, h0, h0; \
- ADDE t3, h1, h1; \
- SLD $62, t3, t4; \
- SRD $2, t2; \
- ADDZE h2; \
- OR t4, t2, t2; \
- SRD $2, t3; \
- ADDC t2, h0, h0; \
- ADDE t3, h1, h1; \
- ADDZE h2
-
-DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
-DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
-GLOBL ·poly1305Mask<>(SB), RODATA, $16
-
-// func update(state *[7]uint64, msg []byte)
-TEXT ·update(SB), $0-32
- MOVD state+0(FP), R3
- MOVD msg_base+8(FP), R4
- MOVD msg_len+16(FP), R5
-
- MOVD 0(R3), R8 // h0
- MOVD 8(R3), R9 // h1
- MOVD 16(R3), R10 // h2
- MOVD 24(R3), R11 // r0
- MOVD 32(R3), R12 // r1
-
- CMP R5, $16
- BLT bytes_between_0_and_15
-
-loop:
- POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
-
-multiply:
- POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
- ADD $-16, R5
- CMP R5, $16
- BGE loop
-
-bytes_between_0_and_15:
- CMP R5, $0
- BEQ done
- MOVD $0, R16 // h0
- MOVD $0, R17 // h1
-
-flush_buffer:
- CMP R5, $8
- BLE just1
-
- MOVD $8, R21
- SUB R21, R5, R21
-
- // Greater than 8 -- load the rightmost remaining bytes in msg
- // and put into R17 (h1)
- MOVD (R4)(R21), R17
- MOVD $16, R22
-
- // Find the offset to those bytes
- SUB R5, R22, R22
- SLD $3, R22
-
- // Shift to get only the bytes in msg
- SRD R22, R17, R17
-
- // Put 1 at high end
- MOVD $1, R23
- SLD $3, R21
- SLD R21, R23, R23
- OR R23, R17, R17
-
- // Remainder is 8
- MOVD $8, R5
-
-just1:
- CMP R5, $8
- BLT less8
-
- // Exactly 8
- MOVD (R4), R16
-
- CMP R17, $0
-
- // Check if we've already set R17; if not
- // set 1 to indicate end of msg.
- BNE carry
- MOVD $1, R17
- BR carry
-
-less8:
- MOVD $0, R16 // h0
- MOVD $0, R22 // shift count
- CMP R5, $4
- BLT less4
- MOVWZ (R4), R16
- ADD $4, R4
- ADD $-4, R5
- MOVD $32, R22
-
-less4:
- CMP R5, $2
- BLT less2
- MOVHZ (R4), R21
- SLD R22, R21, R21
- OR R16, R21, R16
- ADD $16, R22
- ADD $-2, R5
- ADD $2, R4
-
-less2:
- CMP R5, $0
- BEQ insert1
- MOVBZ (R4), R21
- SLD R22, R21, R21
- OR R16, R21, R16
- ADD $8, R22
-
-insert1:
- // Insert 1 at end of msg
- MOVD $1, R21
- SLD R22, R21, R21
- OR R16, R21, R16
-
-carry:
- // Add new values to h0, h1, h2
- ADDC R16, R8
- ADDE R17, R9
- ADDZE R10, R10
- MOVD $16, R5
- ADD R5, R4
- BR multiply
-
-done:
- // Save h0, h1, h2 in state
- MOVD R8, 0(R3)
- MOVD R9, 8(R3)
- MOVD R10, 16(R3)
- RET
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
deleted file mode 100644
index ec95966889..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-package poly1305
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-// updateVX is an assembly implementation of Poly1305 that uses vector
-// instructions. It must only be called if the vector facility (vx) is
-// available.
-//
-//go:noescape
-func updateVX(state *macState, msg []byte)
-
-// mac is a replacement for macGeneric that uses a larger buffer and redirects
-// calls that would have gone to updateGeneric to updateVX if the vector
-// facility is installed.
-//
-// A larger buffer is required for good performance because the vector
-// implementation has a higher fixed cost per call than the generic
-// implementation.
-type mac struct {
- macState
-
- buffer [16 * TagSize]byte // size must be a multiple of block size (16)
- offset int
-}
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < len(h.buffer) {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- if cpu.S390X.HasVX {
- updateVX(&h.macState, h.buffer[:])
- } else {
- updateGeneric(&h.macState, h.buffer[:])
- }
- }
-
- tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
- body := len(p) - tail // number of bytes to process now
- if body > 0 {
- if cpu.S390X.HasVX {
- updateVX(&h.macState, p[:body])
- } else {
- updateGeneric(&h.macState, p[:body])
- }
- }
- h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
- return nn, nil
-}
-
-func (h *mac) Sum(out *[TagSize]byte) {
- state := h.macState
- remainder := h.buffer[:h.offset]
-
- // Use the generic implementation if we have 2 or fewer blocks left
- // to sum. The vector implementation has a higher startup time.
- if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
- updateVX(&state, remainder)
- } else if len(remainder) > 0 {
- updateGeneric(&state, remainder)
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
deleted file mode 100644
index aa9e0494c9..0000000000
--- a/etcd/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
+++ /dev/null
@@ -1,504 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-#include "textflag.h"
-
-// This implementation of Poly1305 uses the vector facility (vx)
-// to process up to 2 blocks (32 bytes) per iteration using an
-// algorithm based on the one described in:
-//
-// NEON crypto, Daniel J. Bernstein & Peter Schwabe
-// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
-//
-// This algorithm uses 5 26-bit limbs to represent a 130-bit
-// value. These limbs are, for the most part, zero extended and
-// placed into 64-bit vector register elements. Each vector
-// register is 128-bits wide and so holds 2 of these elements.
-// Using 26-bit limbs allows us plenty of headroom to accommodate
-// accumulations before and after multiplication without
-// overflowing either 32-bits (before multiplication) or 64-bits
-// (after multiplication).
-//
-// In order to parallelise the operations required to calculate
-// the sum we use two separate accumulators and then sum those
-// in an extra final step. For compatibility with the generic
-// implementation we perform this summation at the end of every
-// updateVX call.
-//
-// To use two accumulators we must multiply the message blocks
-// by r² rather than r. Only the final message block should be
-// multiplied by r.
-//
-// Example:
-//
-// We want to calculate the sum (h) for a 64 byte message (m):
-//
-// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
-//
-// To do this we split the calculation into the even indices
-// and odd indices of the message. These form our SIMD 'lanes':
-//
-// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
-// m[16:32]r³ + m[48:64]r <- lane 1
-//
-// To calculate this iteratively we refactor so that both lanes
-// are written in terms of r² and r:
-//
-// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
-// (m[16:32]r² + m[48:64])r <- lane 1
-// ^ ^
-// | coefficients for second iteration
-// coefficients for first iteration
-//
-// So in this case we would have two iterations. In the first
-// both lanes are multiplied by r². In the second only the
-// first lane is multiplied by r² and the second lane is
-// instead multiplied by r. This gives use the odd and even
-// powers of r that we need from the original equation.
-//
-// Notation:
-//
-// h - accumulator
-// r - key
-// m - message
-//
-// [a, b] - SIMD register holding two 64-bit values
-// [a, b, c, d] - SIMD register holding four 32-bit values
-// xᵢ[n] - limb n of variable x with bit width i
-//
-// Limbs are expressed in little endian order, so for 26-bit
-// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
-// will be the least significant limb.
-
-// masking constants
-#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
-#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
-
-// expansion constants (see EXPAND macro)
-#define EX0 V2
-#define EX1 V3
-#define EX2 V4
-
-// key (r², r or 1 depending on context)
-#define R_0 V5
-#define R_1 V6
-#define R_2 V7
-#define R_3 V8
-#define R_4 V9
-
-// precalculated coefficients (5r², 5r or 0 depending on context)
-#define R5_1 V10
-#define R5_2 V11
-#define R5_3 V12
-#define R5_4 V13
-
-// message block (m)
-#define M_0 V14
-#define M_1 V15
-#define M_2 V16
-#define M_3 V17
-#define M_4 V18
-
-// accumulator (h)
-#define H_0 V19
-#define H_1 V20
-#define H_2 V21
-#define H_3 V22
-#define H_4 V23
-
-// temporary registers (for short-lived values)
-#define T_0 V24
-#define T_1 V25
-#define T_2 V26
-#define T_3 V27
-#define T_4 V28
-
-GLOBL ·constants<>(SB), RODATA, $0x30
-// EX0
-DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
-DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
-// EX1
-DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
-DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
-// EX2
-DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
-DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
-
-// MULTIPLY multiplies each lane of f and g, partially reduced
-// modulo 2¹³⁰ - 5. The result, h, consists of partial products
-// in each lane that need to be reduced further to produce the
-// final result.
-//
-// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
-//
-// Note that the multiplication by 5 of the high bits is
-// achieved by precalculating the multiplication of four of the
-// g coefficients by 5. These are g51-g54.
-#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
- VMLOF f0, g0, h0 \
- VMLOF f0, g3, h3 \
- VMLOF f0, g1, h1 \
- VMLOF f0, g4, h4 \
- VMLOF f0, g2, h2 \
- VMLOF f1, g54, T_0 \
- VMLOF f1, g2, T_3 \
- VMLOF f1, g0, T_1 \
- VMLOF f1, g3, T_4 \
- VMLOF f1, g1, T_2 \
- VMALOF f2, g53, h0, h0 \
- VMALOF f2, g1, h3, h3 \
- VMALOF f2, g54, h1, h1 \
- VMALOF f2, g2, h4, h4 \
- VMALOF f2, g0, h2, h2 \
- VMALOF f3, g52, T_0, T_0 \
- VMALOF f3, g0, T_3, T_3 \
- VMALOF f3, g53, T_1, T_1 \
- VMALOF f3, g1, T_4, T_4 \
- VMALOF f3, g54, T_2, T_2 \
- VMALOF f4, g51, h0, h0 \
- VMALOF f4, g54, h3, h3 \
- VMALOF f4, g52, h1, h1 \
- VMALOF f4, g0, h4, h4 \
- VMALOF f4, g53, h2, h2 \
- VAG T_0, h0, h0 \
- VAG T_3, h3, h3 \
- VAG T_1, h1, h1 \
- VAG T_4, h4, h4 \
- VAG T_2, h2, h2
-
-// REDUCE performs the following carry operations in four
-// stages, as specified in Bernstein & Schwabe:
-//
-// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
-// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
-// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
-// 4: h₂₆[3]->h₂₆[4]
-//
-// The result is that all of the limbs are limited to 26-bits
-// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
-//
-// Note that although each limb is aligned at 26-bit intervals
-// they may contain values that exceed 2²⁶ - 1, hence the need
-// to carry the excess bits in each limb.
-#define REDUCE(h0, h1, h2, h3, h4) \
- VESRLG $26, h0, T_0 \
- VESRLG $26, h3, T_1 \
- VN MOD26, h0, h0 \
- VN MOD26, h3, h3 \
- VAG T_0, h1, h1 \
- VAG T_1, h4, h4 \
- VESRLG $26, h1, T_2 \
- VESRLG $26, h4, T_3 \
- VN MOD26, h1, h1 \
- VN MOD26, h4, h4 \
- VESLG $2, T_3, T_4 \
- VAG T_3, T_4, T_4 \
- VAG T_2, h2, h2 \
- VAG T_4, h0, h0 \
- VESRLG $26, h2, T_0 \
- VESRLG $26, h0, T_1 \
- VN MOD26, h2, h2 \
- VN MOD26, h0, h0 \
- VAG T_0, h3, h3 \
- VAG T_1, h1, h1 \
- VESRLG $26, h3, T_2 \
- VN MOD26, h3, h3 \
- VAG T_2, h4, h4
-
-// EXPAND splits the 128-bit little-endian values in0 and in1
-// into 26-bit big-endian limbs and places the results into
-// the first and second lane of d₂₆[0:4] respectively.
-//
-// The EX0, EX1 and EX2 constants are arrays of byte indices
-// for permutation. The permutation both reverses the bytes
-// in the input and ensures the bytes are copied into the
-// destination limb ready to be shifted into their final
-// position.
-#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
- VPERM in0, in1, EX0, d0 \
- VPERM in0, in1, EX1, d2 \
- VPERM in0, in1, EX2, d4 \
- VESRLG $26, d0, d1 \
- VESRLG $30, d2, d3 \
- VESRLG $4, d2, d2 \
- VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
- VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
- VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
- VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
- VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
-
-// func updateVX(state *macState, msg []byte)
-TEXT ·updateVX(SB), NOSPLIT, $0
- MOVD state+0(FP), R1
- LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
-
- // load EX0, EX1 and EX2
- MOVD $·constants<>(SB), R5
- VLM (R5), EX0, EX2
-
- // generate masks
- VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
- VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
-
- // load h (accumulator) and r (key) from state
- VZERO T_1 // [0, 0]
- VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
- VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
- VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
- VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
- VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
-
- // unpack h and r into 26-bit limbs
- // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
- VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
- VZERO H_1 // [0, 0]
- VZERO H_3 // [0, 0]
- VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
- VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
- VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
- VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
- VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]]
- VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
- VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
- VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
-
- // replicate r across all 4 vector elements
- VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
- VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
- VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
- VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
- VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
-
- // zero out lane 1 of h
- VLEIG $1, $0, H_0 // [h₂₆[0], 0]
- VLEIG $1, $0, H_1 // [h₂₆[1], 0]
- VLEIG $1, $0, H_2 // [h₂₆[2], 0]
- VLEIG $1, $0, H_3 // [h₂₆[3], 0]
- VLEIG $1, $0, H_4 // [h₂₆[4], 0]
-
- // calculate 5r (ignore least significant limb)
- VREPIF $5, T_0
- VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
- VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
- VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
- VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
-
- // skip r² calculation if we are only calculating one block
- CMPBLE R3, $16, skip
-
- // calculate r²
- MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
- REDUCE(M_0, M_1, M_2, M_3, M_4)
- VGBM $0x0f0f, T_0
- VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
- VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
- VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
- VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
- VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
-
- // calculate 5r² (ignore least significant limb)
- VREPIF $5, T_0
- VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
- VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
- VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
- VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
-
-loop:
- CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
-
- // load next 2 blocks from message
- VLM (R2), T_0, T_1
-
- // update message slice
- SUB $32, R3
- MOVD $32(R2), R2
-
- // unpack message blocks into 26-bit big-endian limbs
- EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
- // add 2¹²⁸ to each message block value
- VLEIB $4, $1, M_4
- VLEIB $12, $1, M_4
-
-multiply:
- // accumulate the incoming message
- VAG H_0, M_0, M_0
- VAG H_3, M_3, M_3
- VAG H_1, M_1, M_1
- VAG H_4, M_4, M_4
- VAG H_2, M_2, M_2
-
- // multiply the accumulator by the key coefficient
- MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
-
- // carry and partially reduce the partial products
- REDUCE(H_0, H_1, H_2, H_3, H_4)
-
- CMPBNE R3, $0, loop
-
-finish:
- // sum lane 0 and lane 1 and put the result in lane 1
- VZERO T_0
- VSUMQG H_0, T_0, H_0
- VSUMQG H_3, T_0, H_3
- VSUMQG H_1, T_0, H_1
- VSUMQG H_4, T_0, H_4
- VSUMQG H_2, T_0, H_2
-
- // reduce again after summation
- // TODO(mundaym): there might be a more efficient way to do this
- // now that we only have 1 active lane. For example, we could
- // simultaneously pack the values as we reduce them.
- REDUCE(H_0, H_1, H_2, H_3, H_4)
-
- // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
- // TODO(mundaym): in testing this final carry was unnecessary.
- // Needs a proof before it can be removed though.
- VESRLG $26, H_1, T_1
- VN MOD26, H_1, H_1
- VAQ T_1, H_2, H_2
- VESRLG $26, H_2, T_2
- VN MOD26, H_2, H_2
- VAQ T_2, H_3, H_3
- VESRLG $26, H_3, T_3
- VN MOD26, H_3, H_3
- VAQ T_3, H_4, H_4
-
- // h is now < 2(2¹³⁰ - 5)
- // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
- VESLG $26, H_1, H_1
- VESLG $26, H_3, H_3
- VO H_0, H_1, H_0
- VO H_2, H_3, H_2
- VESLG $4, H_2, H_2
- VLEIB $7, $48, H_1
- VSLB H_1, H_2, H_2
- VO H_0, H_2, H_0
- VLEIB $7, $104, H_1
- VSLB H_1, H_4, H_3
- VO H_3, H_0, H_0
- VLEIB $7, $24, H_1
- VSRLB H_1, H_4, H_1
-
- // update state
- VSTEG $1, H_0, 0(R1)
- VSTEG $0, H_0, 8(R1)
- VSTEG $1, H_1, 16(R1)
- RET
-
-b2: // 2 or fewer blocks remaining
- CMPBLE R3, $16, b1
-
- // Load the 2 remaining blocks (17-32 bytes remaining).
- MOVD $-17(R3), R0 // index of final byte to load modulo 16
- VL (R2), T_0 // load full 16 byte block
- VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
-
- // The Poly1305 algorithm requires that a 1 bit be appended to
- // each message block. If the final block is less than 16 bytes
- // long then it is easiest to insert the 1 before the message
- // block is split into 26-bit limbs. If, on the other hand, the
- // final message block is 16 bytes long then we append the 1 bit
- // after expansion as normal.
- MOVBZ $1, R0
- MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
- CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
- VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
-
- // Split both blocks into 26-bit limbs in the appropriate lanes.
- EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
- // Append a 1 byte to the end of the second to last block.
- VLEIB $4, $1, M_4
-
- // Append a 1 byte to the end of the last block only if it is a
- // full 16 byte block.
- CMPBNE R3, $16, 2(PC)
- VLEIB $12, $1, M_4
-
- // Finally, set up the coefficients for the final multiplication.
- // We have previously saved r and 5r in the 32-bit even indexes
- // of the R_[0-4] and R5_[1-4] coefficient registers.
- //
- // We want lane 0 to be multiplied by r² so that can be kept the
- // same. We want lane 1 to be multiplied by r so we need to move
- // the saved r value into the 32-bit odd index in lane 1 by
- // rotating the 64-bit lane by 32.
- VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
- VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
- VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
- VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
- VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
- VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
- VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
- VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
- VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
- VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
-
- MOVD $0, R3
- BR multiply
-
-skip:
- CMPBEQ R3, $0, finish
-
-b1: // 1 block remaining
-
- // Load the final block (1-16 bytes). This will be placed into
- // lane 0.
- MOVD $-1(R3), R0
- VLL R0, (R2), T_0 // pad to 16 bytes with zeros
-
- // The Poly1305 algorithm requires that a 1 bit be appended to
- // each message block. If the final block is less than 16 bytes
- // long then it is easiest to insert the 1 before the message
- // block is split into 26-bit limbs. If, on the other hand, the
- // final message block is 16 bytes long then we append the 1 bit
- // after expansion as normal.
- MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, T_0
-
- // Set the message block in lane 1 to the value 0 so that it
- // can be accumulated without affecting the final result.
- VZERO T_1
-
- // Split the final message block into 26-bit limbs in lane 0.
- // Lane 1 will be contain 0.
- EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
- // Append a 1 byte to the end of the last block only if it is a
- // full 16 byte block.
- CMPBNE R3, $16, 2(PC)
- VLEIB $4, $1, M_4
-
- // We have previously saved r and 5r in the 32-bit even indexes
- // of the R_[0-4] and R5_[1-4] coefficient registers.
- //
- // We want lane 0 to be multiplied by r so we need to move the
- // saved r value into the 32-bit odd index in lane 0. We want
- // lane 1 to be set to the value 1. This makes multiplication
- // a no-op. We do this by setting lane 1 in every register to 0
- // and then just setting the 32-bit index 3 in R_0 to 1.
- VZERO T_0
- MOVD $0, R0
- MOVD $0x10111213, R12
- VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
- VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0]
- VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0]
- VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0]
- VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0]
- VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0]
- VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
- VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
- VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
- VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
-
- // Set the value of lane 1 to be 1.
- VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
-
- MOVD $0, R3
- BR multiply
diff --git a/etcd/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/etcd/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
deleted file mode 100644
index f3c3242a04..0000000000
--- a/etcd/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package secretbox encrypts and authenticates small messages.
-
-Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
-secret-key cryptography. The length of messages is not hidden.
-
-It is the caller's responsibility to ensure the uniqueness of nonces—for
-example, by using nonce 1 for the first message, nonce 2 for the second
-message, etc. Nonces are long enough that randomly generated nonces have
-negligible risk of collision.
-
-Messages should be small because:
-
-1. The whole message needs to be held in memory to be processed.
-
-2. Using large messages pressures implementations on small machines to decrypt
-and process plaintext before authenticating it. This is very dangerous, and
-this API does not allow it, but a protocol that uses excessive message sizes
-might present some implementations with no other choice.
-
-3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
-
-4. Performance may be improved by working with messages that fit into data caches.
-
-Thus large amounts of data should be chunked so that each message is small.
-(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
-chunk size.
-
-This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
-*/
-package secretbox // import "golang.org/x/crypto/nacl/secretbox"
-
-import (
- "golang.org/x/crypto/internal/alias"
- "golang.org/x/crypto/internal/poly1305"
- "golang.org/x/crypto/salsa20/salsa"
-)
-
-// Overhead is the number of bytes of overhead when boxing a message.
-const Overhead = poly1305.TagSize
-
-// setup produces a sub-key and Salsa20 counter given a nonce and key.
-func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
- // We use XSalsa20 for encryption so first we need to generate a
- // key and nonce with HSalsa20.
- var hNonce [16]byte
- copy(hNonce[:], nonce[:])
- salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
-
- // The final 8 bytes of the original nonce form the new nonce.
- copy(counter[:], nonce[16:])
-}
-
-// sliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
-
-// Seal appends an encrypted and authenticated copy of message to out, which
-// must not overlap message. The key and nonce pair must be unique for each
-// distinct message and the output will be Overhead bytes longer than message.
-func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
- var subKey [32]byte
- var counter [16]byte
- setup(&subKey, &counter, nonce, key)
-
- // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
- // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
- // keystream as a side effect.
- var firstBlock [64]byte
- salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
-
- var poly1305Key [32]byte
- copy(poly1305Key[:], firstBlock[:])
-
- ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
- if alias.AnyOverlap(out, message) {
- panic("nacl: invalid buffer overlap")
- }
-
- // We XOR up to 32 bytes of message with the keystream generated from
- // the first block.
- firstMessageBlock := message
- if len(firstMessageBlock) > 32 {
- firstMessageBlock = firstMessageBlock[:32]
- }
-
- tagOut := out
- out = out[poly1305.TagSize:]
- for i, x := range firstMessageBlock {
- out[i] = firstBlock[32+i] ^ x
- }
- message = message[len(firstMessageBlock):]
- ciphertext := out
- out = out[len(firstMessageBlock):]
-
- // Now encrypt the rest.
- counter[8] = 1
- salsa.XORKeyStream(out, message, &counter, &subKey)
-
- var tag [poly1305.TagSize]byte
- poly1305.Sum(&tag, ciphertext, &poly1305Key)
- copy(tagOut, tag[:])
-
- return ret
-}
-
-// Open authenticates and decrypts a box produced by Seal and appends the
-// message to out, which must not overlap box. The output will be Overhead
-// bytes smaller than box.
-func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
- if len(box) < Overhead {
- return nil, false
- }
-
- var subKey [32]byte
- var counter [16]byte
- setup(&subKey, &counter, nonce, key)
-
- // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
- // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
- // keystream as a side effect.
- var firstBlock [64]byte
- salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
-
- var poly1305Key [32]byte
- copy(poly1305Key[:], firstBlock[:])
- var tag [poly1305.TagSize]byte
- copy(tag[:], box)
-
- if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
- return nil, false
- }
-
- ret, out := sliceForAppend(out, len(box)-Overhead)
- if alias.AnyOverlap(out, box) {
- panic("nacl: invalid buffer overlap")
- }
-
- // We XOR up to 32 bytes of box with the keystream generated from
- // the first block.
- box = box[Overhead:]
- firstMessageBlock := box
- if len(firstMessageBlock) > 32 {
- firstMessageBlock = firstMessageBlock[:32]
- }
- for i, x := range firstMessageBlock {
- out[i] = firstBlock[32+i] ^ x
- }
-
- box = box[len(firstMessageBlock):]
- out = out[len(firstMessageBlock):]
-
- // Now decrypt the rest.
- counter[8] = 1
- salsa.XORKeyStream(out, box, &counter, &subKey)
-
- return ret, true
-}
diff --git a/etcd/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/etcd/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
deleted file mode 100644
index 904b57e01d..0000000000
--- a/etcd/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
-2898 / PKCS #5 v2.0.
-
-A key derivation function is useful when encrypting data based on a password
-or any other not-fully-random data. It uses a pseudorandom function to derive
-a secure encryption key based on the password.
-
-While v2.0 of the standard defines only one pseudorandom function to use,
-HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
-Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
-choose, you can pass the `New` functions from the different SHA packages to
-pbkdf2.Key.
-*/
-package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
-
-import (
- "crypto/hmac"
- "hash"
-)
-
-// Key derives a key from the password, salt and iteration count, returning a
-// []byte of length keylen that can be used as cryptographic key. The key is
-// derived based on the method described as PBKDF2 with the HMAC variant using
-// the supplied hash function.
-//
-// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
-// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
-// doing:
-//
-// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
-//
-// Remember to get a good random salt. At least 8 bytes is recommended by the
-// RFC.
-//
-// Using a higher iteration count will increase the cost of an exhaustive
-// search but will also make derivation proportionally slower.
-func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
- prf := hmac.New(h, password)
- hashLen := prf.Size()
- numBlocks := (keyLen + hashLen - 1) / hashLen
-
- var buf [4]byte
- dk := make([]byte, 0, numBlocks*hashLen)
- U := make([]byte, hashLen)
- for block := 1; block <= numBlocks; block++ {
- // N.B.: || means concatenation, ^ means XOR
- // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
- // U_1 = PRF(password, salt || uint(i))
- prf.Reset()
- prf.Write(salt)
- buf[0] = byte(block >> 24)
- buf[1] = byte(block >> 16)
- buf[2] = byte(block >> 8)
- buf[3] = byte(block)
- prf.Write(buf[:4])
- dk = prf.Sum(dk)
- T := dk[len(dk)-hashLen:]
- copy(U, T)
-
- // U_n = PRF(password, U_(n-1))
- for n := 2; n <= iter; n++ {
- prf.Reset()
- prf.Write(U)
- U = U[:0]
- U = prf.Sum(U)
- for x := range U {
- T[x] ^= U[x]
- }
- }
- }
- return dk[:keyLen]
-}
diff --git a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/etcd/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
deleted file mode 100644
index 4c96147c86..0000000000
--- a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package salsa provides low-level access to functions in the Salsa family.
-package salsa // import "golang.org/x/crypto/salsa20/salsa"
-
-// Sigma is the Salsa20 constant for 256-bit keys.
-var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
-
-// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
-// key k, and 16-byte constant c, and puts the result into the 32-byte array
-// out.
-func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
- x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
- x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
- x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
- x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
- x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
- x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
- x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
- x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
- x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
- x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
- x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
- x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
- x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
- x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
- x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
- x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
-
- for i := 0; i < 20; i += 2 {
- u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
- u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
- u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
- u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
-
- u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
- u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
- u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
- u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
-
- u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
- u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
- u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
- u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
-
- u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
- u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
- u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
- u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
-
- u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
- u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
- u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
- u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
-
- u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
- u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
- u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
- u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
-
- u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
- u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
- u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
- u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
-
- u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
- u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
- u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
- u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
- }
- out[0] = byte(x0)
- out[1] = byte(x0 >> 8)
- out[2] = byte(x0 >> 16)
- out[3] = byte(x0 >> 24)
-
- out[4] = byte(x5)
- out[5] = byte(x5 >> 8)
- out[6] = byte(x5 >> 16)
- out[7] = byte(x5 >> 24)
-
- out[8] = byte(x10)
- out[9] = byte(x10 >> 8)
- out[10] = byte(x10 >> 16)
- out[11] = byte(x10 >> 24)
-
- out[12] = byte(x15)
- out[13] = byte(x15 >> 8)
- out[14] = byte(x15 >> 16)
- out[15] = byte(x15 >> 24)
-
- out[16] = byte(x6)
- out[17] = byte(x6 >> 8)
- out[18] = byte(x6 >> 16)
- out[19] = byte(x6 >> 24)
-
- out[20] = byte(x7)
- out[21] = byte(x7 >> 8)
- out[22] = byte(x7 >> 16)
- out[23] = byte(x7 >> 24)
-
- out[24] = byte(x8)
- out[25] = byte(x8 >> 8)
- out[26] = byte(x8 >> 16)
- out[27] = byte(x8 >> 24)
-
- out[28] = byte(x9)
- out[29] = byte(x9 >> 8)
- out[30] = byte(x9 >> 16)
- out[31] = byte(x9 >> 24)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
deleted file mode 100644
index 9bfc0927ce..0000000000
--- a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package salsa
-
-// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
-// the result into the 64-byte array out. The input and output may be the same array.
-func Core208(out *[64]byte, in *[64]byte) {
- j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
- j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
- j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
- j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
- j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
- j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
- j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
- j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
- j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
- j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
- j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
- j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
- j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
- j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
- j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
- j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
-
- x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
- x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
-
- for i := 0; i < 8; i += 2 {
- u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
- u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
- u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
- u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
-
- u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
- u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
- u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
- u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
-
- u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
- u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
- u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
- u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
-
- u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
- u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
- u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
- u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
-
- u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
- u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
- u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
- u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
-
- u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
- u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
- u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
- u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
-
- u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
- u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
- u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
- u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
-
- u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
- u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
- u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
- u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
- }
- x0 += j0
- x1 += j1
- x2 += j2
- x3 += j3
- x4 += j4
- x5 += j5
- x6 += j6
- x7 += j7
- x8 += j8
- x9 += j9
- x10 += j10
- x11 += j11
- x12 += j12
- x13 += j13
- x14 += j14
- x15 += j15
-
- out[0] = byte(x0)
- out[1] = byte(x0 >> 8)
- out[2] = byte(x0 >> 16)
- out[3] = byte(x0 >> 24)
-
- out[4] = byte(x1)
- out[5] = byte(x1 >> 8)
- out[6] = byte(x1 >> 16)
- out[7] = byte(x1 >> 24)
-
- out[8] = byte(x2)
- out[9] = byte(x2 >> 8)
- out[10] = byte(x2 >> 16)
- out[11] = byte(x2 >> 24)
-
- out[12] = byte(x3)
- out[13] = byte(x3 >> 8)
- out[14] = byte(x3 >> 16)
- out[15] = byte(x3 >> 24)
-
- out[16] = byte(x4)
- out[17] = byte(x4 >> 8)
- out[18] = byte(x4 >> 16)
- out[19] = byte(x4 >> 24)
-
- out[20] = byte(x5)
- out[21] = byte(x5 >> 8)
- out[22] = byte(x5 >> 16)
- out[23] = byte(x5 >> 24)
-
- out[24] = byte(x6)
- out[25] = byte(x6 >> 8)
- out[26] = byte(x6 >> 16)
- out[27] = byte(x6 >> 24)
-
- out[28] = byte(x7)
- out[29] = byte(x7 >> 8)
- out[30] = byte(x7 >> 16)
- out[31] = byte(x7 >> 24)
-
- out[32] = byte(x8)
- out[33] = byte(x8 >> 8)
- out[34] = byte(x8 >> 16)
- out[35] = byte(x8 >> 24)
-
- out[36] = byte(x9)
- out[37] = byte(x9 >> 8)
- out[38] = byte(x9 >> 16)
- out[39] = byte(x9 >> 24)
-
- out[40] = byte(x10)
- out[41] = byte(x10 >> 8)
- out[42] = byte(x10 >> 16)
- out[43] = byte(x10 >> 24)
-
- out[44] = byte(x11)
- out[45] = byte(x11 >> 8)
- out[46] = byte(x11 >> 16)
- out[47] = byte(x11 >> 24)
-
- out[48] = byte(x12)
- out[49] = byte(x12 >> 8)
- out[50] = byte(x12 >> 16)
- out[51] = byte(x12 >> 24)
-
- out[52] = byte(x13)
- out[53] = byte(x13 >> 8)
- out[54] = byte(x13 >> 16)
- out[55] = byte(x13 >> 24)
-
- out[56] = byte(x14)
- out[57] = byte(x14 >> 8)
- out[58] = byte(x14 >> 16)
- out[59] = byte(x14 >> 24)
-
- out[60] = byte(x15)
- out[61] = byte(x15 >> 8)
- out[62] = byte(x15 >> 16)
- out[63] = byte(x15 >> 24)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
deleted file mode 100644
index c400dfcf7b..0000000000
--- a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && !purego && gc
-// +build amd64,!purego,gc
-
-package salsa
-
-//go:noescape
-
-// salsa2020XORKeyStream is implemented in salsa20_amd64.s.
-func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
-
-// XORKeyStream crypts bytes from in to out using the given key and counters.
-// In and out must overlap entirely or not at all. Counter
-// contains the raw salsa20 counter bytes (both nonce and block counter).
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- if len(in) == 0 {
- return
- }
- _ = out[len(in)-1]
- salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
-}
diff --git a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
deleted file mode 100644
index c089277204..0000000000
--- a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
+++ /dev/null
@@ -1,881 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && !purego && gc
-// +build amd64,!purego,gc
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
-// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
-TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
- MOVQ out+0(FP),DI
- MOVQ in+8(FP),SI
- MOVQ n+16(FP),DX
- MOVQ nonce+24(FP),CX
- MOVQ key+32(FP),R8
-
- MOVQ SP,R12
- ADDQ $31, R12
- ANDQ $~31, R12
-
- MOVQ DX,R9
- MOVQ CX,DX
- MOVQ R8,R10
- CMPQ R9,$0
- JBE DONE
- START:
- MOVL 20(R10),CX
- MOVL 0(R10),R8
- MOVL 0(DX),AX
- MOVL 16(R10),R11
- MOVL CX,0(R12)
- MOVL R8, 4 (R12)
- MOVL AX, 8 (R12)
- MOVL R11, 12 (R12)
- MOVL 8(DX),CX
- MOVL 24(R10),R8
- MOVL 4(R10),AX
- MOVL 4(DX),R11
- MOVL CX,16(R12)
- MOVL R8, 20 (R12)
- MOVL AX, 24 (R12)
- MOVL R11, 28 (R12)
- MOVL 12(DX),CX
- MOVL 12(R10),DX
- MOVL 28(R10),R8
- MOVL 8(R10),AX
- MOVL DX,32(R12)
- MOVL CX, 36 (R12)
- MOVL R8, 40 (R12)
- MOVL AX, 44 (R12)
- MOVQ $1634760805,DX
- MOVQ $857760878,CX
- MOVQ $2036477234,R8
- MOVQ $1797285236,AX
- MOVL DX,48(R12)
- MOVL CX, 52 (R12)
- MOVL R8, 56 (R12)
- MOVL AX, 60 (R12)
- CMPQ R9,$256
- JB BYTESBETWEEN1AND255
- MOVOA 48(R12),X0
- PSHUFL $0X55,X0,X1
- PSHUFL $0XAA,X0,X2
- PSHUFL $0XFF,X0,X3
- PSHUFL $0X00,X0,X0
- MOVOA X1,64(R12)
- MOVOA X2,80(R12)
- MOVOA X3,96(R12)
- MOVOA X0,112(R12)
- MOVOA 0(R12),X0
- PSHUFL $0XAA,X0,X1
- PSHUFL $0XFF,X0,X2
- PSHUFL $0X00,X0,X3
- PSHUFL $0X55,X0,X0
- MOVOA X1,128(R12)
- MOVOA X2,144(R12)
- MOVOA X3,160(R12)
- MOVOA X0,176(R12)
- MOVOA 16(R12),X0
- PSHUFL $0XFF,X0,X1
- PSHUFL $0X55,X0,X2
- PSHUFL $0XAA,X0,X0
- MOVOA X1,192(R12)
- MOVOA X2,208(R12)
- MOVOA X0,224(R12)
- MOVOA 32(R12),X0
- PSHUFL $0X00,X0,X1
- PSHUFL $0XAA,X0,X2
- PSHUFL $0XFF,X0,X0
- MOVOA X1,240(R12)
- MOVOA X2,256(R12)
- MOVOA X0,272(R12)
- BYTESATLEAST256:
- MOVL 16(R12),DX
- MOVL 36 (R12),CX
- MOVL DX,288(R12)
- MOVL CX,304(R12)
- SHLQ $32,CX
- ADDQ CX,DX
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX, 292 (R12)
- MOVL CX, 308 (R12)
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX, 296 (R12)
- MOVL CX, 312 (R12)
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX, 300 (R12)
- MOVL CX, 316 (R12)
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX,16(R12)
- MOVL CX, 36 (R12)
- MOVQ R9,352(R12)
- MOVQ $20,DX
- MOVOA 64(R12),X0
- MOVOA 80(R12),X1
- MOVOA 96(R12),X2
- MOVOA 256(R12),X3
- MOVOA 272(R12),X4
- MOVOA 128(R12),X5
- MOVOA 144(R12),X6
- MOVOA 176(R12),X7
- MOVOA 192(R12),X8
- MOVOA 208(R12),X9
- MOVOA 224(R12),X10
- MOVOA 304(R12),X11
- MOVOA 112(R12),X12
- MOVOA 160(R12),X13
- MOVOA 240(R12),X14
- MOVOA 288(R12),X15
- MAINLOOP1:
- MOVOA X1,320(R12)
- MOVOA X2,336(R12)
- MOVOA X13,X1
- PADDL X12,X1
- MOVOA X1,X2
- PSLLL $7,X1
- PXOR X1,X14
- PSRLL $25,X2
- PXOR X2,X14
- MOVOA X7,X1
- PADDL X0,X1
- MOVOA X1,X2
- PSLLL $7,X1
- PXOR X1,X11
- PSRLL $25,X2
- PXOR X2,X11
- MOVOA X12,X1
- PADDL X14,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X15
- PSRLL $23,X2
- PXOR X2,X15
- MOVOA X0,X1
- PADDL X11,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X9
- PSRLL $23,X2
- PXOR X2,X9
- MOVOA X14,X1
- PADDL X15,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X13
- PSRLL $19,X2
- PXOR X2,X13
- MOVOA X11,X1
- PADDL X9,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X7
- PSRLL $19,X2
- PXOR X2,X7
- MOVOA X15,X1
- PADDL X13,X1
- MOVOA X1,X2
- PSLLL $18,X1
- PXOR X1,X12
- PSRLL $14,X2
- PXOR X2,X12
- MOVOA 320(R12),X1
- MOVOA X12,320(R12)
- MOVOA X9,X2
- PADDL X7,X2
- MOVOA X2,X12
- PSLLL $18,X2
- PXOR X2,X0
- PSRLL $14,X12
- PXOR X12,X0
- MOVOA X5,X2
- PADDL X1,X2
- MOVOA X2,X12
- PSLLL $7,X2
- PXOR X2,X3
- PSRLL $25,X12
- PXOR X12,X3
- MOVOA 336(R12),X2
- MOVOA X0,336(R12)
- MOVOA X6,X0
- PADDL X2,X0
- MOVOA X0,X12
- PSLLL $7,X0
- PXOR X0,X4
- PSRLL $25,X12
- PXOR X12,X4
- MOVOA X1,X0
- PADDL X3,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X10
- PSRLL $23,X12
- PXOR X12,X10
- MOVOA X2,X0
- PADDL X4,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X8
- PSRLL $23,X12
- PXOR X12,X8
- MOVOA X3,X0
- PADDL X10,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X5
- PSRLL $19,X12
- PXOR X12,X5
- MOVOA X4,X0
- PADDL X8,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X6
- PSRLL $19,X12
- PXOR X12,X6
- MOVOA X10,X0
- PADDL X5,X0
- MOVOA X0,X12
- PSLLL $18,X0
- PXOR X0,X1
- PSRLL $14,X12
- PXOR X12,X1
- MOVOA 320(R12),X0
- MOVOA X1,320(R12)
- MOVOA X4,X1
- PADDL X0,X1
- MOVOA X1,X12
- PSLLL $7,X1
- PXOR X1,X7
- PSRLL $25,X12
- PXOR X12,X7
- MOVOA X8,X1
- PADDL X6,X1
- MOVOA X1,X12
- PSLLL $18,X1
- PXOR X1,X2
- PSRLL $14,X12
- PXOR X12,X2
- MOVOA 336(R12),X12
- MOVOA X2,336(R12)
- MOVOA X14,X1
- PADDL X12,X1
- MOVOA X1,X2
- PSLLL $7,X1
- PXOR X1,X5
- PSRLL $25,X2
- PXOR X2,X5
- MOVOA X0,X1
- PADDL X7,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X10
- PSRLL $23,X2
- PXOR X2,X10
- MOVOA X12,X1
- PADDL X5,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X8
- PSRLL $23,X2
- PXOR X2,X8
- MOVOA X7,X1
- PADDL X10,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X4
- PSRLL $19,X2
- PXOR X2,X4
- MOVOA X5,X1
- PADDL X8,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X14
- PSRLL $19,X2
- PXOR X2,X14
- MOVOA X10,X1
- PADDL X4,X1
- MOVOA X1,X2
- PSLLL $18,X1
- PXOR X1,X0
- PSRLL $14,X2
- PXOR X2,X0
- MOVOA 320(R12),X1
- MOVOA X0,320(R12)
- MOVOA X8,X0
- PADDL X14,X0
- MOVOA X0,X2
- PSLLL $18,X0
- PXOR X0,X12
- PSRLL $14,X2
- PXOR X2,X12
- MOVOA X11,X0
- PADDL X1,X0
- MOVOA X0,X2
- PSLLL $7,X0
- PXOR X0,X6
- PSRLL $25,X2
- PXOR X2,X6
- MOVOA 336(R12),X2
- MOVOA X12,336(R12)
- MOVOA X3,X0
- PADDL X2,X0
- MOVOA X0,X12
- PSLLL $7,X0
- PXOR X0,X13
- PSRLL $25,X12
- PXOR X12,X13
- MOVOA X1,X0
- PADDL X6,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X15
- PSRLL $23,X12
- PXOR X12,X15
- MOVOA X2,X0
- PADDL X13,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X9
- PSRLL $23,X12
- PXOR X12,X9
- MOVOA X6,X0
- PADDL X15,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X11
- PSRLL $19,X12
- PXOR X12,X11
- MOVOA X13,X0
- PADDL X9,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X3
- PSRLL $19,X12
- PXOR X12,X3
- MOVOA X15,X0
- PADDL X11,X0
- MOVOA X0,X12
- PSLLL $18,X0
- PXOR X0,X1
- PSRLL $14,X12
- PXOR X12,X1
- MOVOA X9,X0
- PADDL X3,X0
- MOVOA X0,X12
- PSLLL $18,X0
- PXOR X0,X2
- PSRLL $14,X12
- PXOR X12,X2
- MOVOA 320(R12),X12
- MOVOA 336(R12),X0
- SUBQ $2,DX
- JA MAINLOOP1
- PADDL 112(R12),X12
- PADDL 176(R12),X7
- PADDL 224(R12),X10
- PADDL 272(R12),X4
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- PSHUFL $0X39,X12,X12
- PSHUFL $0X39,X7,X7
- PSHUFL $0X39,X10,X10
- PSHUFL $0X39,X4,X4
- XORL 0(SI),DX
- XORL 4(SI),CX
- XORL 8(SI),R8
- XORL 12(SI),R9
- MOVL DX,0(DI)
- MOVL CX,4(DI)
- MOVL R8,8(DI)
- MOVL R9,12(DI)
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- PSHUFL $0X39,X12,X12
- PSHUFL $0X39,X7,X7
- PSHUFL $0X39,X10,X10
- PSHUFL $0X39,X4,X4
- XORL 64(SI),DX
- XORL 68(SI),CX
- XORL 72(SI),R8
- XORL 76(SI),R9
- MOVL DX,64(DI)
- MOVL CX,68(DI)
- MOVL R8,72(DI)
- MOVL R9,76(DI)
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- PSHUFL $0X39,X12,X12
- PSHUFL $0X39,X7,X7
- PSHUFL $0X39,X10,X10
- PSHUFL $0X39,X4,X4
- XORL 128(SI),DX
- XORL 132(SI),CX
- XORL 136(SI),R8
- XORL 140(SI),R9
- MOVL DX,128(DI)
- MOVL CX,132(DI)
- MOVL R8,136(DI)
- MOVL R9,140(DI)
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- XORL 192(SI),DX
- XORL 196(SI),CX
- XORL 200(SI),R8
- XORL 204(SI),R9
- MOVL DX,192(DI)
- MOVL CX,196(DI)
- MOVL R8,200(DI)
- MOVL R9,204(DI)
- PADDL 240(R12),X14
- PADDL 64(R12),X0
- PADDL 128(R12),X5
- PADDL 192(R12),X8
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- PSHUFL $0X39,X14,X14
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X5,X5
- PSHUFL $0X39,X8,X8
- XORL 16(SI),DX
- XORL 20(SI),CX
- XORL 24(SI),R8
- XORL 28(SI),R9
- MOVL DX,16(DI)
- MOVL CX,20(DI)
- MOVL R8,24(DI)
- MOVL R9,28(DI)
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- PSHUFL $0X39,X14,X14
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X5,X5
- PSHUFL $0X39,X8,X8
- XORL 80(SI),DX
- XORL 84(SI),CX
- XORL 88(SI),R8
- XORL 92(SI),R9
- MOVL DX,80(DI)
- MOVL CX,84(DI)
- MOVL R8,88(DI)
- MOVL R9,92(DI)
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- PSHUFL $0X39,X14,X14
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X5,X5
- PSHUFL $0X39,X8,X8
- XORL 144(SI),DX
- XORL 148(SI),CX
- XORL 152(SI),R8
- XORL 156(SI),R9
- MOVL DX,144(DI)
- MOVL CX,148(DI)
- MOVL R8,152(DI)
- MOVL R9,156(DI)
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- XORL 208(SI),DX
- XORL 212(SI),CX
- XORL 216(SI),R8
- XORL 220(SI),R9
- MOVL DX,208(DI)
- MOVL CX,212(DI)
- MOVL R8,216(DI)
- MOVL R9,220(DI)
- PADDL 288(R12),X15
- PADDL 304(R12),X11
- PADDL 80(R12),X1
- PADDL 144(R12),X6
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- PSHUFL $0X39,X15,X15
- PSHUFL $0X39,X11,X11
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X6,X6
- XORL 32(SI),DX
- XORL 36(SI),CX
- XORL 40(SI),R8
- XORL 44(SI),R9
- MOVL DX,32(DI)
- MOVL CX,36(DI)
- MOVL R8,40(DI)
- MOVL R9,44(DI)
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- PSHUFL $0X39,X15,X15
- PSHUFL $0X39,X11,X11
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X6,X6
- XORL 96(SI),DX
- XORL 100(SI),CX
- XORL 104(SI),R8
- XORL 108(SI),R9
- MOVL DX,96(DI)
- MOVL CX,100(DI)
- MOVL R8,104(DI)
- MOVL R9,108(DI)
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- PSHUFL $0X39,X15,X15
- PSHUFL $0X39,X11,X11
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X6,X6
- XORL 160(SI),DX
- XORL 164(SI),CX
- XORL 168(SI),R8
- XORL 172(SI),R9
- MOVL DX,160(DI)
- MOVL CX,164(DI)
- MOVL R8,168(DI)
- MOVL R9,172(DI)
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- XORL 224(SI),DX
- XORL 228(SI),CX
- XORL 232(SI),R8
- XORL 236(SI),R9
- MOVL DX,224(DI)
- MOVL CX,228(DI)
- MOVL R8,232(DI)
- MOVL R9,236(DI)
- PADDL 160(R12),X13
- PADDL 208(R12),X9
- PADDL 256(R12),X3
- PADDL 96(R12),X2
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- PSHUFL $0X39,X13,X13
- PSHUFL $0X39,X9,X9
- PSHUFL $0X39,X3,X3
- PSHUFL $0X39,X2,X2
- XORL 48(SI),DX
- XORL 52(SI),CX
- XORL 56(SI),R8
- XORL 60(SI),R9
- MOVL DX,48(DI)
- MOVL CX,52(DI)
- MOVL R8,56(DI)
- MOVL R9,60(DI)
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- PSHUFL $0X39,X13,X13
- PSHUFL $0X39,X9,X9
- PSHUFL $0X39,X3,X3
- PSHUFL $0X39,X2,X2
- XORL 112(SI),DX
- XORL 116(SI),CX
- XORL 120(SI),R8
- XORL 124(SI),R9
- MOVL DX,112(DI)
- MOVL CX,116(DI)
- MOVL R8,120(DI)
- MOVL R9,124(DI)
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- PSHUFL $0X39,X13,X13
- PSHUFL $0X39,X9,X9
- PSHUFL $0X39,X3,X3
- PSHUFL $0X39,X2,X2
- XORL 176(SI),DX
- XORL 180(SI),CX
- XORL 184(SI),R8
- XORL 188(SI),R9
- MOVL DX,176(DI)
- MOVL CX,180(DI)
- MOVL R8,184(DI)
- MOVL R9,188(DI)
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- XORL 240(SI),DX
- XORL 244(SI),CX
- XORL 248(SI),R8
- XORL 252(SI),R9
- MOVL DX,240(DI)
- MOVL CX,244(DI)
- MOVL R8,248(DI)
- MOVL R9,252(DI)
- MOVQ 352(R12),R9
- SUBQ $256,R9
- ADDQ $256,SI
- ADDQ $256,DI
- CMPQ R9,$256
- JAE BYTESATLEAST256
- CMPQ R9,$0
- JBE DONE
- BYTESBETWEEN1AND255:
- CMPQ R9,$64
- JAE NOCOPY
- MOVQ DI,DX
- LEAQ 360(R12),DI
- MOVQ R9,CX
- REP; MOVSB
- LEAQ 360(R12),DI
- LEAQ 360(R12),SI
- NOCOPY:
- MOVQ R9,352(R12)
- MOVOA 48(R12),X0
- MOVOA 0(R12),X1
- MOVOA 16(R12),X2
- MOVOA 32(R12),X3
- MOVOA X1,X4
- MOVQ $20,CX
- MAINLOOP2:
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X3
- PXOR X6,X3
- PADDL X3,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X3,X3
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X1
- PSHUFL $0X4E,X2,X2
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X1,X1
- PXOR X6,X0
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X1
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X1,X1
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X3
- PSHUFL $0X4E,X2,X2
- PXOR X6,X3
- PADDL X3,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X3,X3
- PXOR X6,X0
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X3
- PXOR X6,X3
- PADDL X3,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X3,X3
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X1
- PSHUFL $0X4E,X2,X2
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X1,X1
- PXOR X6,X0
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X1
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X1,X1
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X3
- PSHUFL $0X4E,X2,X2
- PXOR X6,X3
- SUBQ $4,CX
- PADDL X3,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PXOR X7,X7
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X3,X3
- PXOR X6,X0
- JA MAINLOOP2
- PADDL 48(R12),X0
- PADDL 0(R12),X1
- PADDL 16(R12),X2
- PADDL 32(R12),X3
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X2,X2
- PSHUFL $0X39,X3,X3
- XORL 0(SI),CX
- XORL 48(SI),R8
- XORL 32(SI),R9
- XORL 16(SI),AX
- MOVL CX,0(DI)
- MOVL R8,48(DI)
- MOVL R9,32(DI)
- MOVL AX,16(DI)
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X2,X2
- PSHUFL $0X39,X3,X3
- XORL 20(SI),CX
- XORL 4(SI),R8
- XORL 52(SI),R9
- XORL 36(SI),AX
- MOVL CX,20(DI)
- MOVL R8,4(DI)
- MOVL R9,52(DI)
- MOVL AX,36(DI)
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X2,X2
- PSHUFL $0X39,X3,X3
- XORL 40(SI),CX
- XORL 24(SI),R8
- XORL 8(SI),R9
- XORL 56(SI),AX
- MOVL CX,40(DI)
- MOVL R8,24(DI)
- MOVL R9,8(DI)
- MOVL AX,56(DI)
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- XORL 60(SI),CX
- XORL 44(SI),R8
- XORL 28(SI),R9
- XORL 12(SI),AX
- MOVL CX,60(DI)
- MOVL R8,44(DI)
- MOVL R9,28(DI)
- MOVL AX,12(DI)
- MOVQ 352(R12),R9
- MOVL 16(R12),CX
- MOVL 36 (R12),R8
- ADDQ $1,CX
- SHLQ $32,R8
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $32,R8
- MOVL CX,16(R12)
- MOVL R8, 36 (R12)
- CMPQ R9,$64
- JA BYTESATLEAST65
- JAE BYTESATLEAST64
- MOVQ DI,SI
- MOVQ DX,DI
- MOVQ R9,CX
- REP; MOVSB
- BYTESATLEAST64:
- DONE:
- RET
- BYTESATLEAST65:
- SUBQ $64,R9
- ADDQ $64,DI
- ADDQ $64,SI
- JMP BYTESBETWEEN1AND255
diff --git a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
deleted file mode 100644
index 4392cc1ac7..0000000000
--- a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || purego || !gc
-// +build !amd64 purego !gc
-
-package salsa
-
-// XORKeyStream crypts bytes from in to out using the given key and counters.
-// In and out must overlap entirely or not at all. Counter
-// contains the raw salsa20 counter bytes (both nonce and block counter).
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- genericXORKeyStream(out, in, counter, key)
-}
diff --git a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
deleted file mode 100644
index 68169c6d68..0000000000
--- a/etcd/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package salsa
-
-const rounds = 20
-
-// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
-// and 16-byte constant c, and puts the result into 64-byte array out.
-func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
- j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
- j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
- j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
- j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
- j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
- j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
- j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
- j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
- j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
- j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
- j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
- j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
- j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
- j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
- j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
- j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
-
- x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
- x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
-
- for i := 0; i < rounds; i += 2 {
- u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
- u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
- u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
- u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
-
- u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
- u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
- u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
- u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
-
- u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
- u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
- u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
- u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
-
- u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
- u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
- u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
- u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
-
- u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
- u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
- u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
- u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
-
- u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
- u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
- u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
- u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
-
- u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
- u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
- u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
- u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
-
- u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
- u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
- u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
- u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
- }
- x0 += j0
- x1 += j1
- x2 += j2
- x3 += j3
- x4 += j4
- x5 += j5
- x6 += j6
- x7 += j7
- x8 += j8
- x9 += j9
- x10 += j10
- x11 += j11
- x12 += j12
- x13 += j13
- x14 += j14
- x15 += j15
-
- out[0] = byte(x0)
- out[1] = byte(x0 >> 8)
- out[2] = byte(x0 >> 16)
- out[3] = byte(x0 >> 24)
-
- out[4] = byte(x1)
- out[5] = byte(x1 >> 8)
- out[6] = byte(x1 >> 16)
- out[7] = byte(x1 >> 24)
-
- out[8] = byte(x2)
- out[9] = byte(x2 >> 8)
- out[10] = byte(x2 >> 16)
- out[11] = byte(x2 >> 24)
-
- out[12] = byte(x3)
- out[13] = byte(x3 >> 8)
- out[14] = byte(x3 >> 16)
- out[15] = byte(x3 >> 24)
-
- out[16] = byte(x4)
- out[17] = byte(x4 >> 8)
- out[18] = byte(x4 >> 16)
- out[19] = byte(x4 >> 24)
-
- out[20] = byte(x5)
- out[21] = byte(x5 >> 8)
- out[22] = byte(x5 >> 16)
- out[23] = byte(x5 >> 24)
-
- out[24] = byte(x6)
- out[25] = byte(x6 >> 8)
- out[26] = byte(x6 >> 16)
- out[27] = byte(x6 >> 24)
-
- out[28] = byte(x7)
- out[29] = byte(x7 >> 8)
- out[30] = byte(x7 >> 16)
- out[31] = byte(x7 >> 24)
-
- out[32] = byte(x8)
- out[33] = byte(x8 >> 8)
- out[34] = byte(x8 >> 16)
- out[35] = byte(x8 >> 24)
-
- out[36] = byte(x9)
- out[37] = byte(x9 >> 8)
- out[38] = byte(x9 >> 16)
- out[39] = byte(x9 >> 24)
-
- out[40] = byte(x10)
- out[41] = byte(x10 >> 8)
- out[42] = byte(x10 >> 16)
- out[43] = byte(x10 >> 24)
-
- out[44] = byte(x11)
- out[45] = byte(x11 >> 8)
- out[46] = byte(x11 >> 16)
- out[47] = byte(x11 >> 24)
-
- out[48] = byte(x12)
- out[49] = byte(x12 >> 8)
- out[50] = byte(x12 >> 16)
- out[51] = byte(x12 >> 24)
-
- out[52] = byte(x13)
- out[53] = byte(x13 >> 8)
- out[54] = byte(x13 >> 16)
- out[55] = byte(x13 >> 24)
-
- out[56] = byte(x14)
- out[57] = byte(x14 >> 8)
- out[58] = byte(x14 >> 16)
- out[59] = byte(x14 >> 24)
-
- out[60] = byte(x15)
- out[61] = byte(x15 >> 8)
- out[62] = byte(x15 >> 16)
- out[63] = byte(x15 >> 24)
-}
-
-// genericXORKeyStream is the generic implementation of XORKeyStream to be used
-// when no assembly implementation is available.
-func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- var block [64]byte
- var counterCopy [16]byte
- copy(counterCopy[:], counter[:])
-
- for len(in) >= 64 {
- core(&block, &counterCopy, key, &Sigma)
- for i, x := range block {
- out[i] = in[i] ^ x
- }
- u := uint32(1)
- for i := 8; i < 16; i++ {
- u += uint32(counterCopy[i])
- counterCopy[i] = byte(u)
- u >>= 8
- }
- in = in[64:]
- out = out[64:]
- }
-
- if len(in) > 0 {
- core(&block, &counterCopy, key, &Sigma)
- for i, v := range in {
- out[i] = v ^ block[i]
- }
- }
-}
diff --git a/etcd/vendor/golang.org/x/net/html/atom/atom.go b/etcd/vendor/golang.org/x/net/html/atom/atom.go
deleted file mode 100644
index cd0a8ac154..0000000000
--- a/etcd/vendor/golang.org/x/net/html/atom/atom.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package atom provides integer codes (also known as atoms) for a fixed set of
-// frequently occurring HTML strings: tag names and attribute keys such as "p"
-// and "id".
-//
-// Sharing an atom's name between all elements with the same tag can result in
-// fewer string allocations when tokenizing and parsing HTML. Integer
-// comparisons are also generally faster than string comparisons.
-//
-// The value of an atom's particular code is not guaranteed to stay the same
-// between versions of this package. Neither is any ordering guaranteed:
-// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
-// be dense. The only guarantees are that e.g. looking up "div" will yield
-// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
-package atom // import "golang.org/x/net/html/atom"
-
-// Atom is an integer code for a string. The zero value maps to "".
-type Atom uint32
-
-// String returns the atom's name.
-func (a Atom) String() string {
- start := uint32(a >> 8)
- n := uint32(a & 0xff)
- if start+n > uint32(len(atomText)) {
- return ""
- }
- return atomText[start : start+n]
-}
-
-func (a Atom) string() string {
- return atomText[a>>8 : a>>8+a&0xff]
-}
-
-// fnv computes the FNV hash with an arbitrary starting value h.
-func fnv(h uint32, s []byte) uint32 {
- for i := range s {
- h ^= uint32(s[i])
- h *= 16777619
- }
- return h
-}
-
-func match(s string, t []byte) bool {
- for i, c := range t {
- if s[i] != c {
- return false
- }
- }
- return true
-}
-
-// Lookup returns the atom whose name is s. It returns zero if there is no
-// such atom. The lookup is case sensitive.
-func Lookup(s []byte) Atom {
- if len(s) == 0 || len(s) > maxAtomLen {
- return 0
- }
- h := fnv(hash0, s)
- if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
- return a
- }
- if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
- return a
- }
- return 0
-}
-
-// String returns a string whose contents are equal to s. In that sense, it is
-// equivalent to string(s) but may be more efficient.
-func String(s []byte) string {
- if a := Lookup(s); a != 0 {
- return a.String()
- }
- return string(s)
-}
diff --git a/etcd/vendor/golang.org/x/net/html/atom/table.go b/etcd/vendor/golang.org/x/net/html/atom/table.go
deleted file mode 100644
index 2a938864cb..0000000000
--- a/etcd/vendor/golang.org/x/net/html/atom/table.go
+++ /dev/null
@@ -1,783 +0,0 @@
-// Code generated by go generate gen.go; DO NOT EDIT.
-
-//go:generate go run gen.go
-
-package atom
-
-const (
- A Atom = 0x1
- Abbr Atom = 0x4
- Accept Atom = 0x1a06
- AcceptCharset Atom = 0x1a0e
- Accesskey Atom = 0x2c09
- Acronym Atom = 0xaa07
- Action Atom = 0x27206
- Address Atom = 0x6f307
- Align Atom = 0xb105
- Allowfullscreen Atom = 0x2080f
- Allowpaymentrequest Atom = 0xc113
- Allowusermedia Atom = 0xdd0e
- Alt Atom = 0xf303
- Annotation Atom = 0x1c90a
- AnnotationXml Atom = 0x1c90e
- Applet Atom = 0x31906
- Area Atom = 0x35604
- Article Atom = 0x3fc07
- As Atom = 0x3c02
- Aside Atom = 0x10705
- Async Atom = 0xff05
- Audio Atom = 0x11505
- Autocomplete Atom = 0x2780c
- Autofocus Atom = 0x12109
- Autoplay Atom = 0x13c08
- B Atom = 0x101
- Base Atom = 0x3b04
- Basefont Atom = 0x3b08
- Bdi Atom = 0xba03
- Bdo Atom = 0x14b03
- Bgsound Atom = 0x15e07
- Big Atom = 0x17003
- Blink Atom = 0x17305
- Blockquote Atom = 0x1870a
- Body Atom = 0x2804
- Br Atom = 0x202
- Button Atom = 0x19106
- Canvas Atom = 0x10306
- Caption Atom = 0x23107
- Center Atom = 0x22006
- Challenge Atom = 0x29b09
- Charset Atom = 0x2107
- Checked Atom = 0x47907
- Cite Atom = 0x19c04
- Class Atom = 0x56405
- Code Atom = 0x5c504
- Col Atom = 0x1ab03
- Colgroup Atom = 0x1ab08
- Color Atom = 0x1bf05
- Cols Atom = 0x1c404
- Colspan Atom = 0x1c407
- Command Atom = 0x1d707
- Content Atom = 0x58b07
- Contenteditable Atom = 0x58b0f
- Contextmenu Atom = 0x3800b
- Controls Atom = 0x1de08
- Coords Atom = 0x1ea06
- Crossorigin Atom = 0x1fb0b
- Data Atom = 0x4a504
- Datalist Atom = 0x4a508
- Datetime Atom = 0x2b808
- Dd Atom = 0x2d702
- Default Atom = 0x10a07
- Defer Atom = 0x5c705
- Del Atom = 0x45203
- Desc Atom = 0x56104
- Details Atom = 0x7207
- Dfn Atom = 0x8703
- Dialog Atom = 0xbb06
- Dir Atom = 0x9303
- Dirname Atom = 0x9307
- Disabled Atom = 0x16408
- Div Atom = 0x16b03
- Dl Atom = 0x5e602
- Download Atom = 0x46308
- Draggable Atom = 0x17a09
- Dropzone Atom = 0x40508
- Dt Atom = 0x64b02
- Em Atom = 0x6e02
- Embed Atom = 0x6e05
- Enctype Atom = 0x28d07
- Face Atom = 0x21e04
- Fieldset Atom = 0x22608
- Figcaption Atom = 0x22e0a
- Figure Atom = 0x24806
- Font Atom = 0x3f04
- Footer Atom = 0xf606
- For Atom = 0x25403
- ForeignObject Atom = 0x2540d
- Foreignobject Atom = 0x2610d
- Form Atom = 0x26e04
- Formaction Atom = 0x26e0a
- Formenctype Atom = 0x2890b
- Formmethod Atom = 0x2a40a
- Formnovalidate Atom = 0x2ae0e
- Formtarget Atom = 0x2c00a
- Frame Atom = 0x8b05
- Frameset Atom = 0x8b08
- H1 Atom = 0x15c02
- H2 Atom = 0x2de02
- H3 Atom = 0x30d02
- H4 Atom = 0x34502
- H5 Atom = 0x34f02
- H6 Atom = 0x64d02
- Head Atom = 0x33104
- Header Atom = 0x33106
- Headers Atom = 0x33107
- Height Atom = 0x5206
- Hgroup Atom = 0x2ca06
- Hidden Atom = 0x2d506
- High Atom = 0x2db04
- Hr Atom = 0x15702
- Href Atom = 0x2e004
- Hreflang Atom = 0x2e008
- Html Atom = 0x5604
- HttpEquiv Atom = 0x2e80a
- I Atom = 0x601
- Icon Atom = 0x58a04
- Id Atom = 0x10902
- Iframe Atom = 0x2fc06
- Image Atom = 0x30205
- Img Atom = 0x30703
- Input Atom = 0x44b05
- Inputmode Atom = 0x44b09
- Ins Atom = 0x20403
- Integrity Atom = 0x23f09
- Is Atom = 0x16502
- Isindex Atom = 0x30f07
- Ismap Atom = 0x31605
- Itemid Atom = 0x38b06
- Itemprop Atom = 0x19d08
- Itemref Atom = 0x3cd07
- Itemscope Atom = 0x67109
- Itemtype Atom = 0x31f08
- Kbd Atom = 0xb903
- Keygen Atom = 0x3206
- Keytype Atom = 0xd607
- Kind Atom = 0x17704
- Label Atom = 0x5905
- Lang Atom = 0x2e404
- Legend Atom = 0x18106
- Li Atom = 0xb202
- Link Atom = 0x17404
- List Atom = 0x4a904
- Listing Atom = 0x4a907
- Loop Atom = 0x5d04
- Low Atom = 0xc303
- Main Atom = 0x1004
- Malignmark Atom = 0xb00a
- Manifest Atom = 0x6d708
- Map Atom = 0x31803
- Mark Atom = 0xb604
- Marquee Atom = 0x32707
- Math Atom = 0x32e04
- Max Atom = 0x33d03
- Maxlength Atom = 0x33d09
- Media Atom = 0xe605
- Mediagroup Atom = 0xe60a
- Menu Atom = 0x38704
- Menuitem Atom = 0x38708
- Meta Atom = 0x4b804
- Meter Atom = 0x9805
- Method Atom = 0x2a806
- Mglyph Atom = 0x30806
- Mi Atom = 0x34702
- Min Atom = 0x34703
- Minlength Atom = 0x34709
- Mn Atom = 0x2b102
- Mo Atom = 0xa402
- Ms Atom = 0x67402
- Mtext Atom = 0x35105
- Multiple Atom = 0x35f08
- Muted Atom = 0x36705
- Name Atom = 0x9604
- Nav Atom = 0x1303
- Nobr Atom = 0x3704
- Noembed Atom = 0x6c07
- Noframes Atom = 0x8908
- Nomodule Atom = 0xa208
- Nonce Atom = 0x1a605
- Noscript Atom = 0x21608
- Novalidate Atom = 0x2b20a
- Object Atom = 0x26806
- Ol Atom = 0x13702
- Onabort Atom = 0x19507
- Onafterprint Atom = 0x2360c
- Onautocomplete Atom = 0x2760e
- Onautocompleteerror Atom = 0x27613
- Onauxclick Atom = 0x61f0a
- Onbeforeprint Atom = 0x69e0d
- Onbeforeunload Atom = 0x6e70e
- Onblur Atom = 0x56d06
- Oncancel Atom = 0x11908
- Oncanplay Atom = 0x14d09
- Oncanplaythrough Atom = 0x14d10
- Onchange Atom = 0x41b08
- Onclick Atom = 0x2f507
- Onclose Atom = 0x36c07
- Oncontextmenu Atom = 0x37e0d
- Oncopy Atom = 0x39106
- Oncuechange Atom = 0x3970b
- Oncut Atom = 0x3a205
- Ondblclick Atom = 0x3a70a
- Ondrag Atom = 0x3b106
- Ondragend Atom = 0x3b109
- Ondragenter Atom = 0x3ba0b
- Ondragexit Atom = 0x3c50a
- Ondragleave Atom = 0x3df0b
- Ondragover Atom = 0x3ea0a
- Ondragstart Atom = 0x3f40b
- Ondrop Atom = 0x40306
- Ondurationchange Atom = 0x41310
- Onemptied Atom = 0x40a09
- Onended Atom = 0x42307
- Onerror Atom = 0x42a07
- Onfocus Atom = 0x43107
- Onhashchange Atom = 0x43d0c
- Oninput Atom = 0x44907
- Oninvalid Atom = 0x45509
- Onkeydown Atom = 0x45e09
- Onkeypress Atom = 0x46b0a
- Onkeyup Atom = 0x48007
- Onlanguagechange Atom = 0x48d10
- Onload Atom = 0x49d06
- Onloadeddata Atom = 0x49d0c
- Onloadedmetadata Atom = 0x4b010
- Onloadend Atom = 0x4c609
- Onloadstart Atom = 0x4cf0b
- Onmessage Atom = 0x4da09
- Onmessageerror Atom = 0x4da0e
- Onmousedown Atom = 0x4e80b
- Onmouseenter Atom = 0x4f30c
- Onmouseleave Atom = 0x4ff0c
- Onmousemove Atom = 0x50b0b
- Onmouseout Atom = 0x5160a
- Onmouseover Atom = 0x5230b
- Onmouseup Atom = 0x52e09
- Onmousewheel Atom = 0x53c0c
- Onoffline Atom = 0x54809
- Ononline Atom = 0x55108
- Onpagehide Atom = 0x5590a
- Onpageshow Atom = 0x5730a
- Onpaste Atom = 0x57f07
- Onpause Atom = 0x59a07
- Onplay Atom = 0x5a406
- Onplaying Atom = 0x5a409
- Onpopstate Atom = 0x5ad0a
- Onprogress Atom = 0x5b70a
- Onratechange Atom = 0x5cc0c
- Onrejectionhandled Atom = 0x5d812
- Onreset Atom = 0x5ea07
- Onresize Atom = 0x5f108
- Onscroll Atom = 0x60008
- Onsecuritypolicyviolation Atom = 0x60819
- Onseeked Atom = 0x62908
- Onseeking Atom = 0x63109
- Onselect Atom = 0x63a08
- Onshow Atom = 0x64406
- Onsort Atom = 0x64f06
- Onstalled Atom = 0x65909
- Onstorage Atom = 0x66209
- Onsubmit Atom = 0x66b08
- Onsuspend Atom = 0x67b09
- Ontimeupdate Atom = 0x400c
- Ontoggle Atom = 0x68408
- Onunhandledrejection Atom = 0x68c14
- Onunload Atom = 0x6ab08
- Onvolumechange Atom = 0x6b30e
- Onwaiting Atom = 0x6c109
- Onwheel Atom = 0x6ca07
- Open Atom = 0x1a304
- Optgroup Atom = 0x5f08
- Optimum Atom = 0x6d107
- Option Atom = 0x6e306
- Output Atom = 0x51d06
- P Atom = 0xc01
- Param Atom = 0xc05
- Pattern Atom = 0x6607
- Picture Atom = 0x7b07
- Ping Atom = 0xef04
- Placeholder Atom = 0x1310b
- Plaintext Atom = 0x1b209
- Playsinline Atom = 0x1400b
- Poster Atom = 0x2cf06
- Pre Atom = 0x47003
- Preload Atom = 0x48607
- Progress Atom = 0x5b908
- Prompt Atom = 0x53606
- Public Atom = 0x58606
- Q Atom = 0xcf01
- Radiogroup Atom = 0x30a
- Rb Atom = 0x3a02
- Readonly Atom = 0x35708
- Referrerpolicy Atom = 0x3d10e
- Rel Atom = 0x48703
- Required Atom = 0x24c08
- Reversed Atom = 0x8008
- Rows Atom = 0x9c04
- Rowspan Atom = 0x9c07
- Rp Atom = 0x23c02
- Rt Atom = 0x19a02
- Rtc Atom = 0x19a03
- Ruby Atom = 0xfb04
- S Atom = 0x2501
- Samp Atom = 0x7804
- Sandbox Atom = 0x12907
- Scope Atom = 0x67505
- Scoped Atom = 0x67506
- Script Atom = 0x21806
- Seamless Atom = 0x37108
- Section Atom = 0x56807
- Select Atom = 0x63c06
- Selected Atom = 0x63c08
- Shape Atom = 0x1e505
- Size Atom = 0x5f504
- Sizes Atom = 0x5f505
- Slot Atom = 0x1ef04
- Small Atom = 0x20605
- Sortable Atom = 0x65108
- Sorted Atom = 0x33706
- Source Atom = 0x37806
- Spacer Atom = 0x43706
- Span Atom = 0x9f04
- Spellcheck Atom = 0x4740a
- Src Atom = 0x5c003
- Srcdoc Atom = 0x5c006
- Srclang Atom = 0x5f907
- Srcset Atom = 0x6f906
- Start Atom = 0x3fa05
- Step Atom = 0x58304
- Strike Atom = 0xd206
- Strong Atom = 0x6dd06
- Style Atom = 0x6ff05
- Sub Atom = 0x66d03
- Summary Atom = 0x70407
- Sup Atom = 0x70b03
- Svg Atom = 0x70e03
- System Atom = 0x71106
- Tabindex Atom = 0x4be08
- Table Atom = 0x59505
- Target Atom = 0x2c406
- Tbody Atom = 0x2705
- Td Atom = 0x9202
- Template Atom = 0x71408
- Textarea Atom = 0x35208
- Tfoot Atom = 0xf505
- Th Atom = 0x15602
- Thead Atom = 0x33005
- Time Atom = 0x4204
- Title Atom = 0x11005
- Tr Atom = 0xcc02
- Track Atom = 0x1ba05
- Translate Atom = 0x1f209
- Tt Atom = 0x6802
- Type Atom = 0xd904
- Typemustmatch Atom = 0x2900d
- U Atom = 0xb01
- Ul Atom = 0xa702
- Updateviacache Atom = 0x460e
- Usemap Atom = 0x59e06
- Value Atom = 0x1505
- Var Atom = 0x16d03
- Video Atom = 0x2f105
- Wbr Atom = 0x57c03
- Width Atom = 0x64905
- Workertype Atom = 0x71c0a
- Wrap Atom = 0x72604
- Xmp Atom = 0x12f03
-)
-
-const hash0 = 0x81cdf10e
-
-const maxAtomLen = 25
-
-var table = [1 << 9]Atom{
- 0x1: 0xe60a, // mediagroup
- 0x2: 0x2e404, // lang
- 0x4: 0x2c09, // accesskey
- 0x5: 0x8b08, // frameset
- 0x7: 0x63a08, // onselect
- 0x8: 0x71106, // system
- 0xa: 0x64905, // width
- 0xc: 0x2890b, // formenctype
- 0xd: 0x13702, // ol
- 0xe: 0x3970b, // oncuechange
- 0x10: 0x14b03, // bdo
- 0x11: 0x11505, // audio
- 0x12: 0x17a09, // draggable
- 0x14: 0x2f105, // video
- 0x15: 0x2b102, // mn
- 0x16: 0x38704, // menu
- 0x17: 0x2cf06, // poster
- 0x19: 0xf606, // footer
- 0x1a: 0x2a806, // method
- 0x1b: 0x2b808, // datetime
- 0x1c: 0x19507, // onabort
- 0x1d: 0x460e, // updateviacache
- 0x1e: 0xff05, // async
- 0x1f: 0x49d06, // onload
- 0x21: 0x11908, // oncancel
- 0x22: 0x62908, // onseeked
- 0x23: 0x30205, // image
- 0x24: 0x5d812, // onrejectionhandled
- 0x26: 0x17404, // link
- 0x27: 0x51d06, // output
- 0x28: 0x33104, // head
- 0x29: 0x4ff0c, // onmouseleave
- 0x2a: 0x57f07, // onpaste
- 0x2b: 0x5a409, // onplaying
- 0x2c: 0x1c407, // colspan
- 0x2f: 0x1bf05, // color
- 0x30: 0x5f504, // size
- 0x31: 0x2e80a, // http-equiv
- 0x33: 0x601, // i
- 0x34: 0x5590a, // onpagehide
- 0x35: 0x68c14, // onunhandledrejection
- 0x37: 0x42a07, // onerror
- 0x3a: 0x3b08, // basefont
- 0x3f: 0x1303, // nav
- 0x40: 0x17704, // kind
- 0x41: 0x35708, // readonly
- 0x42: 0x30806, // mglyph
- 0x44: 0xb202, // li
- 0x46: 0x2d506, // hidden
- 0x47: 0x70e03, // svg
- 0x48: 0x58304, // step
- 0x49: 0x23f09, // integrity
- 0x4a: 0x58606, // public
- 0x4c: 0x1ab03, // col
- 0x4d: 0x1870a, // blockquote
- 0x4e: 0x34f02, // h5
- 0x50: 0x5b908, // progress
- 0x51: 0x5f505, // sizes
- 0x52: 0x34502, // h4
- 0x56: 0x33005, // thead
- 0x57: 0xd607, // keytype
- 0x58: 0x5b70a, // onprogress
- 0x59: 0x44b09, // inputmode
- 0x5a: 0x3b109, // ondragend
- 0x5d: 0x3a205, // oncut
- 0x5e: 0x43706, // spacer
- 0x5f: 0x1ab08, // colgroup
- 0x62: 0x16502, // is
- 0x65: 0x3c02, // as
- 0x66: 0x54809, // onoffline
- 0x67: 0x33706, // sorted
- 0x69: 0x48d10, // onlanguagechange
- 0x6c: 0x43d0c, // onhashchange
- 0x6d: 0x9604, // name
- 0x6e: 0xf505, // tfoot
- 0x6f: 0x56104, // desc
- 0x70: 0x33d03, // max
- 0x72: 0x1ea06, // coords
- 0x73: 0x30d02, // h3
- 0x74: 0x6e70e, // onbeforeunload
- 0x75: 0x9c04, // rows
- 0x76: 0x63c06, // select
- 0x77: 0x9805, // meter
- 0x78: 0x38b06, // itemid
- 0x79: 0x53c0c, // onmousewheel
- 0x7a: 0x5c006, // srcdoc
- 0x7d: 0x1ba05, // track
- 0x7f: 0x31f08, // itemtype
- 0x82: 0xa402, // mo
- 0x83: 0x41b08, // onchange
- 0x84: 0x33107, // headers
- 0x85: 0x5cc0c, // onratechange
- 0x86: 0x60819, // onsecuritypolicyviolation
- 0x88: 0x4a508, // datalist
- 0x89: 0x4e80b, // onmousedown
- 0x8a: 0x1ef04, // slot
- 0x8b: 0x4b010, // onloadedmetadata
- 0x8c: 0x1a06, // accept
- 0x8d: 0x26806, // object
- 0x91: 0x6b30e, // onvolumechange
- 0x92: 0x2107, // charset
- 0x93: 0x27613, // onautocompleteerror
- 0x94: 0xc113, // allowpaymentrequest
- 0x95: 0x2804, // body
- 0x96: 0x10a07, // default
- 0x97: 0x63c08, // selected
- 0x98: 0x21e04, // face
- 0x99: 0x1e505, // shape
- 0x9b: 0x68408, // ontoggle
- 0x9e: 0x64b02, // dt
- 0x9f: 0xb604, // mark
- 0xa1: 0xb01, // u
- 0xa4: 0x6ab08, // onunload
- 0xa5: 0x5d04, // loop
- 0xa6: 0x16408, // disabled
- 0xaa: 0x42307, // onended
- 0xab: 0xb00a, // malignmark
- 0xad: 0x67b09, // onsuspend
- 0xae: 0x35105, // mtext
- 0xaf: 0x64f06, // onsort
- 0xb0: 0x19d08, // itemprop
- 0xb3: 0x67109, // itemscope
- 0xb4: 0x17305, // blink
- 0xb6: 0x3b106, // ondrag
- 0xb7: 0xa702, // ul
- 0xb8: 0x26e04, // form
- 0xb9: 0x12907, // sandbox
- 0xba: 0x8b05, // frame
- 0xbb: 0x1505, // value
- 0xbc: 0x66209, // onstorage
- 0xbf: 0xaa07, // acronym
- 0xc0: 0x19a02, // rt
- 0xc2: 0x202, // br
- 0xc3: 0x22608, // fieldset
- 0xc4: 0x2900d, // typemustmatch
- 0xc5: 0xa208, // nomodule
- 0xc6: 0x6c07, // noembed
- 0xc7: 0x69e0d, // onbeforeprint
- 0xc8: 0x19106, // button
- 0xc9: 0x2f507, // onclick
- 0xca: 0x70407, // summary
- 0xcd: 0xfb04, // ruby
- 0xce: 0x56405, // class
- 0xcf: 0x3f40b, // ondragstart
- 0xd0: 0x23107, // caption
- 0xd4: 0xdd0e, // allowusermedia
- 0xd5: 0x4cf0b, // onloadstart
- 0xd9: 0x16b03, // div
- 0xda: 0x4a904, // list
- 0xdb: 0x32e04, // math
- 0xdc: 0x44b05, // input
- 0xdf: 0x3ea0a, // ondragover
- 0xe0: 0x2de02, // h2
- 0xe2: 0x1b209, // plaintext
- 0xe4: 0x4f30c, // onmouseenter
- 0xe7: 0x47907, // checked
- 0xe8: 0x47003, // pre
- 0xea: 0x35f08, // multiple
- 0xeb: 0xba03, // bdi
- 0xec: 0x33d09, // maxlength
- 0xed: 0xcf01, // q
- 0xee: 0x61f0a, // onauxclick
- 0xf0: 0x57c03, // wbr
- 0xf2: 0x3b04, // base
- 0xf3: 0x6e306, // option
- 0xf5: 0x41310, // ondurationchange
- 0xf7: 0x8908, // noframes
- 0xf9: 0x40508, // dropzone
- 0xfb: 0x67505, // scope
- 0xfc: 0x8008, // reversed
- 0xfd: 0x3ba0b, // ondragenter
- 0xfe: 0x3fa05, // start
- 0xff: 0x12f03, // xmp
- 0x100: 0x5f907, // srclang
- 0x101: 0x30703, // img
- 0x104: 0x101, // b
- 0x105: 0x25403, // for
- 0x106: 0x10705, // aside
- 0x107: 0x44907, // oninput
- 0x108: 0x35604, // area
- 0x109: 0x2a40a, // formmethod
- 0x10a: 0x72604, // wrap
- 0x10c: 0x23c02, // rp
- 0x10d: 0x46b0a, // onkeypress
- 0x10e: 0x6802, // tt
- 0x110: 0x34702, // mi
- 0x111: 0x36705, // muted
- 0x112: 0xf303, // alt
- 0x113: 0x5c504, // code
- 0x114: 0x6e02, // em
- 0x115: 0x3c50a, // ondragexit
- 0x117: 0x9f04, // span
- 0x119: 0x6d708, // manifest
- 0x11a: 0x38708, // menuitem
- 0x11b: 0x58b07, // content
- 0x11d: 0x6c109, // onwaiting
- 0x11f: 0x4c609, // onloadend
- 0x121: 0x37e0d, // oncontextmenu
- 0x123: 0x56d06, // onblur
- 0x124: 0x3fc07, // article
- 0x125: 0x9303, // dir
- 0x126: 0xef04, // ping
- 0x127: 0x24c08, // required
- 0x128: 0x45509, // oninvalid
- 0x129: 0xb105, // align
- 0x12b: 0x58a04, // icon
- 0x12c: 0x64d02, // h6
- 0x12d: 0x1c404, // cols
- 0x12e: 0x22e0a, // figcaption
- 0x12f: 0x45e09, // onkeydown
- 0x130: 0x66b08, // onsubmit
- 0x131: 0x14d09, // oncanplay
- 0x132: 0x70b03, // sup
- 0x133: 0xc01, // p
- 0x135: 0x40a09, // onemptied
- 0x136: 0x39106, // oncopy
- 0x137: 0x19c04, // cite
- 0x138: 0x3a70a, // ondblclick
- 0x13a: 0x50b0b, // onmousemove
- 0x13c: 0x66d03, // sub
- 0x13d: 0x48703, // rel
- 0x13e: 0x5f08, // optgroup
- 0x142: 0x9c07, // rowspan
- 0x143: 0x37806, // source
- 0x144: 0x21608, // noscript
- 0x145: 0x1a304, // open
- 0x146: 0x20403, // ins
- 0x147: 0x2540d, // foreignObject
- 0x148: 0x5ad0a, // onpopstate
- 0x14a: 0x28d07, // enctype
- 0x14b: 0x2760e, // onautocomplete
- 0x14c: 0x35208, // textarea
- 0x14e: 0x2780c, // autocomplete
- 0x14f: 0x15702, // hr
- 0x150: 0x1de08, // controls
- 0x151: 0x10902, // id
- 0x153: 0x2360c, // onafterprint
- 0x155: 0x2610d, // foreignobject
- 0x156: 0x32707, // marquee
- 0x157: 0x59a07, // onpause
- 0x158: 0x5e602, // dl
- 0x159: 0x5206, // height
- 0x15a: 0x34703, // min
- 0x15b: 0x9307, // dirname
- 0x15c: 0x1f209, // translate
- 0x15d: 0x5604, // html
- 0x15e: 0x34709, // minlength
- 0x15f: 0x48607, // preload
- 0x160: 0x71408, // template
- 0x161: 0x3df0b, // ondragleave
- 0x162: 0x3a02, // rb
- 0x164: 0x5c003, // src
- 0x165: 0x6dd06, // strong
- 0x167: 0x7804, // samp
- 0x168: 0x6f307, // address
- 0x169: 0x55108, // ononline
- 0x16b: 0x1310b, // placeholder
- 0x16c: 0x2c406, // target
- 0x16d: 0x20605, // small
- 0x16e: 0x6ca07, // onwheel
- 0x16f: 0x1c90a, // annotation
- 0x170: 0x4740a, // spellcheck
- 0x171: 0x7207, // details
- 0x172: 0x10306, // canvas
- 0x173: 0x12109, // autofocus
- 0x174: 0xc05, // param
- 0x176: 0x46308, // download
- 0x177: 0x45203, // del
- 0x178: 0x36c07, // onclose
- 0x179: 0xb903, // kbd
- 0x17a: 0x31906, // applet
- 0x17b: 0x2e004, // href
- 0x17c: 0x5f108, // onresize
- 0x17e: 0x49d0c, // onloadeddata
- 0x180: 0xcc02, // tr
- 0x181: 0x2c00a, // formtarget
- 0x182: 0x11005, // title
- 0x183: 0x6ff05, // style
- 0x184: 0xd206, // strike
- 0x185: 0x59e06, // usemap
- 0x186: 0x2fc06, // iframe
- 0x187: 0x1004, // main
- 0x189: 0x7b07, // picture
- 0x18c: 0x31605, // ismap
- 0x18e: 0x4a504, // data
- 0x18f: 0x5905, // label
- 0x191: 0x3d10e, // referrerpolicy
- 0x192: 0x15602, // th
- 0x194: 0x53606, // prompt
- 0x195: 0x56807, // section
- 0x197: 0x6d107, // optimum
- 0x198: 0x2db04, // high
- 0x199: 0x15c02, // h1
- 0x19a: 0x65909, // onstalled
- 0x19b: 0x16d03, // var
- 0x19c: 0x4204, // time
- 0x19e: 0x67402, // ms
- 0x19f: 0x33106, // header
- 0x1a0: 0x4da09, // onmessage
- 0x1a1: 0x1a605, // nonce
- 0x1a2: 0x26e0a, // formaction
- 0x1a3: 0x22006, // center
- 0x1a4: 0x3704, // nobr
- 0x1a5: 0x59505, // table
- 0x1a6: 0x4a907, // listing
- 0x1a7: 0x18106, // legend
- 0x1a9: 0x29b09, // challenge
- 0x1aa: 0x24806, // figure
- 0x1ab: 0xe605, // media
- 0x1ae: 0xd904, // type
- 0x1af: 0x3f04, // font
- 0x1b0: 0x4da0e, // onmessageerror
- 0x1b1: 0x37108, // seamless
- 0x1b2: 0x8703, // dfn
- 0x1b3: 0x5c705, // defer
- 0x1b4: 0xc303, // low
- 0x1b5: 0x19a03, // rtc
- 0x1b6: 0x5230b, // onmouseover
- 0x1b7: 0x2b20a, // novalidate
- 0x1b8: 0x71c0a, // workertype
- 0x1ba: 0x3cd07, // itemref
- 0x1bd: 0x1, // a
- 0x1be: 0x31803, // map
- 0x1bf: 0x400c, // ontimeupdate
- 0x1c0: 0x15e07, // bgsound
- 0x1c1: 0x3206, // keygen
- 0x1c2: 0x2705, // tbody
- 0x1c5: 0x64406, // onshow
- 0x1c7: 0x2501, // s
- 0x1c8: 0x6607, // pattern
- 0x1cc: 0x14d10, // oncanplaythrough
- 0x1ce: 0x2d702, // dd
- 0x1cf: 0x6f906, // srcset
- 0x1d0: 0x17003, // big
- 0x1d2: 0x65108, // sortable
- 0x1d3: 0x48007, // onkeyup
- 0x1d5: 0x5a406, // onplay
- 0x1d7: 0x4b804, // meta
- 0x1d8: 0x40306, // ondrop
- 0x1da: 0x60008, // onscroll
- 0x1db: 0x1fb0b, // crossorigin
- 0x1dc: 0x5730a, // onpageshow
- 0x1dd: 0x4, // abbr
- 0x1de: 0x9202, // td
- 0x1df: 0x58b0f, // contenteditable
- 0x1e0: 0x27206, // action
- 0x1e1: 0x1400b, // playsinline
- 0x1e2: 0x43107, // onfocus
- 0x1e3: 0x2e008, // hreflang
- 0x1e5: 0x5160a, // onmouseout
- 0x1e6: 0x5ea07, // onreset
- 0x1e7: 0x13c08, // autoplay
- 0x1e8: 0x63109, // onseeking
- 0x1ea: 0x67506, // scoped
- 0x1ec: 0x30a, // radiogroup
- 0x1ee: 0x3800b, // contextmenu
- 0x1ef: 0x52e09, // onmouseup
- 0x1f1: 0x2ca06, // hgroup
- 0x1f2: 0x2080f, // allowfullscreen
- 0x1f3: 0x4be08, // tabindex
- 0x1f6: 0x30f07, // isindex
- 0x1f7: 0x1a0e, // accept-charset
- 0x1f8: 0x2ae0e, // formnovalidate
- 0x1fb: 0x1c90e, // annotation-xml
- 0x1fc: 0x6e05, // embed
- 0x1fd: 0x21806, // script
- 0x1fe: 0xbb06, // dialog
- 0x1ff: 0x1d707, // command
-}
-
-const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
- "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
- "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
- "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
- "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
- "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
- "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
- "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
- "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
- "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
- "ignObjectforeignobjectformactionautocompleteerrorformenctype" +
- "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
- "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
- "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
- "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
- "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
- "enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
- "articleondropzonemptiedondurationchangeonendedonerroronfocus" +
- "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
- "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
- "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
- "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
- "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
- "classectionbluronpageshowbronpastepublicontenteditableonpaus" +
- "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
- "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
- "violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
- "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
- "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
- "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
- "arysupsvgsystemplateworkertypewrap"
diff --git a/etcd/vendor/golang.org/x/net/html/const.go b/etcd/vendor/golang.org/x/net/html/const.go
deleted file mode 100644
index ff7acf2d5b..0000000000
--- a/etcd/vendor/golang.org/x/net/html/const.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-// Section 12.2.4.2 of the HTML5 specification says "The following elements
-// have varying levels of special parsing rules".
-// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
-var isSpecialElementMap = map[string]bool{
- "address": true,
- "applet": true,
- "area": true,
- "article": true,
- "aside": true,
- "base": true,
- "basefont": true,
- "bgsound": true,
- "blockquote": true,
- "body": true,
- "br": true,
- "button": true,
- "caption": true,
- "center": true,
- "col": true,
- "colgroup": true,
- "dd": true,
- "details": true,
- "dir": true,
- "div": true,
- "dl": true,
- "dt": true,
- "embed": true,
- "fieldset": true,
- "figcaption": true,
- "figure": true,
- "footer": true,
- "form": true,
- "frame": true,
- "frameset": true,
- "h1": true,
- "h2": true,
- "h3": true,
- "h4": true,
- "h5": true,
- "h6": true,
- "head": true,
- "header": true,
- "hgroup": true,
- "hr": true,
- "html": true,
- "iframe": true,
- "img": true,
- "input": true,
- "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
- "li": true,
- "link": true,
- "listing": true,
- "main": true,
- "marquee": true,
- "menu": true,
- "meta": true,
- "nav": true,
- "noembed": true,
- "noframes": true,
- "noscript": true,
- "object": true,
- "ol": true,
- "p": true,
- "param": true,
- "plaintext": true,
- "pre": true,
- "script": true,
- "section": true,
- "select": true,
- "source": true,
- "style": true,
- "summary": true,
- "table": true,
- "tbody": true,
- "td": true,
- "template": true,
- "textarea": true,
- "tfoot": true,
- "th": true,
- "thead": true,
- "title": true,
- "tr": true,
- "track": true,
- "ul": true,
- "wbr": true,
- "xmp": true,
-}
-
-func isSpecialElement(element *Node) bool {
- switch element.Namespace {
- case "", "html":
- return isSpecialElementMap[element.Data]
- case "math":
- switch element.Data {
- case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
- return true
- }
- case "svg":
- switch element.Data {
- case "foreignObject", "desc", "title":
- return true
- }
- }
- return false
-}
diff --git a/etcd/vendor/golang.org/x/net/html/doc.go b/etcd/vendor/golang.org/x/net/html/doc.go
deleted file mode 100644
index 822ed42a04..0000000000
--- a/etcd/vendor/golang.org/x/net/html/doc.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package html implements an HTML5-compliant tokenizer and parser.
-
-Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
-caller's responsibility to ensure that r provides UTF-8 encoded HTML.
-
- z := html.NewTokenizer(r)
-
-Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
-which parses the next token and returns its type, or an error:
-
- for {
- tt := z.Next()
- if tt == html.ErrorToken {
- // ...
- return ...
- }
- // Process the current token.
- }
-
-There are two APIs for retrieving the current token. The high-level API is to
-call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
-allow optionally calling Raw after Next but before Token, Text, TagName, or
-TagAttr. In EBNF notation, the valid call sequence per token is:
-
- Next {Raw} [ Token | Text | TagName {TagAttr} ]
-
-Token returns an independent data structure that completely describes a token.
-Entities (such as "<") are unescaped, tag names and attribute keys are
-lower-cased, and attributes are collected into a []Attribute. For example:
-
- for {
- if z.Next() == html.ErrorToken {
- // Returning io.EOF indicates success.
- return z.Err()
- }
- emitToken(z.Token())
- }
-
-The low-level API performs fewer allocations and copies, but the contents of
-the []byte values returned by Text, TagName and TagAttr may change on the next
-call to Next. For example, to extract an HTML page's anchor text:
-
- depth := 0
- for {
- tt := z.Next()
- switch tt {
- case html.ErrorToken:
- return z.Err()
- case html.TextToken:
- if depth > 0 {
- // emitBytes should copy the []byte it receives,
- // if it doesn't process it immediately.
- emitBytes(z.Text())
- }
- case html.StartTagToken, html.EndTagToken:
- tn, _ := z.TagName()
- if len(tn) == 1 && tn[0] == 'a' {
- if tt == html.StartTagToken {
- depth++
- } else {
- depth--
- }
- }
- }
- }
-
-Parsing is done by calling Parse with an io.Reader, which returns the root of
-the parse tree (the document element) as a *Node. It is the caller's
-responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
-example, to process each anchor node in depth-first order:
-
- doc, err := html.Parse(r)
- if err != nil {
- // ...
- }
- var f func(*html.Node)
- f = func(n *html.Node) {
- if n.Type == html.ElementNode && n.Data == "a" {
- // Do something with n...
- }
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- f(c)
- }
- }
- f(doc)
-
-The relevant specifications include:
-https://html.spec.whatwg.org/multipage/syntax.html and
-https://html.spec.whatwg.org/multipage/syntax.html#tokenization
-*/
-package html // import "golang.org/x/net/html"
-
-// The tokenization algorithm implemented by this package is not a line-by-line
-// transliteration of the relatively verbose state-machine in the WHATWG
-// specification. A more direct approach is used instead, where the program
-// counter implies the state, such as whether it is tokenizing a tag or a text
-// node. Specification compliance is verified by checking expected and actual
-// outputs over a test suite rather than aiming for algorithmic fidelity.
-
-// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
-// TODO(nigeltao): How does parsing interact with a JavaScript engine?
diff --git a/etcd/vendor/golang.org/x/net/html/doctype.go b/etcd/vendor/golang.org/x/net/html/doctype.go
deleted file mode 100644
index c484e5a94f..0000000000
--- a/etcd/vendor/golang.org/x/net/html/doctype.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "strings"
-)
-
-// parseDoctype parses the data from a DoctypeToken into a name,
-// public identifier, and system identifier. It returns a Node whose Type
-// is DoctypeNode, whose Data is the name, and which has attributes
-// named "system" and "public" for the two identifiers if they were present.
-// quirks is whether the document should be parsed in "quirks mode".
-func parseDoctype(s string) (n *Node, quirks bool) {
- n = &Node{Type: DoctypeNode}
-
- // Find the name.
- space := strings.IndexAny(s, whitespace)
- if space == -1 {
- space = len(s)
- }
- n.Data = s[:space]
- // The comparison to "html" is case-sensitive.
- if n.Data != "html" {
- quirks = true
- }
- n.Data = strings.ToLower(n.Data)
- s = strings.TrimLeft(s[space:], whitespace)
-
- if len(s) < 6 {
- // It can't start with "PUBLIC" or "SYSTEM".
- // Ignore the rest of the string.
- return n, quirks || s != ""
- }
-
- key := strings.ToLower(s[:6])
- s = s[6:]
- for key == "public" || key == "system" {
- s = strings.TrimLeft(s, whitespace)
- if s == "" {
- break
- }
- quote := s[0]
- if quote != '"' && quote != '\'' {
- break
- }
- s = s[1:]
- q := strings.IndexRune(s, rune(quote))
- var id string
- if q == -1 {
- id = s
- s = ""
- } else {
- id = s[:q]
- s = s[q+1:]
- }
- n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
- if key == "public" {
- key = "system"
- } else {
- key = ""
- }
- }
-
- if key != "" || s != "" {
- quirks = true
- } else if len(n.Attr) > 0 {
- if n.Attr[0].Key == "public" {
- public := strings.ToLower(n.Attr[0].Val)
- switch public {
- case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
- quirks = true
- default:
- for _, q := range quirkyIDs {
- if strings.HasPrefix(public, q) {
- quirks = true
- break
- }
- }
- }
- // The following two public IDs only cause quirks mode if there is no system ID.
- if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
- strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
- quirks = true
- }
- }
- if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
- strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
- quirks = true
- }
- }
-
- return n, quirks
-}
-
-// quirkyIDs is a list of public doctype identifiers that cause a document
-// to be interpreted in quirks mode. The identifiers should be in lower case.
-var quirkyIDs = []string{
- "+//silmaril//dtd html pro v0r11 19970101//",
- "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
- "-//as//dtd html 3.0 aswedit + extensions//",
- "-//ietf//dtd html 2.0 level 1//",
- "-//ietf//dtd html 2.0 level 2//",
- "-//ietf//dtd html 2.0 strict level 1//",
- "-//ietf//dtd html 2.0 strict level 2//",
- "-//ietf//dtd html 2.0 strict//",
- "-//ietf//dtd html 2.0//",
- "-//ietf//dtd html 2.1e//",
- "-//ietf//dtd html 3.0//",
- "-//ietf//dtd html 3.2 final//",
- "-//ietf//dtd html 3.2//",
- "-//ietf//dtd html 3//",
- "-//ietf//dtd html level 0//",
- "-//ietf//dtd html level 1//",
- "-//ietf//dtd html level 2//",
- "-//ietf//dtd html level 3//",
- "-//ietf//dtd html strict level 0//",
- "-//ietf//dtd html strict level 1//",
- "-//ietf//dtd html strict level 2//",
- "-//ietf//dtd html strict level 3//",
- "-//ietf//dtd html strict//",
- "-//ietf//dtd html//",
- "-//metrius//dtd metrius presentational//",
- "-//microsoft//dtd internet explorer 2.0 html strict//",
- "-//microsoft//dtd internet explorer 2.0 html//",
- "-//microsoft//dtd internet explorer 2.0 tables//",
- "-//microsoft//dtd internet explorer 3.0 html strict//",
- "-//microsoft//dtd internet explorer 3.0 html//",
- "-//microsoft//dtd internet explorer 3.0 tables//",
- "-//netscape comm. corp.//dtd html//",
- "-//netscape comm. corp.//dtd strict html//",
- "-//o'reilly and associates//dtd html 2.0//",
- "-//o'reilly and associates//dtd html extended 1.0//",
- "-//o'reilly and associates//dtd html extended relaxed 1.0//",
- "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
- "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
- "-//spyglass//dtd html 2.0 extended//",
- "-//sq//dtd html 2.0 hotmetal + extensions//",
- "-//sun microsystems corp.//dtd hotjava html//",
- "-//sun microsystems corp.//dtd hotjava strict html//",
- "-//w3c//dtd html 3 1995-03-24//",
- "-//w3c//dtd html 3.2 draft//",
- "-//w3c//dtd html 3.2 final//",
- "-//w3c//dtd html 3.2//",
- "-//w3c//dtd html 3.2s draft//",
- "-//w3c//dtd html 4.0 frameset//",
- "-//w3c//dtd html 4.0 transitional//",
- "-//w3c//dtd html experimental 19960712//",
- "-//w3c//dtd html experimental 970421//",
- "-//w3c//dtd w3 html//",
- "-//w3o//dtd w3 html 3.0//",
- "-//webtechs//dtd mozilla html 2.0//",
- "-//webtechs//dtd mozilla html//",
-}
diff --git a/etcd/vendor/golang.org/x/net/html/entity.go b/etcd/vendor/golang.org/x/net/html/entity.go
deleted file mode 100644
index b628880a01..0000000000
--- a/etcd/vendor/golang.org/x/net/html/entity.go
+++ /dev/null
@@ -1,2253 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-// All entities that do not end with ';' are 6 or fewer bytes long.
-const longestEntityWithoutSemicolon = 6
-
-// entity is a map from HTML entity names to their values. The semicolon matters:
-// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
-// lists both "amp" and "amp;" as two separate entries.
-//
-// Note that the HTML5 list is larger than the HTML4 list at
-// http://www.w3.org/TR/html4/sgml/entities.html
-var entity = map[string]rune{
- "AElig;": '\U000000C6',
- "AMP;": '\U00000026',
- "Aacute;": '\U000000C1',
- "Abreve;": '\U00000102',
- "Acirc;": '\U000000C2',
- "Acy;": '\U00000410',
- "Afr;": '\U0001D504',
- "Agrave;": '\U000000C0',
- "Alpha;": '\U00000391',
- "Amacr;": '\U00000100',
- "And;": '\U00002A53',
- "Aogon;": '\U00000104',
- "Aopf;": '\U0001D538',
- "ApplyFunction;": '\U00002061',
- "Aring;": '\U000000C5',
- "Ascr;": '\U0001D49C',
- "Assign;": '\U00002254',
- "Atilde;": '\U000000C3',
- "Auml;": '\U000000C4',
- "Backslash;": '\U00002216',
- "Barv;": '\U00002AE7',
- "Barwed;": '\U00002306',
- "Bcy;": '\U00000411',
- "Because;": '\U00002235',
- "Bernoullis;": '\U0000212C',
- "Beta;": '\U00000392',
- "Bfr;": '\U0001D505',
- "Bopf;": '\U0001D539',
- "Breve;": '\U000002D8',
- "Bscr;": '\U0000212C',
- "Bumpeq;": '\U0000224E',
- "CHcy;": '\U00000427',
- "COPY;": '\U000000A9',
- "Cacute;": '\U00000106',
- "Cap;": '\U000022D2',
- "CapitalDifferentialD;": '\U00002145',
- "Cayleys;": '\U0000212D',
- "Ccaron;": '\U0000010C',
- "Ccedil;": '\U000000C7',
- "Ccirc;": '\U00000108',
- "Cconint;": '\U00002230',
- "Cdot;": '\U0000010A',
- "Cedilla;": '\U000000B8',
- "CenterDot;": '\U000000B7',
- "Cfr;": '\U0000212D',
- "Chi;": '\U000003A7',
- "CircleDot;": '\U00002299',
- "CircleMinus;": '\U00002296',
- "CirclePlus;": '\U00002295',
- "CircleTimes;": '\U00002297',
- "ClockwiseContourIntegral;": '\U00002232',
- "CloseCurlyDoubleQuote;": '\U0000201D',
- "CloseCurlyQuote;": '\U00002019',
- "Colon;": '\U00002237',
- "Colone;": '\U00002A74',
- "Congruent;": '\U00002261',
- "Conint;": '\U0000222F',
- "ContourIntegral;": '\U0000222E',
- "Copf;": '\U00002102',
- "Coproduct;": '\U00002210',
- "CounterClockwiseContourIntegral;": '\U00002233',
- "Cross;": '\U00002A2F',
- "Cscr;": '\U0001D49E',
- "Cup;": '\U000022D3',
- "CupCap;": '\U0000224D',
- "DD;": '\U00002145',
- "DDotrahd;": '\U00002911',
- "DJcy;": '\U00000402',
- "DScy;": '\U00000405',
- "DZcy;": '\U0000040F',
- "Dagger;": '\U00002021',
- "Darr;": '\U000021A1',
- "Dashv;": '\U00002AE4',
- "Dcaron;": '\U0000010E',
- "Dcy;": '\U00000414',
- "Del;": '\U00002207',
- "Delta;": '\U00000394',
- "Dfr;": '\U0001D507',
- "DiacriticalAcute;": '\U000000B4',
- "DiacriticalDot;": '\U000002D9',
- "DiacriticalDoubleAcute;": '\U000002DD',
- "DiacriticalGrave;": '\U00000060',
- "DiacriticalTilde;": '\U000002DC',
- "Diamond;": '\U000022C4',
- "DifferentialD;": '\U00002146',
- "Dopf;": '\U0001D53B',
- "Dot;": '\U000000A8',
- "DotDot;": '\U000020DC',
- "DotEqual;": '\U00002250',
- "DoubleContourIntegral;": '\U0000222F',
- "DoubleDot;": '\U000000A8',
- "DoubleDownArrow;": '\U000021D3',
- "DoubleLeftArrow;": '\U000021D0',
- "DoubleLeftRightArrow;": '\U000021D4',
- "DoubleLeftTee;": '\U00002AE4',
- "DoubleLongLeftArrow;": '\U000027F8',
- "DoubleLongLeftRightArrow;": '\U000027FA',
- "DoubleLongRightArrow;": '\U000027F9',
- "DoubleRightArrow;": '\U000021D2',
- "DoubleRightTee;": '\U000022A8',
- "DoubleUpArrow;": '\U000021D1',
- "DoubleUpDownArrow;": '\U000021D5',
- "DoubleVerticalBar;": '\U00002225',
- "DownArrow;": '\U00002193',
- "DownArrowBar;": '\U00002913',
- "DownArrowUpArrow;": '\U000021F5',
- "DownBreve;": '\U00000311',
- "DownLeftRightVector;": '\U00002950',
- "DownLeftTeeVector;": '\U0000295E',
- "DownLeftVector;": '\U000021BD',
- "DownLeftVectorBar;": '\U00002956',
- "DownRightTeeVector;": '\U0000295F',
- "DownRightVector;": '\U000021C1',
- "DownRightVectorBar;": '\U00002957',
- "DownTee;": '\U000022A4',
- "DownTeeArrow;": '\U000021A7',
- "Downarrow;": '\U000021D3',
- "Dscr;": '\U0001D49F',
- "Dstrok;": '\U00000110',
- "ENG;": '\U0000014A',
- "ETH;": '\U000000D0',
- "Eacute;": '\U000000C9',
- "Ecaron;": '\U0000011A',
- "Ecirc;": '\U000000CA',
- "Ecy;": '\U0000042D',
- "Edot;": '\U00000116',
- "Efr;": '\U0001D508',
- "Egrave;": '\U000000C8',
- "Element;": '\U00002208',
- "Emacr;": '\U00000112',
- "EmptySmallSquare;": '\U000025FB',
- "EmptyVerySmallSquare;": '\U000025AB',
- "Eogon;": '\U00000118',
- "Eopf;": '\U0001D53C',
- "Epsilon;": '\U00000395',
- "Equal;": '\U00002A75',
- "EqualTilde;": '\U00002242',
- "Equilibrium;": '\U000021CC',
- "Escr;": '\U00002130',
- "Esim;": '\U00002A73',
- "Eta;": '\U00000397',
- "Euml;": '\U000000CB',
- "Exists;": '\U00002203',
- "ExponentialE;": '\U00002147',
- "Fcy;": '\U00000424',
- "Ffr;": '\U0001D509',
- "FilledSmallSquare;": '\U000025FC',
- "FilledVerySmallSquare;": '\U000025AA',
- "Fopf;": '\U0001D53D',
- "ForAll;": '\U00002200',
- "Fouriertrf;": '\U00002131',
- "Fscr;": '\U00002131',
- "GJcy;": '\U00000403',
- "GT;": '\U0000003E',
- "Gamma;": '\U00000393',
- "Gammad;": '\U000003DC',
- "Gbreve;": '\U0000011E',
- "Gcedil;": '\U00000122',
- "Gcirc;": '\U0000011C',
- "Gcy;": '\U00000413',
- "Gdot;": '\U00000120',
- "Gfr;": '\U0001D50A',
- "Gg;": '\U000022D9',
- "Gopf;": '\U0001D53E',
- "GreaterEqual;": '\U00002265',
- "GreaterEqualLess;": '\U000022DB',
- "GreaterFullEqual;": '\U00002267',
- "GreaterGreater;": '\U00002AA2',
- "GreaterLess;": '\U00002277',
- "GreaterSlantEqual;": '\U00002A7E',
- "GreaterTilde;": '\U00002273',
- "Gscr;": '\U0001D4A2',
- "Gt;": '\U0000226B',
- "HARDcy;": '\U0000042A',
- "Hacek;": '\U000002C7',
- "Hat;": '\U0000005E',
- "Hcirc;": '\U00000124',
- "Hfr;": '\U0000210C',
- "HilbertSpace;": '\U0000210B',
- "Hopf;": '\U0000210D',
- "HorizontalLine;": '\U00002500',
- "Hscr;": '\U0000210B',
- "Hstrok;": '\U00000126',
- "HumpDownHump;": '\U0000224E',
- "HumpEqual;": '\U0000224F',
- "IEcy;": '\U00000415',
- "IJlig;": '\U00000132',
- "IOcy;": '\U00000401',
- "Iacute;": '\U000000CD',
- "Icirc;": '\U000000CE',
- "Icy;": '\U00000418',
- "Idot;": '\U00000130',
- "Ifr;": '\U00002111',
- "Igrave;": '\U000000CC',
- "Im;": '\U00002111',
- "Imacr;": '\U0000012A',
- "ImaginaryI;": '\U00002148',
- "Implies;": '\U000021D2',
- "Int;": '\U0000222C',
- "Integral;": '\U0000222B',
- "Intersection;": '\U000022C2',
- "InvisibleComma;": '\U00002063',
- "InvisibleTimes;": '\U00002062',
- "Iogon;": '\U0000012E',
- "Iopf;": '\U0001D540',
- "Iota;": '\U00000399',
- "Iscr;": '\U00002110',
- "Itilde;": '\U00000128',
- "Iukcy;": '\U00000406',
- "Iuml;": '\U000000CF',
- "Jcirc;": '\U00000134',
- "Jcy;": '\U00000419',
- "Jfr;": '\U0001D50D',
- "Jopf;": '\U0001D541',
- "Jscr;": '\U0001D4A5',
- "Jsercy;": '\U00000408',
- "Jukcy;": '\U00000404',
- "KHcy;": '\U00000425',
- "KJcy;": '\U0000040C',
- "Kappa;": '\U0000039A',
- "Kcedil;": '\U00000136',
- "Kcy;": '\U0000041A',
- "Kfr;": '\U0001D50E',
- "Kopf;": '\U0001D542',
- "Kscr;": '\U0001D4A6',
- "LJcy;": '\U00000409',
- "LT;": '\U0000003C',
- "Lacute;": '\U00000139',
- "Lambda;": '\U0000039B',
- "Lang;": '\U000027EA',
- "Laplacetrf;": '\U00002112',
- "Larr;": '\U0000219E',
- "Lcaron;": '\U0000013D',
- "Lcedil;": '\U0000013B',
- "Lcy;": '\U0000041B',
- "LeftAngleBracket;": '\U000027E8',
- "LeftArrow;": '\U00002190',
- "LeftArrowBar;": '\U000021E4',
- "LeftArrowRightArrow;": '\U000021C6',
- "LeftCeiling;": '\U00002308',
- "LeftDoubleBracket;": '\U000027E6',
- "LeftDownTeeVector;": '\U00002961',
- "LeftDownVector;": '\U000021C3',
- "LeftDownVectorBar;": '\U00002959',
- "LeftFloor;": '\U0000230A',
- "LeftRightArrow;": '\U00002194',
- "LeftRightVector;": '\U0000294E',
- "LeftTee;": '\U000022A3',
- "LeftTeeArrow;": '\U000021A4',
- "LeftTeeVector;": '\U0000295A',
- "LeftTriangle;": '\U000022B2',
- "LeftTriangleBar;": '\U000029CF',
- "LeftTriangleEqual;": '\U000022B4',
- "LeftUpDownVector;": '\U00002951',
- "LeftUpTeeVector;": '\U00002960',
- "LeftUpVector;": '\U000021BF',
- "LeftUpVectorBar;": '\U00002958',
- "LeftVector;": '\U000021BC',
- "LeftVectorBar;": '\U00002952',
- "Leftarrow;": '\U000021D0',
- "Leftrightarrow;": '\U000021D4',
- "LessEqualGreater;": '\U000022DA',
- "LessFullEqual;": '\U00002266',
- "LessGreater;": '\U00002276',
- "LessLess;": '\U00002AA1',
- "LessSlantEqual;": '\U00002A7D',
- "LessTilde;": '\U00002272',
- "Lfr;": '\U0001D50F',
- "Ll;": '\U000022D8',
- "Lleftarrow;": '\U000021DA',
- "Lmidot;": '\U0000013F',
- "LongLeftArrow;": '\U000027F5',
- "LongLeftRightArrow;": '\U000027F7',
- "LongRightArrow;": '\U000027F6',
- "Longleftarrow;": '\U000027F8',
- "Longleftrightarrow;": '\U000027FA',
- "Longrightarrow;": '\U000027F9',
- "Lopf;": '\U0001D543',
- "LowerLeftArrow;": '\U00002199',
- "LowerRightArrow;": '\U00002198',
- "Lscr;": '\U00002112',
- "Lsh;": '\U000021B0',
- "Lstrok;": '\U00000141',
- "Lt;": '\U0000226A',
- "Map;": '\U00002905',
- "Mcy;": '\U0000041C',
- "MediumSpace;": '\U0000205F',
- "Mellintrf;": '\U00002133',
- "Mfr;": '\U0001D510',
- "MinusPlus;": '\U00002213',
- "Mopf;": '\U0001D544',
- "Mscr;": '\U00002133',
- "Mu;": '\U0000039C',
- "NJcy;": '\U0000040A',
- "Nacute;": '\U00000143',
- "Ncaron;": '\U00000147',
- "Ncedil;": '\U00000145',
- "Ncy;": '\U0000041D',
- "NegativeMediumSpace;": '\U0000200B',
- "NegativeThickSpace;": '\U0000200B',
- "NegativeThinSpace;": '\U0000200B',
- "NegativeVeryThinSpace;": '\U0000200B',
- "NestedGreaterGreater;": '\U0000226B',
- "NestedLessLess;": '\U0000226A',
- "NewLine;": '\U0000000A',
- "Nfr;": '\U0001D511',
- "NoBreak;": '\U00002060',
- "NonBreakingSpace;": '\U000000A0',
- "Nopf;": '\U00002115',
- "Not;": '\U00002AEC',
- "NotCongruent;": '\U00002262',
- "NotCupCap;": '\U0000226D',
- "NotDoubleVerticalBar;": '\U00002226',
- "NotElement;": '\U00002209',
- "NotEqual;": '\U00002260',
- "NotExists;": '\U00002204',
- "NotGreater;": '\U0000226F',
- "NotGreaterEqual;": '\U00002271',
- "NotGreaterLess;": '\U00002279',
- "NotGreaterTilde;": '\U00002275',
- "NotLeftTriangle;": '\U000022EA',
- "NotLeftTriangleEqual;": '\U000022EC',
- "NotLess;": '\U0000226E',
- "NotLessEqual;": '\U00002270',
- "NotLessGreater;": '\U00002278',
- "NotLessTilde;": '\U00002274',
- "NotPrecedes;": '\U00002280',
- "NotPrecedesSlantEqual;": '\U000022E0',
- "NotReverseElement;": '\U0000220C',
- "NotRightTriangle;": '\U000022EB',
- "NotRightTriangleEqual;": '\U000022ED',
- "NotSquareSubsetEqual;": '\U000022E2',
- "NotSquareSupersetEqual;": '\U000022E3',
- "NotSubsetEqual;": '\U00002288',
- "NotSucceeds;": '\U00002281',
- "NotSucceedsSlantEqual;": '\U000022E1',
- "NotSupersetEqual;": '\U00002289',
- "NotTilde;": '\U00002241',
- "NotTildeEqual;": '\U00002244',
- "NotTildeFullEqual;": '\U00002247',
- "NotTildeTilde;": '\U00002249',
- "NotVerticalBar;": '\U00002224',
- "Nscr;": '\U0001D4A9',
- "Ntilde;": '\U000000D1',
- "Nu;": '\U0000039D',
- "OElig;": '\U00000152',
- "Oacute;": '\U000000D3',
- "Ocirc;": '\U000000D4',
- "Ocy;": '\U0000041E',
- "Odblac;": '\U00000150',
- "Ofr;": '\U0001D512',
- "Ograve;": '\U000000D2',
- "Omacr;": '\U0000014C',
- "Omega;": '\U000003A9',
- "Omicron;": '\U0000039F',
- "Oopf;": '\U0001D546',
- "OpenCurlyDoubleQuote;": '\U0000201C',
- "OpenCurlyQuote;": '\U00002018',
- "Or;": '\U00002A54',
- "Oscr;": '\U0001D4AA',
- "Oslash;": '\U000000D8',
- "Otilde;": '\U000000D5',
- "Otimes;": '\U00002A37',
- "Ouml;": '\U000000D6',
- "OverBar;": '\U0000203E',
- "OverBrace;": '\U000023DE',
- "OverBracket;": '\U000023B4',
- "OverParenthesis;": '\U000023DC',
- "PartialD;": '\U00002202',
- "Pcy;": '\U0000041F',
- "Pfr;": '\U0001D513',
- "Phi;": '\U000003A6',
- "Pi;": '\U000003A0',
- "PlusMinus;": '\U000000B1',
- "Poincareplane;": '\U0000210C',
- "Popf;": '\U00002119',
- "Pr;": '\U00002ABB',
- "Precedes;": '\U0000227A',
- "PrecedesEqual;": '\U00002AAF',
- "PrecedesSlantEqual;": '\U0000227C',
- "PrecedesTilde;": '\U0000227E',
- "Prime;": '\U00002033',
- "Product;": '\U0000220F',
- "Proportion;": '\U00002237',
- "Proportional;": '\U0000221D',
- "Pscr;": '\U0001D4AB',
- "Psi;": '\U000003A8',
- "QUOT;": '\U00000022',
- "Qfr;": '\U0001D514',
- "Qopf;": '\U0000211A',
- "Qscr;": '\U0001D4AC',
- "RBarr;": '\U00002910',
- "REG;": '\U000000AE',
- "Racute;": '\U00000154',
- "Rang;": '\U000027EB',
- "Rarr;": '\U000021A0',
- "Rarrtl;": '\U00002916',
- "Rcaron;": '\U00000158',
- "Rcedil;": '\U00000156',
- "Rcy;": '\U00000420',
- "Re;": '\U0000211C',
- "ReverseElement;": '\U0000220B',
- "ReverseEquilibrium;": '\U000021CB',
- "ReverseUpEquilibrium;": '\U0000296F',
- "Rfr;": '\U0000211C',
- "Rho;": '\U000003A1',
- "RightAngleBracket;": '\U000027E9',
- "RightArrow;": '\U00002192',
- "RightArrowBar;": '\U000021E5',
- "RightArrowLeftArrow;": '\U000021C4',
- "RightCeiling;": '\U00002309',
- "RightDoubleBracket;": '\U000027E7',
- "RightDownTeeVector;": '\U0000295D',
- "RightDownVector;": '\U000021C2',
- "RightDownVectorBar;": '\U00002955',
- "RightFloor;": '\U0000230B',
- "RightTee;": '\U000022A2',
- "RightTeeArrow;": '\U000021A6',
- "RightTeeVector;": '\U0000295B',
- "RightTriangle;": '\U000022B3',
- "RightTriangleBar;": '\U000029D0',
- "RightTriangleEqual;": '\U000022B5',
- "RightUpDownVector;": '\U0000294F',
- "RightUpTeeVector;": '\U0000295C',
- "RightUpVector;": '\U000021BE',
- "RightUpVectorBar;": '\U00002954',
- "RightVector;": '\U000021C0',
- "RightVectorBar;": '\U00002953',
- "Rightarrow;": '\U000021D2',
- "Ropf;": '\U0000211D',
- "RoundImplies;": '\U00002970',
- "Rrightarrow;": '\U000021DB',
- "Rscr;": '\U0000211B',
- "Rsh;": '\U000021B1',
- "RuleDelayed;": '\U000029F4',
- "SHCHcy;": '\U00000429',
- "SHcy;": '\U00000428',
- "SOFTcy;": '\U0000042C',
- "Sacute;": '\U0000015A',
- "Sc;": '\U00002ABC',
- "Scaron;": '\U00000160',
- "Scedil;": '\U0000015E',
- "Scirc;": '\U0000015C',
- "Scy;": '\U00000421',
- "Sfr;": '\U0001D516',
- "ShortDownArrow;": '\U00002193',
- "ShortLeftArrow;": '\U00002190',
- "ShortRightArrow;": '\U00002192',
- "ShortUpArrow;": '\U00002191',
- "Sigma;": '\U000003A3',
- "SmallCircle;": '\U00002218',
- "Sopf;": '\U0001D54A',
- "Sqrt;": '\U0000221A',
- "Square;": '\U000025A1',
- "SquareIntersection;": '\U00002293',
- "SquareSubset;": '\U0000228F',
- "SquareSubsetEqual;": '\U00002291',
- "SquareSuperset;": '\U00002290',
- "SquareSupersetEqual;": '\U00002292',
- "SquareUnion;": '\U00002294',
- "Sscr;": '\U0001D4AE',
- "Star;": '\U000022C6',
- "Sub;": '\U000022D0',
- "Subset;": '\U000022D0',
- "SubsetEqual;": '\U00002286',
- "Succeeds;": '\U0000227B',
- "SucceedsEqual;": '\U00002AB0',
- "SucceedsSlantEqual;": '\U0000227D',
- "SucceedsTilde;": '\U0000227F',
- "SuchThat;": '\U0000220B',
- "Sum;": '\U00002211',
- "Sup;": '\U000022D1',
- "Superset;": '\U00002283',
- "SupersetEqual;": '\U00002287',
- "Supset;": '\U000022D1',
- "THORN;": '\U000000DE',
- "TRADE;": '\U00002122',
- "TSHcy;": '\U0000040B',
- "TScy;": '\U00000426',
- "Tab;": '\U00000009',
- "Tau;": '\U000003A4',
- "Tcaron;": '\U00000164',
- "Tcedil;": '\U00000162',
- "Tcy;": '\U00000422',
- "Tfr;": '\U0001D517',
- "Therefore;": '\U00002234',
- "Theta;": '\U00000398',
- "ThinSpace;": '\U00002009',
- "Tilde;": '\U0000223C',
- "TildeEqual;": '\U00002243',
- "TildeFullEqual;": '\U00002245',
- "TildeTilde;": '\U00002248',
- "Topf;": '\U0001D54B',
- "TripleDot;": '\U000020DB',
- "Tscr;": '\U0001D4AF',
- "Tstrok;": '\U00000166',
- "Uacute;": '\U000000DA',
- "Uarr;": '\U0000219F',
- "Uarrocir;": '\U00002949',
- "Ubrcy;": '\U0000040E',
- "Ubreve;": '\U0000016C',
- "Ucirc;": '\U000000DB',
- "Ucy;": '\U00000423',
- "Udblac;": '\U00000170',
- "Ufr;": '\U0001D518',
- "Ugrave;": '\U000000D9',
- "Umacr;": '\U0000016A',
- "UnderBar;": '\U0000005F',
- "UnderBrace;": '\U000023DF',
- "UnderBracket;": '\U000023B5',
- "UnderParenthesis;": '\U000023DD',
- "Union;": '\U000022C3',
- "UnionPlus;": '\U0000228E',
- "Uogon;": '\U00000172',
- "Uopf;": '\U0001D54C',
- "UpArrow;": '\U00002191',
- "UpArrowBar;": '\U00002912',
- "UpArrowDownArrow;": '\U000021C5',
- "UpDownArrow;": '\U00002195',
- "UpEquilibrium;": '\U0000296E',
- "UpTee;": '\U000022A5',
- "UpTeeArrow;": '\U000021A5',
- "Uparrow;": '\U000021D1',
- "Updownarrow;": '\U000021D5',
- "UpperLeftArrow;": '\U00002196',
- "UpperRightArrow;": '\U00002197',
- "Upsi;": '\U000003D2',
- "Upsilon;": '\U000003A5',
- "Uring;": '\U0000016E',
- "Uscr;": '\U0001D4B0',
- "Utilde;": '\U00000168',
- "Uuml;": '\U000000DC',
- "VDash;": '\U000022AB',
- "Vbar;": '\U00002AEB',
- "Vcy;": '\U00000412',
- "Vdash;": '\U000022A9',
- "Vdashl;": '\U00002AE6',
- "Vee;": '\U000022C1',
- "Verbar;": '\U00002016',
- "Vert;": '\U00002016',
- "VerticalBar;": '\U00002223',
- "VerticalLine;": '\U0000007C',
- "VerticalSeparator;": '\U00002758',
- "VerticalTilde;": '\U00002240',
- "VeryThinSpace;": '\U0000200A',
- "Vfr;": '\U0001D519',
- "Vopf;": '\U0001D54D',
- "Vscr;": '\U0001D4B1',
- "Vvdash;": '\U000022AA',
- "Wcirc;": '\U00000174',
- "Wedge;": '\U000022C0',
- "Wfr;": '\U0001D51A',
- "Wopf;": '\U0001D54E',
- "Wscr;": '\U0001D4B2',
- "Xfr;": '\U0001D51B',
- "Xi;": '\U0000039E',
- "Xopf;": '\U0001D54F',
- "Xscr;": '\U0001D4B3',
- "YAcy;": '\U0000042F',
- "YIcy;": '\U00000407',
- "YUcy;": '\U0000042E',
- "Yacute;": '\U000000DD',
- "Ycirc;": '\U00000176',
- "Ycy;": '\U0000042B',
- "Yfr;": '\U0001D51C',
- "Yopf;": '\U0001D550',
- "Yscr;": '\U0001D4B4',
- "Yuml;": '\U00000178',
- "ZHcy;": '\U00000416',
- "Zacute;": '\U00000179',
- "Zcaron;": '\U0000017D',
- "Zcy;": '\U00000417',
- "Zdot;": '\U0000017B',
- "ZeroWidthSpace;": '\U0000200B',
- "Zeta;": '\U00000396',
- "Zfr;": '\U00002128',
- "Zopf;": '\U00002124',
- "Zscr;": '\U0001D4B5',
- "aacute;": '\U000000E1',
- "abreve;": '\U00000103',
- "ac;": '\U0000223E',
- "acd;": '\U0000223F',
- "acirc;": '\U000000E2',
- "acute;": '\U000000B4',
- "acy;": '\U00000430',
- "aelig;": '\U000000E6',
- "af;": '\U00002061',
- "afr;": '\U0001D51E',
- "agrave;": '\U000000E0',
- "alefsym;": '\U00002135',
- "aleph;": '\U00002135',
- "alpha;": '\U000003B1',
- "amacr;": '\U00000101',
- "amalg;": '\U00002A3F',
- "amp;": '\U00000026',
- "and;": '\U00002227',
- "andand;": '\U00002A55',
- "andd;": '\U00002A5C',
- "andslope;": '\U00002A58',
- "andv;": '\U00002A5A',
- "ang;": '\U00002220',
- "ange;": '\U000029A4',
- "angle;": '\U00002220',
- "angmsd;": '\U00002221',
- "angmsdaa;": '\U000029A8',
- "angmsdab;": '\U000029A9',
- "angmsdac;": '\U000029AA',
- "angmsdad;": '\U000029AB',
- "angmsdae;": '\U000029AC',
- "angmsdaf;": '\U000029AD',
- "angmsdag;": '\U000029AE',
- "angmsdah;": '\U000029AF',
- "angrt;": '\U0000221F',
- "angrtvb;": '\U000022BE',
- "angrtvbd;": '\U0000299D',
- "angsph;": '\U00002222',
- "angst;": '\U000000C5',
- "angzarr;": '\U0000237C',
- "aogon;": '\U00000105',
- "aopf;": '\U0001D552',
- "ap;": '\U00002248',
- "apE;": '\U00002A70',
- "apacir;": '\U00002A6F',
- "ape;": '\U0000224A',
- "apid;": '\U0000224B',
- "apos;": '\U00000027',
- "approx;": '\U00002248',
- "approxeq;": '\U0000224A',
- "aring;": '\U000000E5',
- "ascr;": '\U0001D4B6',
- "ast;": '\U0000002A',
- "asymp;": '\U00002248',
- "asympeq;": '\U0000224D',
- "atilde;": '\U000000E3',
- "auml;": '\U000000E4',
- "awconint;": '\U00002233',
- "awint;": '\U00002A11',
- "bNot;": '\U00002AED',
- "backcong;": '\U0000224C',
- "backepsilon;": '\U000003F6',
- "backprime;": '\U00002035',
- "backsim;": '\U0000223D',
- "backsimeq;": '\U000022CD',
- "barvee;": '\U000022BD',
- "barwed;": '\U00002305',
- "barwedge;": '\U00002305',
- "bbrk;": '\U000023B5',
- "bbrktbrk;": '\U000023B6',
- "bcong;": '\U0000224C',
- "bcy;": '\U00000431',
- "bdquo;": '\U0000201E',
- "becaus;": '\U00002235',
- "because;": '\U00002235',
- "bemptyv;": '\U000029B0',
- "bepsi;": '\U000003F6',
- "bernou;": '\U0000212C',
- "beta;": '\U000003B2',
- "beth;": '\U00002136',
- "between;": '\U0000226C',
- "bfr;": '\U0001D51F',
- "bigcap;": '\U000022C2',
- "bigcirc;": '\U000025EF',
- "bigcup;": '\U000022C3',
- "bigodot;": '\U00002A00',
- "bigoplus;": '\U00002A01',
- "bigotimes;": '\U00002A02',
- "bigsqcup;": '\U00002A06',
- "bigstar;": '\U00002605',
- "bigtriangledown;": '\U000025BD',
- "bigtriangleup;": '\U000025B3',
- "biguplus;": '\U00002A04',
- "bigvee;": '\U000022C1',
- "bigwedge;": '\U000022C0',
- "bkarow;": '\U0000290D',
- "blacklozenge;": '\U000029EB',
- "blacksquare;": '\U000025AA',
- "blacktriangle;": '\U000025B4',
- "blacktriangledown;": '\U000025BE',
- "blacktriangleleft;": '\U000025C2',
- "blacktriangleright;": '\U000025B8',
- "blank;": '\U00002423',
- "blk12;": '\U00002592',
- "blk14;": '\U00002591',
- "blk34;": '\U00002593',
- "block;": '\U00002588',
- "bnot;": '\U00002310',
- "bopf;": '\U0001D553',
- "bot;": '\U000022A5',
- "bottom;": '\U000022A5',
- "bowtie;": '\U000022C8',
- "boxDL;": '\U00002557',
- "boxDR;": '\U00002554',
- "boxDl;": '\U00002556',
- "boxDr;": '\U00002553',
- "boxH;": '\U00002550',
- "boxHD;": '\U00002566',
- "boxHU;": '\U00002569',
- "boxHd;": '\U00002564',
- "boxHu;": '\U00002567',
- "boxUL;": '\U0000255D',
- "boxUR;": '\U0000255A',
- "boxUl;": '\U0000255C',
- "boxUr;": '\U00002559',
- "boxV;": '\U00002551',
- "boxVH;": '\U0000256C',
- "boxVL;": '\U00002563',
- "boxVR;": '\U00002560',
- "boxVh;": '\U0000256B',
- "boxVl;": '\U00002562',
- "boxVr;": '\U0000255F',
- "boxbox;": '\U000029C9',
- "boxdL;": '\U00002555',
- "boxdR;": '\U00002552',
- "boxdl;": '\U00002510',
- "boxdr;": '\U0000250C',
- "boxh;": '\U00002500',
- "boxhD;": '\U00002565',
- "boxhU;": '\U00002568',
- "boxhd;": '\U0000252C',
- "boxhu;": '\U00002534',
- "boxminus;": '\U0000229F',
- "boxplus;": '\U0000229E',
- "boxtimes;": '\U000022A0',
- "boxuL;": '\U0000255B',
- "boxuR;": '\U00002558',
- "boxul;": '\U00002518',
- "boxur;": '\U00002514',
- "boxv;": '\U00002502',
- "boxvH;": '\U0000256A',
- "boxvL;": '\U00002561',
- "boxvR;": '\U0000255E',
- "boxvh;": '\U0000253C',
- "boxvl;": '\U00002524',
- "boxvr;": '\U0000251C',
- "bprime;": '\U00002035',
- "breve;": '\U000002D8',
- "brvbar;": '\U000000A6',
- "bscr;": '\U0001D4B7',
- "bsemi;": '\U0000204F',
- "bsim;": '\U0000223D',
- "bsime;": '\U000022CD',
- "bsol;": '\U0000005C',
- "bsolb;": '\U000029C5',
- "bsolhsub;": '\U000027C8',
- "bull;": '\U00002022',
- "bullet;": '\U00002022',
- "bump;": '\U0000224E',
- "bumpE;": '\U00002AAE',
- "bumpe;": '\U0000224F',
- "bumpeq;": '\U0000224F',
- "cacute;": '\U00000107',
- "cap;": '\U00002229',
- "capand;": '\U00002A44',
- "capbrcup;": '\U00002A49',
- "capcap;": '\U00002A4B',
- "capcup;": '\U00002A47',
- "capdot;": '\U00002A40',
- "caret;": '\U00002041',
- "caron;": '\U000002C7',
- "ccaps;": '\U00002A4D',
- "ccaron;": '\U0000010D',
- "ccedil;": '\U000000E7',
- "ccirc;": '\U00000109',
- "ccups;": '\U00002A4C',
- "ccupssm;": '\U00002A50',
- "cdot;": '\U0000010B',
- "cedil;": '\U000000B8',
- "cemptyv;": '\U000029B2',
- "cent;": '\U000000A2',
- "centerdot;": '\U000000B7',
- "cfr;": '\U0001D520',
- "chcy;": '\U00000447',
- "check;": '\U00002713',
- "checkmark;": '\U00002713',
- "chi;": '\U000003C7',
- "cir;": '\U000025CB',
- "cirE;": '\U000029C3',
- "circ;": '\U000002C6',
- "circeq;": '\U00002257',
- "circlearrowleft;": '\U000021BA',
- "circlearrowright;": '\U000021BB',
- "circledR;": '\U000000AE',
- "circledS;": '\U000024C8',
- "circledast;": '\U0000229B',
- "circledcirc;": '\U0000229A',
- "circleddash;": '\U0000229D',
- "cire;": '\U00002257',
- "cirfnint;": '\U00002A10',
- "cirmid;": '\U00002AEF',
- "cirscir;": '\U000029C2',
- "clubs;": '\U00002663',
- "clubsuit;": '\U00002663',
- "colon;": '\U0000003A',
- "colone;": '\U00002254',
- "coloneq;": '\U00002254',
- "comma;": '\U0000002C',
- "commat;": '\U00000040',
- "comp;": '\U00002201',
- "compfn;": '\U00002218',
- "complement;": '\U00002201',
- "complexes;": '\U00002102',
- "cong;": '\U00002245',
- "congdot;": '\U00002A6D',
- "conint;": '\U0000222E',
- "copf;": '\U0001D554',
- "coprod;": '\U00002210',
- "copy;": '\U000000A9',
- "copysr;": '\U00002117',
- "crarr;": '\U000021B5',
- "cross;": '\U00002717',
- "cscr;": '\U0001D4B8',
- "csub;": '\U00002ACF',
- "csube;": '\U00002AD1',
- "csup;": '\U00002AD0',
- "csupe;": '\U00002AD2',
- "ctdot;": '\U000022EF',
- "cudarrl;": '\U00002938',
- "cudarrr;": '\U00002935',
- "cuepr;": '\U000022DE',
- "cuesc;": '\U000022DF',
- "cularr;": '\U000021B6',
- "cularrp;": '\U0000293D',
- "cup;": '\U0000222A',
- "cupbrcap;": '\U00002A48',
- "cupcap;": '\U00002A46',
- "cupcup;": '\U00002A4A',
- "cupdot;": '\U0000228D',
- "cupor;": '\U00002A45',
- "curarr;": '\U000021B7',
- "curarrm;": '\U0000293C',
- "curlyeqprec;": '\U000022DE',
- "curlyeqsucc;": '\U000022DF',
- "curlyvee;": '\U000022CE',
- "curlywedge;": '\U000022CF',
- "curren;": '\U000000A4',
- "curvearrowleft;": '\U000021B6',
- "curvearrowright;": '\U000021B7',
- "cuvee;": '\U000022CE',
- "cuwed;": '\U000022CF',
- "cwconint;": '\U00002232',
- "cwint;": '\U00002231',
- "cylcty;": '\U0000232D',
- "dArr;": '\U000021D3',
- "dHar;": '\U00002965',
- "dagger;": '\U00002020',
- "daleth;": '\U00002138',
- "darr;": '\U00002193',
- "dash;": '\U00002010',
- "dashv;": '\U000022A3',
- "dbkarow;": '\U0000290F',
- "dblac;": '\U000002DD',
- "dcaron;": '\U0000010F',
- "dcy;": '\U00000434',
- "dd;": '\U00002146',
- "ddagger;": '\U00002021',
- "ddarr;": '\U000021CA',
- "ddotseq;": '\U00002A77',
- "deg;": '\U000000B0',
- "delta;": '\U000003B4',
- "demptyv;": '\U000029B1',
- "dfisht;": '\U0000297F',
- "dfr;": '\U0001D521',
- "dharl;": '\U000021C3',
- "dharr;": '\U000021C2',
- "diam;": '\U000022C4',
- "diamond;": '\U000022C4',
- "diamondsuit;": '\U00002666',
- "diams;": '\U00002666',
- "die;": '\U000000A8',
- "digamma;": '\U000003DD',
- "disin;": '\U000022F2',
- "div;": '\U000000F7',
- "divide;": '\U000000F7',
- "divideontimes;": '\U000022C7',
- "divonx;": '\U000022C7',
- "djcy;": '\U00000452',
- "dlcorn;": '\U0000231E',
- "dlcrop;": '\U0000230D',
- "dollar;": '\U00000024',
- "dopf;": '\U0001D555',
- "dot;": '\U000002D9',
- "doteq;": '\U00002250',
- "doteqdot;": '\U00002251',
- "dotminus;": '\U00002238',
- "dotplus;": '\U00002214',
- "dotsquare;": '\U000022A1',
- "doublebarwedge;": '\U00002306',
- "downarrow;": '\U00002193',
- "downdownarrows;": '\U000021CA',
- "downharpoonleft;": '\U000021C3',
- "downharpoonright;": '\U000021C2',
- "drbkarow;": '\U00002910',
- "drcorn;": '\U0000231F',
- "drcrop;": '\U0000230C',
- "dscr;": '\U0001D4B9',
- "dscy;": '\U00000455',
- "dsol;": '\U000029F6',
- "dstrok;": '\U00000111',
- "dtdot;": '\U000022F1',
- "dtri;": '\U000025BF',
- "dtrif;": '\U000025BE',
- "duarr;": '\U000021F5',
- "duhar;": '\U0000296F',
- "dwangle;": '\U000029A6',
- "dzcy;": '\U0000045F',
- "dzigrarr;": '\U000027FF',
- "eDDot;": '\U00002A77',
- "eDot;": '\U00002251',
- "eacute;": '\U000000E9',
- "easter;": '\U00002A6E',
- "ecaron;": '\U0000011B',
- "ecir;": '\U00002256',
- "ecirc;": '\U000000EA',
- "ecolon;": '\U00002255',
- "ecy;": '\U0000044D',
- "edot;": '\U00000117',
- "ee;": '\U00002147',
- "efDot;": '\U00002252',
- "efr;": '\U0001D522',
- "eg;": '\U00002A9A',
- "egrave;": '\U000000E8',
- "egs;": '\U00002A96',
- "egsdot;": '\U00002A98',
- "el;": '\U00002A99',
- "elinters;": '\U000023E7',
- "ell;": '\U00002113',
- "els;": '\U00002A95',
- "elsdot;": '\U00002A97',
- "emacr;": '\U00000113',
- "empty;": '\U00002205',
- "emptyset;": '\U00002205',
- "emptyv;": '\U00002205',
- "emsp;": '\U00002003',
- "emsp13;": '\U00002004',
- "emsp14;": '\U00002005',
- "eng;": '\U0000014B',
- "ensp;": '\U00002002',
- "eogon;": '\U00000119',
- "eopf;": '\U0001D556',
- "epar;": '\U000022D5',
- "eparsl;": '\U000029E3',
- "eplus;": '\U00002A71',
- "epsi;": '\U000003B5',
- "epsilon;": '\U000003B5',
- "epsiv;": '\U000003F5',
- "eqcirc;": '\U00002256',
- "eqcolon;": '\U00002255',
- "eqsim;": '\U00002242',
- "eqslantgtr;": '\U00002A96',
- "eqslantless;": '\U00002A95',
- "equals;": '\U0000003D',
- "equest;": '\U0000225F',
- "equiv;": '\U00002261',
- "equivDD;": '\U00002A78',
- "eqvparsl;": '\U000029E5',
- "erDot;": '\U00002253',
- "erarr;": '\U00002971',
- "escr;": '\U0000212F',
- "esdot;": '\U00002250',
- "esim;": '\U00002242',
- "eta;": '\U000003B7',
- "eth;": '\U000000F0',
- "euml;": '\U000000EB',
- "euro;": '\U000020AC',
- "excl;": '\U00000021',
- "exist;": '\U00002203',
- "expectation;": '\U00002130',
- "exponentiale;": '\U00002147',
- "fallingdotseq;": '\U00002252',
- "fcy;": '\U00000444',
- "female;": '\U00002640',
- "ffilig;": '\U0000FB03',
- "fflig;": '\U0000FB00',
- "ffllig;": '\U0000FB04',
- "ffr;": '\U0001D523',
- "filig;": '\U0000FB01',
- "flat;": '\U0000266D',
- "fllig;": '\U0000FB02',
- "fltns;": '\U000025B1',
- "fnof;": '\U00000192',
- "fopf;": '\U0001D557',
- "forall;": '\U00002200',
- "fork;": '\U000022D4',
- "forkv;": '\U00002AD9',
- "fpartint;": '\U00002A0D',
- "frac12;": '\U000000BD',
- "frac13;": '\U00002153',
- "frac14;": '\U000000BC',
- "frac15;": '\U00002155',
- "frac16;": '\U00002159',
- "frac18;": '\U0000215B',
- "frac23;": '\U00002154',
- "frac25;": '\U00002156',
- "frac34;": '\U000000BE',
- "frac35;": '\U00002157',
- "frac38;": '\U0000215C',
- "frac45;": '\U00002158',
- "frac56;": '\U0000215A',
- "frac58;": '\U0000215D',
- "frac78;": '\U0000215E',
- "frasl;": '\U00002044',
- "frown;": '\U00002322',
- "fscr;": '\U0001D4BB',
- "gE;": '\U00002267',
- "gEl;": '\U00002A8C',
- "gacute;": '\U000001F5',
- "gamma;": '\U000003B3',
- "gammad;": '\U000003DD',
- "gap;": '\U00002A86',
- "gbreve;": '\U0000011F',
- "gcirc;": '\U0000011D',
- "gcy;": '\U00000433',
- "gdot;": '\U00000121',
- "ge;": '\U00002265',
- "gel;": '\U000022DB',
- "geq;": '\U00002265',
- "geqq;": '\U00002267',
- "geqslant;": '\U00002A7E',
- "ges;": '\U00002A7E',
- "gescc;": '\U00002AA9',
- "gesdot;": '\U00002A80',
- "gesdoto;": '\U00002A82',
- "gesdotol;": '\U00002A84',
- "gesles;": '\U00002A94',
- "gfr;": '\U0001D524',
- "gg;": '\U0000226B',
- "ggg;": '\U000022D9',
- "gimel;": '\U00002137',
- "gjcy;": '\U00000453',
- "gl;": '\U00002277',
- "glE;": '\U00002A92',
- "gla;": '\U00002AA5',
- "glj;": '\U00002AA4',
- "gnE;": '\U00002269',
- "gnap;": '\U00002A8A',
- "gnapprox;": '\U00002A8A',
- "gne;": '\U00002A88',
- "gneq;": '\U00002A88',
- "gneqq;": '\U00002269',
- "gnsim;": '\U000022E7',
- "gopf;": '\U0001D558',
- "grave;": '\U00000060',
- "gscr;": '\U0000210A',
- "gsim;": '\U00002273',
- "gsime;": '\U00002A8E',
- "gsiml;": '\U00002A90',
- "gt;": '\U0000003E',
- "gtcc;": '\U00002AA7',
- "gtcir;": '\U00002A7A',
- "gtdot;": '\U000022D7',
- "gtlPar;": '\U00002995',
- "gtquest;": '\U00002A7C',
- "gtrapprox;": '\U00002A86',
- "gtrarr;": '\U00002978',
- "gtrdot;": '\U000022D7',
- "gtreqless;": '\U000022DB',
- "gtreqqless;": '\U00002A8C',
- "gtrless;": '\U00002277',
- "gtrsim;": '\U00002273',
- "hArr;": '\U000021D4',
- "hairsp;": '\U0000200A',
- "half;": '\U000000BD',
- "hamilt;": '\U0000210B',
- "hardcy;": '\U0000044A',
- "harr;": '\U00002194',
- "harrcir;": '\U00002948',
- "harrw;": '\U000021AD',
- "hbar;": '\U0000210F',
- "hcirc;": '\U00000125',
- "hearts;": '\U00002665',
- "heartsuit;": '\U00002665',
- "hellip;": '\U00002026',
- "hercon;": '\U000022B9',
- "hfr;": '\U0001D525',
- "hksearow;": '\U00002925',
- "hkswarow;": '\U00002926',
- "hoarr;": '\U000021FF',
- "homtht;": '\U0000223B',
- "hookleftarrow;": '\U000021A9',
- "hookrightarrow;": '\U000021AA',
- "hopf;": '\U0001D559',
- "horbar;": '\U00002015',
- "hscr;": '\U0001D4BD',
- "hslash;": '\U0000210F',
- "hstrok;": '\U00000127',
- "hybull;": '\U00002043',
- "hyphen;": '\U00002010',
- "iacute;": '\U000000ED',
- "ic;": '\U00002063',
- "icirc;": '\U000000EE',
- "icy;": '\U00000438',
- "iecy;": '\U00000435',
- "iexcl;": '\U000000A1',
- "iff;": '\U000021D4',
- "ifr;": '\U0001D526',
- "igrave;": '\U000000EC',
- "ii;": '\U00002148',
- "iiiint;": '\U00002A0C',
- "iiint;": '\U0000222D',
- "iinfin;": '\U000029DC',
- "iiota;": '\U00002129',
- "ijlig;": '\U00000133',
- "imacr;": '\U0000012B',
- "image;": '\U00002111',
- "imagline;": '\U00002110',
- "imagpart;": '\U00002111',
- "imath;": '\U00000131',
- "imof;": '\U000022B7',
- "imped;": '\U000001B5',
- "in;": '\U00002208',
- "incare;": '\U00002105',
- "infin;": '\U0000221E',
- "infintie;": '\U000029DD',
- "inodot;": '\U00000131',
- "int;": '\U0000222B',
- "intcal;": '\U000022BA',
- "integers;": '\U00002124',
- "intercal;": '\U000022BA',
- "intlarhk;": '\U00002A17',
- "intprod;": '\U00002A3C',
- "iocy;": '\U00000451',
- "iogon;": '\U0000012F',
- "iopf;": '\U0001D55A',
- "iota;": '\U000003B9',
- "iprod;": '\U00002A3C',
- "iquest;": '\U000000BF',
- "iscr;": '\U0001D4BE',
- "isin;": '\U00002208',
- "isinE;": '\U000022F9',
- "isindot;": '\U000022F5',
- "isins;": '\U000022F4',
- "isinsv;": '\U000022F3',
- "isinv;": '\U00002208',
- "it;": '\U00002062',
- "itilde;": '\U00000129',
- "iukcy;": '\U00000456',
- "iuml;": '\U000000EF',
- "jcirc;": '\U00000135',
- "jcy;": '\U00000439',
- "jfr;": '\U0001D527',
- "jmath;": '\U00000237',
- "jopf;": '\U0001D55B',
- "jscr;": '\U0001D4BF',
- "jsercy;": '\U00000458',
- "jukcy;": '\U00000454',
- "kappa;": '\U000003BA',
- "kappav;": '\U000003F0',
- "kcedil;": '\U00000137',
- "kcy;": '\U0000043A',
- "kfr;": '\U0001D528',
- "kgreen;": '\U00000138',
- "khcy;": '\U00000445',
- "kjcy;": '\U0000045C',
- "kopf;": '\U0001D55C',
- "kscr;": '\U0001D4C0',
- "lAarr;": '\U000021DA',
- "lArr;": '\U000021D0',
- "lAtail;": '\U0000291B',
- "lBarr;": '\U0000290E',
- "lE;": '\U00002266',
- "lEg;": '\U00002A8B',
- "lHar;": '\U00002962',
- "lacute;": '\U0000013A',
- "laemptyv;": '\U000029B4',
- "lagran;": '\U00002112',
- "lambda;": '\U000003BB',
- "lang;": '\U000027E8',
- "langd;": '\U00002991',
- "langle;": '\U000027E8',
- "lap;": '\U00002A85',
- "laquo;": '\U000000AB',
- "larr;": '\U00002190',
- "larrb;": '\U000021E4',
- "larrbfs;": '\U0000291F',
- "larrfs;": '\U0000291D',
- "larrhk;": '\U000021A9',
- "larrlp;": '\U000021AB',
- "larrpl;": '\U00002939',
- "larrsim;": '\U00002973',
- "larrtl;": '\U000021A2',
- "lat;": '\U00002AAB',
- "latail;": '\U00002919',
- "late;": '\U00002AAD',
- "lbarr;": '\U0000290C',
- "lbbrk;": '\U00002772',
- "lbrace;": '\U0000007B',
- "lbrack;": '\U0000005B',
- "lbrke;": '\U0000298B',
- "lbrksld;": '\U0000298F',
- "lbrkslu;": '\U0000298D',
- "lcaron;": '\U0000013E',
- "lcedil;": '\U0000013C',
- "lceil;": '\U00002308',
- "lcub;": '\U0000007B',
- "lcy;": '\U0000043B',
- "ldca;": '\U00002936',
- "ldquo;": '\U0000201C',
- "ldquor;": '\U0000201E',
- "ldrdhar;": '\U00002967',
- "ldrushar;": '\U0000294B',
- "ldsh;": '\U000021B2',
- "le;": '\U00002264',
- "leftarrow;": '\U00002190',
- "leftarrowtail;": '\U000021A2',
- "leftharpoondown;": '\U000021BD',
- "leftharpoonup;": '\U000021BC',
- "leftleftarrows;": '\U000021C7',
- "leftrightarrow;": '\U00002194',
- "leftrightarrows;": '\U000021C6',
- "leftrightharpoons;": '\U000021CB',
- "leftrightsquigarrow;": '\U000021AD',
- "leftthreetimes;": '\U000022CB',
- "leg;": '\U000022DA',
- "leq;": '\U00002264',
- "leqq;": '\U00002266',
- "leqslant;": '\U00002A7D',
- "les;": '\U00002A7D',
- "lescc;": '\U00002AA8',
- "lesdot;": '\U00002A7F',
- "lesdoto;": '\U00002A81',
- "lesdotor;": '\U00002A83',
- "lesges;": '\U00002A93',
- "lessapprox;": '\U00002A85',
- "lessdot;": '\U000022D6',
- "lesseqgtr;": '\U000022DA',
- "lesseqqgtr;": '\U00002A8B',
- "lessgtr;": '\U00002276',
- "lesssim;": '\U00002272',
- "lfisht;": '\U0000297C',
- "lfloor;": '\U0000230A',
- "lfr;": '\U0001D529',
- "lg;": '\U00002276',
- "lgE;": '\U00002A91',
- "lhard;": '\U000021BD',
- "lharu;": '\U000021BC',
- "lharul;": '\U0000296A',
- "lhblk;": '\U00002584',
- "ljcy;": '\U00000459',
- "ll;": '\U0000226A',
- "llarr;": '\U000021C7',
- "llcorner;": '\U0000231E',
- "llhard;": '\U0000296B',
- "lltri;": '\U000025FA',
- "lmidot;": '\U00000140',
- "lmoust;": '\U000023B0',
- "lmoustache;": '\U000023B0',
- "lnE;": '\U00002268',
- "lnap;": '\U00002A89',
- "lnapprox;": '\U00002A89',
- "lne;": '\U00002A87',
- "lneq;": '\U00002A87',
- "lneqq;": '\U00002268',
- "lnsim;": '\U000022E6',
- "loang;": '\U000027EC',
- "loarr;": '\U000021FD',
- "lobrk;": '\U000027E6',
- "longleftarrow;": '\U000027F5',
- "longleftrightarrow;": '\U000027F7',
- "longmapsto;": '\U000027FC',
- "longrightarrow;": '\U000027F6',
- "looparrowleft;": '\U000021AB',
- "looparrowright;": '\U000021AC',
- "lopar;": '\U00002985',
- "lopf;": '\U0001D55D',
- "loplus;": '\U00002A2D',
- "lotimes;": '\U00002A34',
- "lowast;": '\U00002217',
- "lowbar;": '\U0000005F',
- "loz;": '\U000025CA',
- "lozenge;": '\U000025CA',
- "lozf;": '\U000029EB',
- "lpar;": '\U00000028',
- "lparlt;": '\U00002993',
- "lrarr;": '\U000021C6',
- "lrcorner;": '\U0000231F',
- "lrhar;": '\U000021CB',
- "lrhard;": '\U0000296D',
- "lrm;": '\U0000200E',
- "lrtri;": '\U000022BF',
- "lsaquo;": '\U00002039',
- "lscr;": '\U0001D4C1',
- "lsh;": '\U000021B0',
- "lsim;": '\U00002272',
- "lsime;": '\U00002A8D',
- "lsimg;": '\U00002A8F',
- "lsqb;": '\U0000005B',
- "lsquo;": '\U00002018',
- "lsquor;": '\U0000201A',
- "lstrok;": '\U00000142',
- "lt;": '\U0000003C',
- "ltcc;": '\U00002AA6',
- "ltcir;": '\U00002A79',
- "ltdot;": '\U000022D6',
- "lthree;": '\U000022CB',
- "ltimes;": '\U000022C9',
- "ltlarr;": '\U00002976',
- "ltquest;": '\U00002A7B',
- "ltrPar;": '\U00002996',
- "ltri;": '\U000025C3',
- "ltrie;": '\U000022B4',
- "ltrif;": '\U000025C2',
- "lurdshar;": '\U0000294A',
- "luruhar;": '\U00002966',
- "mDDot;": '\U0000223A',
- "macr;": '\U000000AF',
- "male;": '\U00002642',
- "malt;": '\U00002720',
- "maltese;": '\U00002720',
- "map;": '\U000021A6',
- "mapsto;": '\U000021A6',
- "mapstodown;": '\U000021A7',
- "mapstoleft;": '\U000021A4',
- "mapstoup;": '\U000021A5',
- "marker;": '\U000025AE',
- "mcomma;": '\U00002A29',
- "mcy;": '\U0000043C',
- "mdash;": '\U00002014',
- "measuredangle;": '\U00002221',
- "mfr;": '\U0001D52A',
- "mho;": '\U00002127',
- "micro;": '\U000000B5',
- "mid;": '\U00002223',
- "midast;": '\U0000002A',
- "midcir;": '\U00002AF0',
- "middot;": '\U000000B7',
- "minus;": '\U00002212',
- "minusb;": '\U0000229F',
- "minusd;": '\U00002238',
- "minusdu;": '\U00002A2A',
- "mlcp;": '\U00002ADB',
- "mldr;": '\U00002026',
- "mnplus;": '\U00002213',
- "models;": '\U000022A7',
- "mopf;": '\U0001D55E',
- "mp;": '\U00002213',
- "mscr;": '\U0001D4C2',
- "mstpos;": '\U0000223E',
- "mu;": '\U000003BC',
- "multimap;": '\U000022B8',
- "mumap;": '\U000022B8',
- "nLeftarrow;": '\U000021CD',
- "nLeftrightarrow;": '\U000021CE',
- "nRightarrow;": '\U000021CF',
- "nVDash;": '\U000022AF',
- "nVdash;": '\U000022AE',
- "nabla;": '\U00002207',
- "nacute;": '\U00000144',
- "nap;": '\U00002249',
- "napos;": '\U00000149',
- "napprox;": '\U00002249',
- "natur;": '\U0000266E',
- "natural;": '\U0000266E',
- "naturals;": '\U00002115',
- "nbsp;": '\U000000A0',
- "ncap;": '\U00002A43',
- "ncaron;": '\U00000148',
- "ncedil;": '\U00000146',
- "ncong;": '\U00002247',
- "ncup;": '\U00002A42',
- "ncy;": '\U0000043D',
- "ndash;": '\U00002013',
- "ne;": '\U00002260',
- "neArr;": '\U000021D7',
- "nearhk;": '\U00002924',
- "nearr;": '\U00002197',
- "nearrow;": '\U00002197',
- "nequiv;": '\U00002262',
- "nesear;": '\U00002928',
- "nexist;": '\U00002204',
- "nexists;": '\U00002204',
- "nfr;": '\U0001D52B',
- "nge;": '\U00002271',
- "ngeq;": '\U00002271',
- "ngsim;": '\U00002275',
- "ngt;": '\U0000226F',
- "ngtr;": '\U0000226F',
- "nhArr;": '\U000021CE',
- "nharr;": '\U000021AE',
- "nhpar;": '\U00002AF2',
- "ni;": '\U0000220B',
- "nis;": '\U000022FC',
- "nisd;": '\U000022FA',
- "niv;": '\U0000220B',
- "njcy;": '\U0000045A',
- "nlArr;": '\U000021CD',
- "nlarr;": '\U0000219A',
- "nldr;": '\U00002025',
- "nle;": '\U00002270',
- "nleftarrow;": '\U0000219A',
- "nleftrightarrow;": '\U000021AE',
- "nleq;": '\U00002270',
- "nless;": '\U0000226E',
- "nlsim;": '\U00002274',
- "nlt;": '\U0000226E',
- "nltri;": '\U000022EA',
- "nltrie;": '\U000022EC',
- "nmid;": '\U00002224',
- "nopf;": '\U0001D55F',
- "not;": '\U000000AC',
- "notin;": '\U00002209',
- "notinva;": '\U00002209',
- "notinvb;": '\U000022F7',
- "notinvc;": '\U000022F6',
- "notni;": '\U0000220C',
- "notniva;": '\U0000220C',
- "notnivb;": '\U000022FE',
- "notnivc;": '\U000022FD',
- "npar;": '\U00002226',
- "nparallel;": '\U00002226',
- "npolint;": '\U00002A14',
- "npr;": '\U00002280',
- "nprcue;": '\U000022E0',
- "nprec;": '\U00002280',
- "nrArr;": '\U000021CF',
- "nrarr;": '\U0000219B',
- "nrightarrow;": '\U0000219B',
- "nrtri;": '\U000022EB',
- "nrtrie;": '\U000022ED',
- "nsc;": '\U00002281',
- "nsccue;": '\U000022E1',
- "nscr;": '\U0001D4C3',
- "nshortmid;": '\U00002224',
- "nshortparallel;": '\U00002226',
- "nsim;": '\U00002241',
- "nsime;": '\U00002244',
- "nsimeq;": '\U00002244',
- "nsmid;": '\U00002224',
- "nspar;": '\U00002226',
- "nsqsube;": '\U000022E2',
- "nsqsupe;": '\U000022E3',
- "nsub;": '\U00002284',
- "nsube;": '\U00002288',
- "nsubseteq;": '\U00002288',
- "nsucc;": '\U00002281',
- "nsup;": '\U00002285',
- "nsupe;": '\U00002289',
- "nsupseteq;": '\U00002289',
- "ntgl;": '\U00002279',
- "ntilde;": '\U000000F1',
- "ntlg;": '\U00002278',
- "ntriangleleft;": '\U000022EA',
- "ntrianglelefteq;": '\U000022EC',
- "ntriangleright;": '\U000022EB',
- "ntrianglerighteq;": '\U000022ED',
- "nu;": '\U000003BD',
- "num;": '\U00000023',
- "numero;": '\U00002116',
- "numsp;": '\U00002007',
- "nvDash;": '\U000022AD',
- "nvHarr;": '\U00002904',
- "nvdash;": '\U000022AC',
- "nvinfin;": '\U000029DE',
- "nvlArr;": '\U00002902',
- "nvrArr;": '\U00002903',
- "nwArr;": '\U000021D6',
- "nwarhk;": '\U00002923',
- "nwarr;": '\U00002196',
- "nwarrow;": '\U00002196',
- "nwnear;": '\U00002927',
- "oS;": '\U000024C8',
- "oacute;": '\U000000F3',
- "oast;": '\U0000229B',
- "ocir;": '\U0000229A',
- "ocirc;": '\U000000F4',
- "ocy;": '\U0000043E',
- "odash;": '\U0000229D',
- "odblac;": '\U00000151',
- "odiv;": '\U00002A38',
- "odot;": '\U00002299',
- "odsold;": '\U000029BC',
- "oelig;": '\U00000153',
- "ofcir;": '\U000029BF',
- "ofr;": '\U0001D52C',
- "ogon;": '\U000002DB',
- "ograve;": '\U000000F2',
- "ogt;": '\U000029C1',
- "ohbar;": '\U000029B5',
- "ohm;": '\U000003A9',
- "oint;": '\U0000222E',
- "olarr;": '\U000021BA',
- "olcir;": '\U000029BE',
- "olcross;": '\U000029BB',
- "oline;": '\U0000203E',
- "olt;": '\U000029C0',
- "omacr;": '\U0000014D',
- "omega;": '\U000003C9',
- "omicron;": '\U000003BF',
- "omid;": '\U000029B6',
- "ominus;": '\U00002296',
- "oopf;": '\U0001D560',
- "opar;": '\U000029B7',
- "operp;": '\U000029B9',
- "oplus;": '\U00002295',
- "or;": '\U00002228',
- "orarr;": '\U000021BB',
- "ord;": '\U00002A5D',
- "order;": '\U00002134',
- "orderof;": '\U00002134',
- "ordf;": '\U000000AA',
- "ordm;": '\U000000BA',
- "origof;": '\U000022B6',
- "oror;": '\U00002A56',
- "orslope;": '\U00002A57',
- "orv;": '\U00002A5B',
- "oscr;": '\U00002134',
- "oslash;": '\U000000F8',
- "osol;": '\U00002298',
- "otilde;": '\U000000F5',
- "otimes;": '\U00002297',
- "otimesas;": '\U00002A36',
- "ouml;": '\U000000F6',
- "ovbar;": '\U0000233D',
- "par;": '\U00002225',
- "para;": '\U000000B6',
- "parallel;": '\U00002225',
- "parsim;": '\U00002AF3',
- "parsl;": '\U00002AFD',
- "part;": '\U00002202',
- "pcy;": '\U0000043F',
- "percnt;": '\U00000025',
- "period;": '\U0000002E',
- "permil;": '\U00002030',
- "perp;": '\U000022A5',
- "pertenk;": '\U00002031',
- "pfr;": '\U0001D52D',
- "phi;": '\U000003C6',
- "phiv;": '\U000003D5',
- "phmmat;": '\U00002133',
- "phone;": '\U0000260E',
- "pi;": '\U000003C0',
- "pitchfork;": '\U000022D4',
- "piv;": '\U000003D6',
- "planck;": '\U0000210F',
- "planckh;": '\U0000210E',
- "plankv;": '\U0000210F',
- "plus;": '\U0000002B',
- "plusacir;": '\U00002A23',
- "plusb;": '\U0000229E',
- "pluscir;": '\U00002A22',
- "plusdo;": '\U00002214',
- "plusdu;": '\U00002A25',
- "pluse;": '\U00002A72',
- "plusmn;": '\U000000B1',
- "plussim;": '\U00002A26',
- "plustwo;": '\U00002A27',
- "pm;": '\U000000B1',
- "pointint;": '\U00002A15',
- "popf;": '\U0001D561',
- "pound;": '\U000000A3',
- "pr;": '\U0000227A',
- "prE;": '\U00002AB3',
- "prap;": '\U00002AB7',
- "prcue;": '\U0000227C',
- "pre;": '\U00002AAF',
- "prec;": '\U0000227A',
- "precapprox;": '\U00002AB7',
- "preccurlyeq;": '\U0000227C',
- "preceq;": '\U00002AAF',
- "precnapprox;": '\U00002AB9',
- "precneqq;": '\U00002AB5',
- "precnsim;": '\U000022E8',
- "precsim;": '\U0000227E',
- "prime;": '\U00002032',
- "primes;": '\U00002119',
- "prnE;": '\U00002AB5',
- "prnap;": '\U00002AB9',
- "prnsim;": '\U000022E8',
- "prod;": '\U0000220F',
- "profalar;": '\U0000232E',
- "profline;": '\U00002312',
- "profsurf;": '\U00002313',
- "prop;": '\U0000221D',
- "propto;": '\U0000221D',
- "prsim;": '\U0000227E',
- "prurel;": '\U000022B0',
- "pscr;": '\U0001D4C5',
- "psi;": '\U000003C8',
- "puncsp;": '\U00002008',
- "qfr;": '\U0001D52E',
- "qint;": '\U00002A0C',
- "qopf;": '\U0001D562',
- "qprime;": '\U00002057',
- "qscr;": '\U0001D4C6',
- "quaternions;": '\U0000210D',
- "quatint;": '\U00002A16',
- "quest;": '\U0000003F',
- "questeq;": '\U0000225F',
- "quot;": '\U00000022',
- "rAarr;": '\U000021DB',
- "rArr;": '\U000021D2',
- "rAtail;": '\U0000291C',
- "rBarr;": '\U0000290F',
- "rHar;": '\U00002964',
- "racute;": '\U00000155',
- "radic;": '\U0000221A',
- "raemptyv;": '\U000029B3',
- "rang;": '\U000027E9',
- "rangd;": '\U00002992',
- "range;": '\U000029A5',
- "rangle;": '\U000027E9',
- "raquo;": '\U000000BB',
- "rarr;": '\U00002192',
- "rarrap;": '\U00002975',
- "rarrb;": '\U000021E5',
- "rarrbfs;": '\U00002920',
- "rarrc;": '\U00002933',
- "rarrfs;": '\U0000291E',
- "rarrhk;": '\U000021AA',
- "rarrlp;": '\U000021AC',
- "rarrpl;": '\U00002945',
- "rarrsim;": '\U00002974',
- "rarrtl;": '\U000021A3',
- "rarrw;": '\U0000219D',
- "ratail;": '\U0000291A',
- "ratio;": '\U00002236',
- "rationals;": '\U0000211A',
- "rbarr;": '\U0000290D',
- "rbbrk;": '\U00002773',
- "rbrace;": '\U0000007D',
- "rbrack;": '\U0000005D',
- "rbrke;": '\U0000298C',
- "rbrksld;": '\U0000298E',
- "rbrkslu;": '\U00002990',
- "rcaron;": '\U00000159',
- "rcedil;": '\U00000157',
- "rceil;": '\U00002309',
- "rcub;": '\U0000007D',
- "rcy;": '\U00000440',
- "rdca;": '\U00002937',
- "rdldhar;": '\U00002969',
- "rdquo;": '\U0000201D',
- "rdquor;": '\U0000201D',
- "rdsh;": '\U000021B3',
- "real;": '\U0000211C',
- "realine;": '\U0000211B',
- "realpart;": '\U0000211C',
- "reals;": '\U0000211D',
- "rect;": '\U000025AD',
- "reg;": '\U000000AE',
- "rfisht;": '\U0000297D',
- "rfloor;": '\U0000230B',
- "rfr;": '\U0001D52F',
- "rhard;": '\U000021C1',
- "rharu;": '\U000021C0',
- "rharul;": '\U0000296C',
- "rho;": '\U000003C1',
- "rhov;": '\U000003F1',
- "rightarrow;": '\U00002192',
- "rightarrowtail;": '\U000021A3',
- "rightharpoondown;": '\U000021C1',
- "rightharpoonup;": '\U000021C0',
- "rightleftarrows;": '\U000021C4',
- "rightleftharpoons;": '\U000021CC',
- "rightrightarrows;": '\U000021C9',
- "rightsquigarrow;": '\U0000219D',
- "rightthreetimes;": '\U000022CC',
- "ring;": '\U000002DA',
- "risingdotseq;": '\U00002253',
- "rlarr;": '\U000021C4',
- "rlhar;": '\U000021CC',
- "rlm;": '\U0000200F',
- "rmoust;": '\U000023B1',
- "rmoustache;": '\U000023B1',
- "rnmid;": '\U00002AEE',
- "roang;": '\U000027ED',
- "roarr;": '\U000021FE',
- "robrk;": '\U000027E7',
- "ropar;": '\U00002986',
- "ropf;": '\U0001D563',
- "roplus;": '\U00002A2E',
- "rotimes;": '\U00002A35',
- "rpar;": '\U00000029',
- "rpargt;": '\U00002994',
- "rppolint;": '\U00002A12',
- "rrarr;": '\U000021C9',
- "rsaquo;": '\U0000203A',
- "rscr;": '\U0001D4C7',
- "rsh;": '\U000021B1',
- "rsqb;": '\U0000005D',
- "rsquo;": '\U00002019',
- "rsquor;": '\U00002019',
- "rthree;": '\U000022CC',
- "rtimes;": '\U000022CA',
- "rtri;": '\U000025B9',
- "rtrie;": '\U000022B5',
- "rtrif;": '\U000025B8',
- "rtriltri;": '\U000029CE',
- "ruluhar;": '\U00002968',
- "rx;": '\U0000211E',
- "sacute;": '\U0000015B',
- "sbquo;": '\U0000201A',
- "sc;": '\U0000227B',
- "scE;": '\U00002AB4',
- "scap;": '\U00002AB8',
- "scaron;": '\U00000161',
- "sccue;": '\U0000227D',
- "sce;": '\U00002AB0',
- "scedil;": '\U0000015F',
- "scirc;": '\U0000015D',
- "scnE;": '\U00002AB6',
- "scnap;": '\U00002ABA',
- "scnsim;": '\U000022E9',
- "scpolint;": '\U00002A13',
- "scsim;": '\U0000227F',
- "scy;": '\U00000441',
- "sdot;": '\U000022C5',
- "sdotb;": '\U000022A1',
- "sdote;": '\U00002A66',
- "seArr;": '\U000021D8',
- "searhk;": '\U00002925',
- "searr;": '\U00002198',
- "searrow;": '\U00002198',
- "sect;": '\U000000A7',
- "semi;": '\U0000003B',
- "seswar;": '\U00002929',
- "setminus;": '\U00002216',
- "setmn;": '\U00002216',
- "sext;": '\U00002736',
- "sfr;": '\U0001D530',
- "sfrown;": '\U00002322',
- "sharp;": '\U0000266F',
- "shchcy;": '\U00000449',
- "shcy;": '\U00000448',
- "shortmid;": '\U00002223',
- "shortparallel;": '\U00002225',
- "shy;": '\U000000AD',
- "sigma;": '\U000003C3',
- "sigmaf;": '\U000003C2',
- "sigmav;": '\U000003C2',
- "sim;": '\U0000223C',
- "simdot;": '\U00002A6A',
- "sime;": '\U00002243',
- "simeq;": '\U00002243',
- "simg;": '\U00002A9E',
- "simgE;": '\U00002AA0',
- "siml;": '\U00002A9D',
- "simlE;": '\U00002A9F',
- "simne;": '\U00002246',
- "simplus;": '\U00002A24',
- "simrarr;": '\U00002972',
- "slarr;": '\U00002190',
- "smallsetminus;": '\U00002216',
- "smashp;": '\U00002A33',
- "smeparsl;": '\U000029E4',
- "smid;": '\U00002223',
- "smile;": '\U00002323',
- "smt;": '\U00002AAA',
- "smte;": '\U00002AAC',
- "softcy;": '\U0000044C',
- "sol;": '\U0000002F',
- "solb;": '\U000029C4',
- "solbar;": '\U0000233F',
- "sopf;": '\U0001D564',
- "spades;": '\U00002660',
- "spadesuit;": '\U00002660',
- "spar;": '\U00002225',
- "sqcap;": '\U00002293',
- "sqcup;": '\U00002294',
- "sqsub;": '\U0000228F',
- "sqsube;": '\U00002291',
- "sqsubset;": '\U0000228F',
- "sqsubseteq;": '\U00002291',
- "sqsup;": '\U00002290',
- "sqsupe;": '\U00002292',
- "sqsupset;": '\U00002290',
- "sqsupseteq;": '\U00002292',
- "squ;": '\U000025A1',
- "square;": '\U000025A1',
- "squarf;": '\U000025AA',
- "squf;": '\U000025AA',
- "srarr;": '\U00002192',
- "sscr;": '\U0001D4C8',
- "ssetmn;": '\U00002216',
- "ssmile;": '\U00002323',
- "sstarf;": '\U000022C6',
- "star;": '\U00002606',
- "starf;": '\U00002605',
- "straightepsilon;": '\U000003F5',
- "straightphi;": '\U000003D5',
- "strns;": '\U000000AF',
- "sub;": '\U00002282',
- "subE;": '\U00002AC5',
- "subdot;": '\U00002ABD',
- "sube;": '\U00002286',
- "subedot;": '\U00002AC3',
- "submult;": '\U00002AC1',
- "subnE;": '\U00002ACB',
- "subne;": '\U0000228A',
- "subplus;": '\U00002ABF',
- "subrarr;": '\U00002979',
- "subset;": '\U00002282',
- "subseteq;": '\U00002286',
- "subseteqq;": '\U00002AC5',
- "subsetneq;": '\U0000228A',
- "subsetneqq;": '\U00002ACB',
- "subsim;": '\U00002AC7',
- "subsub;": '\U00002AD5',
- "subsup;": '\U00002AD3',
- "succ;": '\U0000227B',
- "succapprox;": '\U00002AB8',
- "succcurlyeq;": '\U0000227D',
- "succeq;": '\U00002AB0',
- "succnapprox;": '\U00002ABA',
- "succneqq;": '\U00002AB6',
- "succnsim;": '\U000022E9',
- "succsim;": '\U0000227F',
- "sum;": '\U00002211',
- "sung;": '\U0000266A',
- "sup;": '\U00002283',
- "sup1;": '\U000000B9',
- "sup2;": '\U000000B2',
- "sup3;": '\U000000B3',
- "supE;": '\U00002AC6',
- "supdot;": '\U00002ABE',
- "supdsub;": '\U00002AD8',
- "supe;": '\U00002287',
- "supedot;": '\U00002AC4',
- "suphsol;": '\U000027C9',
- "suphsub;": '\U00002AD7',
- "suplarr;": '\U0000297B',
- "supmult;": '\U00002AC2',
- "supnE;": '\U00002ACC',
- "supne;": '\U0000228B',
- "supplus;": '\U00002AC0',
- "supset;": '\U00002283',
- "supseteq;": '\U00002287',
- "supseteqq;": '\U00002AC6',
- "supsetneq;": '\U0000228B',
- "supsetneqq;": '\U00002ACC',
- "supsim;": '\U00002AC8',
- "supsub;": '\U00002AD4',
- "supsup;": '\U00002AD6',
- "swArr;": '\U000021D9',
- "swarhk;": '\U00002926',
- "swarr;": '\U00002199',
- "swarrow;": '\U00002199',
- "swnwar;": '\U0000292A',
- "szlig;": '\U000000DF',
- "target;": '\U00002316',
- "tau;": '\U000003C4',
- "tbrk;": '\U000023B4',
- "tcaron;": '\U00000165',
- "tcedil;": '\U00000163',
- "tcy;": '\U00000442',
- "tdot;": '\U000020DB',
- "telrec;": '\U00002315',
- "tfr;": '\U0001D531',
- "there4;": '\U00002234',
- "therefore;": '\U00002234',
- "theta;": '\U000003B8',
- "thetasym;": '\U000003D1',
- "thetav;": '\U000003D1',
- "thickapprox;": '\U00002248',
- "thicksim;": '\U0000223C',
- "thinsp;": '\U00002009',
- "thkap;": '\U00002248',
- "thksim;": '\U0000223C',
- "thorn;": '\U000000FE',
- "tilde;": '\U000002DC',
- "times;": '\U000000D7',
- "timesb;": '\U000022A0',
- "timesbar;": '\U00002A31',
- "timesd;": '\U00002A30',
- "tint;": '\U0000222D',
- "toea;": '\U00002928',
- "top;": '\U000022A4',
- "topbot;": '\U00002336',
- "topcir;": '\U00002AF1',
- "topf;": '\U0001D565',
- "topfork;": '\U00002ADA',
- "tosa;": '\U00002929',
- "tprime;": '\U00002034',
- "trade;": '\U00002122',
- "triangle;": '\U000025B5',
- "triangledown;": '\U000025BF',
- "triangleleft;": '\U000025C3',
- "trianglelefteq;": '\U000022B4',
- "triangleq;": '\U0000225C',
- "triangleright;": '\U000025B9',
- "trianglerighteq;": '\U000022B5',
- "tridot;": '\U000025EC',
- "trie;": '\U0000225C',
- "triminus;": '\U00002A3A',
- "triplus;": '\U00002A39',
- "trisb;": '\U000029CD',
- "tritime;": '\U00002A3B',
- "trpezium;": '\U000023E2',
- "tscr;": '\U0001D4C9',
- "tscy;": '\U00000446',
- "tshcy;": '\U0000045B',
- "tstrok;": '\U00000167',
- "twixt;": '\U0000226C',
- "twoheadleftarrow;": '\U0000219E',
- "twoheadrightarrow;": '\U000021A0',
- "uArr;": '\U000021D1',
- "uHar;": '\U00002963',
- "uacute;": '\U000000FA',
- "uarr;": '\U00002191',
- "ubrcy;": '\U0000045E',
- "ubreve;": '\U0000016D',
- "ucirc;": '\U000000FB',
- "ucy;": '\U00000443',
- "udarr;": '\U000021C5',
- "udblac;": '\U00000171',
- "udhar;": '\U0000296E',
- "ufisht;": '\U0000297E',
- "ufr;": '\U0001D532',
- "ugrave;": '\U000000F9',
- "uharl;": '\U000021BF',
- "uharr;": '\U000021BE',
- "uhblk;": '\U00002580',
- "ulcorn;": '\U0000231C',
- "ulcorner;": '\U0000231C',
- "ulcrop;": '\U0000230F',
- "ultri;": '\U000025F8',
- "umacr;": '\U0000016B',
- "uml;": '\U000000A8',
- "uogon;": '\U00000173',
- "uopf;": '\U0001D566',
- "uparrow;": '\U00002191',
- "updownarrow;": '\U00002195',
- "upharpoonleft;": '\U000021BF',
- "upharpoonright;": '\U000021BE',
- "uplus;": '\U0000228E',
- "upsi;": '\U000003C5',
- "upsih;": '\U000003D2',
- "upsilon;": '\U000003C5',
- "upuparrows;": '\U000021C8',
- "urcorn;": '\U0000231D',
- "urcorner;": '\U0000231D',
- "urcrop;": '\U0000230E',
- "uring;": '\U0000016F',
- "urtri;": '\U000025F9',
- "uscr;": '\U0001D4CA',
- "utdot;": '\U000022F0',
- "utilde;": '\U00000169',
- "utri;": '\U000025B5',
- "utrif;": '\U000025B4',
- "uuarr;": '\U000021C8',
- "uuml;": '\U000000FC',
- "uwangle;": '\U000029A7',
- "vArr;": '\U000021D5',
- "vBar;": '\U00002AE8',
- "vBarv;": '\U00002AE9',
- "vDash;": '\U000022A8',
- "vangrt;": '\U0000299C',
- "varepsilon;": '\U000003F5',
- "varkappa;": '\U000003F0',
- "varnothing;": '\U00002205',
- "varphi;": '\U000003D5',
- "varpi;": '\U000003D6',
- "varpropto;": '\U0000221D',
- "varr;": '\U00002195',
- "varrho;": '\U000003F1',
- "varsigma;": '\U000003C2',
- "vartheta;": '\U000003D1',
- "vartriangleleft;": '\U000022B2',
- "vartriangleright;": '\U000022B3',
- "vcy;": '\U00000432',
- "vdash;": '\U000022A2',
- "vee;": '\U00002228',
- "veebar;": '\U000022BB',
- "veeeq;": '\U0000225A',
- "vellip;": '\U000022EE',
- "verbar;": '\U0000007C',
- "vert;": '\U0000007C',
- "vfr;": '\U0001D533',
- "vltri;": '\U000022B2',
- "vopf;": '\U0001D567',
- "vprop;": '\U0000221D',
- "vrtri;": '\U000022B3',
- "vscr;": '\U0001D4CB',
- "vzigzag;": '\U0000299A',
- "wcirc;": '\U00000175',
- "wedbar;": '\U00002A5F',
- "wedge;": '\U00002227',
- "wedgeq;": '\U00002259',
- "weierp;": '\U00002118',
- "wfr;": '\U0001D534',
- "wopf;": '\U0001D568',
- "wp;": '\U00002118',
- "wr;": '\U00002240',
- "wreath;": '\U00002240',
- "wscr;": '\U0001D4CC',
- "xcap;": '\U000022C2',
- "xcirc;": '\U000025EF',
- "xcup;": '\U000022C3',
- "xdtri;": '\U000025BD',
- "xfr;": '\U0001D535',
- "xhArr;": '\U000027FA',
- "xharr;": '\U000027F7',
- "xi;": '\U000003BE',
- "xlArr;": '\U000027F8',
- "xlarr;": '\U000027F5',
- "xmap;": '\U000027FC',
- "xnis;": '\U000022FB',
- "xodot;": '\U00002A00',
- "xopf;": '\U0001D569',
- "xoplus;": '\U00002A01',
- "xotime;": '\U00002A02',
- "xrArr;": '\U000027F9',
- "xrarr;": '\U000027F6',
- "xscr;": '\U0001D4CD',
- "xsqcup;": '\U00002A06',
- "xuplus;": '\U00002A04',
- "xutri;": '\U000025B3',
- "xvee;": '\U000022C1',
- "xwedge;": '\U000022C0',
- "yacute;": '\U000000FD',
- "yacy;": '\U0000044F',
- "ycirc;": '\U00000177',
- "ycy;": '\U0000044B',
- "yen;": '\U000000A5',
- "yfr;": '\U0001D536',
- "yicy;": '\U00000457',
- "yopf;": '\U0001D56A',
- "yscr;": '\U0001D4CE',
- "yucy;": '\U0000044E',
- "yuml;": '\U000000FF',
- "zacute;": '\U0000017A',
- "zcaron;": '\U0000017E',
- "zcy;": '\U00000437',
- "zdot;": '\U0000017C',
- "zeetrf;": '\U00002128',
- "zeta;": '\U000003B6',
- "zfr;": '\U0001D537',
- "zhcy;": '\U00000436',
- "zigrarr;": '\U000021DD',
- "zopf;": '\U0001D56B',
- "zscr;": '\U0001D4CF',
- "zwj;": '\U0000200D',
- "zwnj;": '\U0000200C',
- "AElig": '\U000000C6',
- "AMP": '\U00000026',
- "Aacute": '\U000000C1',
- "Acirc": '\U000000C2',
- "Agrave": '\U000000C0',
- "Aring": '\U000000C5',
- "Atilde": '\U000000C3',
- "Auml": '\U000000C4',
- "COPY": '\U000000A9',
- "Ccedil": '\U000000C7',
- "ETH": '\U000000D0',
- "Eacute": '\U000000C9',
- "Ecirc": '\U000000CA',
- "Egrave": '\U000000C8',
- "Euml": '\U000000CB',
- "GT": '\U0000003E',
- "Iacute": '\U000000CD',
- "Icirc": '\U000000CE',
- "Igrave": '\U000000CC',
- "Iuml": '\U000000CF',
- "LT": '\U0000003C',
- "Ntilde": '\U000000D1',
- "Oacute": '\U000000D3',
- "Ocirc": '\U000000D4',
- "Ograve": '\U000000D2',
- "Oslash": '\U000000D8',
- "Otilde": '\U000000D5',
- "Ouml": '\U000000D6',
- "QUOT": '\U00000022',
- "REG": '\U000000AE',
- "THORN": '\U000000DE',
- "Uacute": '\U000000DA',
- "Ucirc": '\U000000DB',
- "Ugrave": '\U000000D9',
- "Uuml": '\U000000DC',
- "Yacute": '\U000000DD',
- "aacute": '\U000000E1',
- "acirc": '\U000000E2',
- "acute": '\U000000B4',
- "aelig": '\U000000E6',
- "agrave": '\U000000E0',
- "amp": '\U00000026',
- "aring": '\U000000E5',
- "atilde": '\U000000E3',
- "auml": '\U000000E4',
- "brvbar": '\U000000A6',
- "ccedil": '\U000000E7',
- "cedil": '\U000000B8',
- "cent": '\U000000A2',
- "copy": '\U000000A9',
- "curren": '\U000000A4',
- "deg": '\U000000B0',
- "divide": '\U000000F7',
- "eacute": '\U000000E9',
- "ecirc": '\U000000EA',
- "egrave": '\U000000E8',
- "eth": '\U000000F0',
- "euml": '\U000000EB',
- "frac12": '\U000000BD',
- "frac14": '\U000000BC',
- "frac34": '\U000000BE',
- "gt": '\U0000003E',
- "iacute": '\U000000ED',
- "icirc": '\U000000EE',
- "iexcl": '\U000000A1',
- "igrave": '\U000000EC',
- "iquest": '\U000000BF',
- "iuml": '\U000000EF',
- "laquo": '\U000000AB',
- "lt": '\U0000003C',
- "macr": '\U000000AF',
- "micro": '\U000000B5',
- "middot": '\U000000B7',
- "nbsp": '\U000000A0',
- "not": '\U000000AC',
- "ntilde": '\U000000F1',
- "oacute": '\U000000F3',
- "ocirc": '\U000000F4',
- "ograve": '\U000000F2',
- "ordf": '\U000000AA',
- "ordm": '\U000000BA',
- "oslash": '\U000000F8',
- "otilde": '\U000000F5',
- "ouml": '\U000000F6',
- "para": '\U000000B6',
- "plusmn": '\U000000B1',
- "pound": '\U000000A3',
- "quot": '\U00000022',
- "raquo": '\U000000BB',
- "reg": '\U000000AE',
- "sect": '\U000000A7',
- "shy": '\U000000AD',
- "sup1": '\U000000B9',
- "sup2": '\U000000B2',
- "sup3": '\U000000B3',
- "szlig": '\U000000DF',
- "thorn": '\U000000FE',
- "times": '\U000000D7',
- "uacute": '\U000000FA',
- "ucirc": '\U000000FB',
- "ugrave": '\U000000F9',
- "uml": '\U000000A8',
- "uuml": '\U000000FC',
- "yacute": '\U000000FD',
- "yen": '\U000000A5',
- "yuml": '\U000000FF',
-}
-
-// HTML entities that are two unicode codepoints.
-var entity2 = map[string][2]rune{
- // TODO(nigeltao): Handle replacements that are wider than their names.
- // "nLt;": {'\u226A', '\u20D2'},
- // "nGt;": {'\u226B', '\u20D2'},
- "NotEqualTilde;": {'\u2242', '\u0338'},
- "NotGreaterFullEqual;": {'\u2267', '\u0338'},
- "NotGreaterGreater;": {'\u226B', '\u0338'},
- "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
- "NotHumpDownHump;": {'\u224E', '\u0338'},
- "NotHumpEqual;": {'\u224F', '\u0338'},
- "NotLeftTriangleBar;": {'\u29CF', '\u0338'},
- "NotLessLess;": {'\u226A', '\u0338'},
- "NotLessSlantEqual;": {'\u2A7D', '\u0338'},
- "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
- "NotNestedLessLess;": {'\u2AA1', '\u0338'},
- "NotPrecedesEqual;": {'\u2AAF', '\u0338'},
- "NotRightTriangleBar;": {'\u29D0', '\u0338'},
- "NotSquareSubset;": {'\u228F', '\u0338'},
- "NotSquareSuperset;": {'\u2290', '\u0338'},
- "NotSubset;": {'\u2282', '\u20D2'},
- "NotSucceedsEqual;": {'\u2AB0', '\u0338'},
- "NotSucceedsTilde;": {'\u227F', '\u0338'},
- "NotSuperset;": {'\u2283', '\u20D2'},
- "ThickSpace;": {'\u205F', '\u200A'},
- "acE;": {'\u223E', '\u0333'},
- "bne;": {'\u003D', '\u20E5'},
- "bnequiv;": {'\u2261', '\u20E5'},
- "caps;": {'\u2229', '\uFE00'},
- "cups;": {'\u222A', '\uFE00'},
- "fjlig;": {'\u0066', '\u006A'},
- "gesl;": {'\u22DB', '\uFE00'},
- "gvertneqq;": {'\u2269', '\uFE00'},
- "gvnE;": {'\u2269', '\uFE00'},
- "lates;": {'\u2AAD', '\uFE00'},
- "lesg;": {'\u22DA', '\uFE00'},
- "lvertneqq;": {'\u2268', '\uFE00'},
- "lvnE;": {'\u2268', '\uFE00'},
- "nGg;": {'\u22D9', '\u0338'},
- "nGtv;": {'\u226B', '\u0338'},
- "nLl;": {'\u22D8', '\u0338'},
- "nLtv;": {'\u226A', '\u0338'},
- "nang;": {'\u2220', '\u20D2'},
- "napE;": {'\u2A70', '\u0338'},
- "napid;": {'\u224B', '\u0338'},
- "nbump;": {'\u224E', '\u0338'},
- "nbumpe;": {'\u224F', '\u0338'},
- "ncongdot;": {'\u2A6D', '\u0338'},
- "nedot;": {'\u2250', '\u0338'},
- "nesim;": {'\u2242', '\u0338'},
- "ngE;": {'\u2267', '\u0338'},
- "ngeqq;": {'\u2267', '\u0338'},
- "ngeqslant;": {'\u2A7E', '\u0338'},
- "nges;": {'\u2A7E', '\u0338'},
- "nlE;": {'\u2266', '\u0338'},
- "nleqq;": {'\u2266', '\u0338'},
- "nleqslant;": {'\u2A7D', '\u0338'},
- "nles;": {'\u2A7D', '\u0338'},
- "notinE;": {'\u22F9', '\u0338'},
- "notindot;": {'\u22F5', '\u0338'},
- "nparsl;": {'\u2AFD', '\u20E5'},
- "npart;": {'\u2202', '\u0338'},
- "npre;": {'\u2AAF', '\u0338'},
- "npreceq;": {'\u2AAF', '\u0338'},
- "nrarrc;": {'\u2933', '\u0338'},
- "nrarrw;": {'\u219D', '\u0338'},
- "nsce;": {'\u2AB0', '\u0338'},
- "nsubE;": {'\u2AC5', '\u0338'},
- "nsubset;": {'\u2282', '\u20D2'},
- "nsubseteqq;": {'\u2AC5', '\u0338'},
- "nsucceq;": {'\u2AB0', '\u0338'},
- "nsupE;": {'\u2AC6', '\u0338'},
- "nsupset;": {'\u2283', '\u20D2'},
- "nsupseteqq;": {'\u2AC6', '\u0338'},
- "nvap;": {'\u224D', '\u20D2'},
- "nvge;": {'\u2265', '\u20D2'},
- "nvgt;": {'\u003E', '\u20D2'},
- "nvle;": {'\u2264', '\u20D2'},
- "nvlt;": {'\u003C', '\u20D2'},
- "nvltrie;": {'\u22B4', '\u20D2'},
- "nvrtrie;": {'\u22B5', '\u20D2'},
- "nvsim;": {'\u223C', '\u20D2'},
- "race;": {'\u223D', '\u0331'},
- "smtes;": {'\u2AAC', '\uFE00'},
- "sqcaps;": {'\u2293', '\uFE00'},
- "sqcups;": {'\u2294', '\uFE00'},
- "varsubsetneq;": {'\u228A', '\uFE00'},
- "varsubsetneqq;": {'\u2ACB', '\uFE00'},
- "varsupsetneq;": {'\u228B', '\uFE00'},
- "varsupsetneqq;": {'\u2ACC', '\uFE00'},
- "vnsub;": {'\u2282', '\u20D2'},
- "vnsup;": {'\u2283', '\u20D2'},
- "vsubnE;": {'\u2ACB', '\uFE00'},
- "vsubne;": {'\u228A', '\uFE00'},
- "vsupnE;": {'\u2ACC', '\uFE00'},
- "vsupne;": {'\u228B', '\uFE00'},
-}
diff --git a/etcd/vendor/golang.org/x/net/html/escape.go b/etcd/vendor/golang.org/x/net/html/escape.go
deleted file mode 100644
index d856139620..0000000000
--- a/etcd/vendor/golang.org/x/net/html/escape.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "bytes"
- "strings"
- "unicode/utf8"
-)
-
-// These replacements permit compatibility with old numeric entities that
-// assumed Windows-1252 encoding.
-// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-var replacementTable = [...]rune{
- '\u20AC', // First entry is what 0x80 should be replaced with.
- '\u0081',
- '\u201A',
- '\u0192',
- '\u201E',
- '\u2026',
- '\u2020',
- '\u2021',
- '\u02C6',
- '\u2030',
- '\u0160',
- '\u2039',
- '\u0152',
- '\u008D',
- '\u017D',
- '\u008F',
- '\u0090',
- '\u2018',
- '\u2019',
- '\u201C',
- '\u201D',
- '\u2022',
- '\u2013',
- '\u2014',
- '\u02DC',
- '\u2122',
- '\u0161',
- '\u203A',
- '\u0153',
- '\u009D',
- '\u017E',
- '\u0178', // Last entry is 0x9F.
- // 0x00->'\uFFFD' is handled programmatically.
- // 0x0D->'\u000D' is a no-op.
-}
-
-// unescapeEntity reads an entity like "<" from b[src:] and writes the
-// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
-// Precondition: b[src] == '&' && dst <= src.
-// attribute should be true if parsing an attribute value.
-func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
- // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
-
- // i starts at 1 because we already know that s[0] == '&'.
- i, s := 1, b[src:]
-
- if len(s) <= 1 {
- b[dst] = b[src]
- return dst + 1, src + 1
- }
-
- if s[i] == '#' {
- if len(s) <= 3 { // We need to have at least ".".
- b[dst] = b[src]
- return dst + 1, src + 1
- }
- i++
- c := s[i]
- hex := false
- if c == 'x' || c == 'X' {
- hex = true
- i++
- }
-
- x := '\x00'
- for i < len(s) {
- c = s[i]
- i++
- if hex {
- if '0' <= c && c <= '9' {
- x = 16*x + rune(c) - '0'
- continue
- } else if 'a' <= c && c <= 'f' {
- x = 16*x + rune(c) - 'a' + 10
- continue
- } else if 'A' <= c && c <= 'F' {
- x = 16*x + rune(c) - 'A' + 10
- continue
- }
- } else if '0' <= c && c <= '9' {
- x = 10*x + rune(c) - '0'
- continue
- }
- if c != ';' {
- i--
- }
- break
- }
-
- if i <= 3 { // No characters matched.
- b[dst] = b[src]
- return dst + 1, src + 1
- }
-
- if 0x80 <= x && x <= 0x9F {
- // Replace characters from Windows-1252 with UTF-8 equivalents.
- x = replacementTable[x-0x80]
- } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
- // Replace invalid characters with the replacement character.
- x = '\uFFFD'
- }
-
- return dst + utf8.EncodeRune(b[dst:], x), src + i
- }
-
- // Consume the maximum number of characters possible, with the
- // consumed characters matching one of the named references.
-
- for i < len(s) {
- c := s[i]
- i++
- // Lower-cased characters are more common in entities, so we check for them first.
- if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
- continue
- }
- if c != ';' {
- i--
- }
- break
- }
-
- entityName := string(s[1:i])
- if entityName == "" {
- // No-op.
- } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
- // No-op.
- } else if x := entity[entityName]; x != 0 {
- return dst + utf8.EncodeRune(b[dst:], x), src + i
- } else if x := entity2[entityName]; x[0] != 0 {
- dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
- return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
- } else if !attribute {
- maxLen := len(entityName) - 1
- if maxLen > longestEntityWithoutSemicolon {
- maxLen = longestEntityWithoutSemicolon
- }
- for j := maxLen; j > 1; j-- {
- if x := entity[entityName[:j]]; x != 0 {
- return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
- }
- }
- }
-
- dst1, src1 = dst+i, src+i
- copy(b[dst:dst1], b[src:src1])
- return dst1, src1
-}
-
-// unescape unescapes b's entities in-place, so that "a<b" becomes "a':
- esc = ">"
- case '"':
- // """ is shorter than """.
- esc = """
- case '\r':
- esc = "
"
- default:
- panic("unrecognized escape character")
- }
- s = s[i+1:]
- if _, err := w.WriteString(esc); err != nil {
- return err
- }
- i = strings.IndexAny(s, escapedChars)
- }
- _, err := w.WriteString(s)
- return err
-}
-
-// EscapeString escapes special characters like "<" to become "<". It
-// escapes only five such characters: <, >, &, ' and ".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func EscapeString(s string) string {
- if strings.IndexAny(s, escapedChars) == -1 {
- return s
- }
- var buf bytes.Buffer
- escape(&buf, s)
- return buf.String()
-}
-
-// UnescapeString unescapes entities like "<" to become "<". It unescapes a
-// larger range of entities than EscapeString escapes. For example, "á"
-// unescapes to "á", as does "á" and "&xE1;".
-// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
-// always true.
-func UnescapeString(s string) string {
- for _, c := range s {
- if c == '&' {
- return string(unescape([]byte(s), false))
- }
- }
- return s
-}
diff --git a/etcd/vendor/golang.org/x/net/html/foreign.go b/etcd/vendor/golang.org/x/net/html/foreign.go
deleted file mode 100644
index 9da9e9dc42..0000000000
--- a/etcd/vendor/golang.org/x/net/html/foreign.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "strings"
-)
-
-func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
- for i := range aa {
- if newName, ok := nameMap[aa[i].Key]; ok {
- aa[i].Key = newName
- }
- }
-}
-
-func adjustForeignAttributes(aa []Attribute) {
- for i, a := range aa {
- if a.Key == "" || a.Key[0] != 'x' {
- continue
- }
- switch a.Key {
- case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
- "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
- j := strings.Index(a.Key, ":")
- aa[i].Namespace = a.Key[:j]
- aa[i].Key = a.Key[j+1:]
- }
- }
-}
-
-func htmlIntegrationPoint(n *Node) bool {
- if n.Type != ElementNode {
- return false
- }
- switch n.Namespace {
- case "math":
- if n.Data == "annotation-xml" {
- for _, a := range n.Attr {
- if a.Key == "encoding" {
- val := strings.ToLower(a.Val)
- if val == "text/html" || val == "application/xhtml+xml" {
- return true
- }
- }
- }
- }
- case "svg":
- switch n.Data {
- case "desc", "foreignObject", "title":
- return true
- }
- }
- return false
-}
-
-func mathMLTextIntegrationPoint(n *Node) bool {
- if n.Namespace != "math" {
- return false
- }
- switch n.Data {
- case "mi", "mo", "mn", "ms", "mtext":
- return true
- }
- return false
-}
-
-// Section 12.2.6.5.
-var breakout = map[string]bool{
- "b": true,
- "big": true,
- "blockquote": true,
- "body": true,
- "br": true,
- "center": true,
- "code": true,
- "dd": true,
- "div": true,
- "dl": true,
- "dt": true,
- "em": true,
- "embed": true,
- "h1": true,
- "h2": true,
- "h3": true,
- "h4": true,
- "h5": true,
- "h6": true,
- "head": true,
- "hr": true,
- "i": true,
- "img": true,
- "li": true,
- "listing": true,
- "menu": true,
- "meta": true,
- "nobr": true,
- "ol": true,
- "p": true,
- "pre": true,
- "ruby": true,
- "s": true,
- "small": true,
- "span": true,
- "strong": true,
- "strike": true,
- "sub": true,
- "sup": true,
- "table": true,
- "tt": true,
- "u": true,
- "ul": true,
- "var": true,
-}
-
-// Section 12.2.6.5.
-var svgTagNameAdjustments = map[string]string{
- "altglyph": "altGlyph",
- "altglyphdef": "altGlyphDef",
- "altglyphitem": "altGlyphItem",
- "animatecolor": "animateColor",
- "animatemotion": "animateMotion",
- "animatetransform": "animateTransform",
- "clippath": "clipPath",
- "feblend": "feBlend",
- "fecolormatrix": "feColorMatrix",
- "fecomponenttransfer": "feComponentTransfer",
- "fecomposite": "feComposite",
- "feconvolvematrix": "feConvolveMatrix",
- "fediffuselighting": "feDiffuseLighting",
- "fedisplacementmap": "feDisplacementMap",
- "fedistantlight": "feDistantLight",
- "feflood": "feFlood",
- "fefunca": "feFuncA",
- "fefuncb": "feFuncB",
- "fefuncg": "feFuncG",
- "fefuncr": "feFuncR",
- "fegaussianblur": "feGaussianBlur",
- "feimage": "feImage",
- "femerge": "feMerge",
- "femergenode": "feMergeNode",
- "femorphology": "feMorphology",
- "feoffset": "feOffset",
- "fepointlight": "fePointLight",
- "fespecularlighting": "feSpecularLighting",
- "fespotlight": "feSpotLight",
- "fetile": "feTile",
- "feturbulence": "feTurbulence",
- "foreignobject": "foreignObject",
- "glyphref": "glyphRef",
- "lineargradient": "linearGradient",
- "radialgradient": "radialGradient",
- "textpath": "textPath",
-}
-
-// Section 12.2.6.1
-var mathMLAttributeAdjustments = map[string]string{
- "definitionurl": "definitionURL",
-}
-
-var svgAttributeAdjustments = map[string]string{
- "attributename": "attributeName",
- "attributetype": "attributeType",
- "basefrequency": "baseFrequency",
- "baseprofile": "baseProfile",
- "calcmode": "calcMode",
- "clippathunits": "clipPathUnits",
- "diffuseconstant": "diffuseConstant",
- "edgemode": "edgeMode",
- "filterunits": "filterUnits",
- "glyphref": "glyphRef",
- "gradienttransform": "gradientTransform",
- "gradientunits": "gradientUnits",
- "kernelmatrix": "kernelMatrix",
- "kernelunitlength": "kernelUnitLength",
- "keypoints": "keyPoints",
- "keysplines": "keySplines",
- "keytimes": "keyTimes",
- "lengthadjust": "lengthAdjust",
- "limitingconeangle": "limitingConeAngle",
- "markerheight": "markerHeight",
- "markerunits": "markerUnits",
- "markerwidth": "markerWidth",
- "maskcontentunits": "maskContentUnits",
- "maskunits": "maskUnits",
- "numoctaves": "numOctaves",
- "pathlength": "pathLength",
- "patterncontentunits": "patternContentUnits",
- "patterntransform": "patternTransform",
- "patternunits": "patternUnits",
- "pointsatx": "pointsAtX",
- "pointsaty": "pointsAtY",
- "pointsatz": "pointsAtZ",
- "preservealpha": "preserveAlpha",
- "preserveaspectratio": "preserveAspectRatio",
- "primitiveunits": "primitiveUnits",
- "refx": "refX",
- "refy": "refY",
- "repeatcount": "repeatCount",
- "repeatdur": "repeatDur",
- "requiredextensions": "requiredExtensions",
- "requiredfeatures": "requiredFeatures",
- "specularconstant": "specularConstant",
- "specularexponent": "specularExponent",
- "spreadmethod": "spreadMethod",
- "startoffset": "startOffset",
- "stddeviation": "stdDeviation",
- "stitchtiles": "stitchTiles",
- "surfacescale": "surfaceScale",
- "systemlanguage": "systemLanguage",
- "tablevalues": "tableValues",
- "targetx": "targetX",
- "targety": "targetY",
- "textlength": "textLength",
- "viewbox": "viewBox",
- "viewtarget": "viewTarget",
- "xchannelselector": "xChannelSelector",
- "ychannelselector": "yChannelSelector",
- "zoomandpan": "zoomAndPan",
-}
diff --git a/etcd/vendor/golang.org/x/net/html/node.go b/etcd/vendor/golang.org/x/net/html/node.go
deleted file mode 100644
index 1350eef22c..0000000000
--- a/etcd/vendor/golang.org/x/net/html/node.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "golang.org/x/net/html/atom"
-)
-
-// A NodeType is the type of a Node.
-type NodeType uint32
-
-const (
- ErrorNode NodeType = iota
- TextNode
- DocumentNode
- ElementNode
- CommentNode
- DoctypeNode
- // RawNode nodes are not returned by the parser, but can be part of the
- // Node tree passed to func Render to insert raw HTML (without escaping).
- // If so, this package makes no guarantee that the rendered HTML is secure
- // (from e.g. Cross Site Scripting attacks) or well-formed.
- RawNode
- scopeMarkerNode
-)
-
-// Section 12.2.4.3 says "The markers are inserted when entering applet,
-// object, marquee, template, td, th, and caption elements, and are used
-// to prevent formatting from "leaking" into applet, object, marquee,
-// template, td, th, and caption elements".
-var scopeMarker = Node{Type: scopeMarkerNode}
-
-// A Node consists of a NodeType and some Data (tag name for element nodes,
-// content for text) and are part of a tree of Nodes. Element nodes may also
-// have a Namespace and contain a slice of Attributes. Data is unescaped, so
-// that it looks like "a 0 {
- return (*s)[i-1]
- }
- return nil
-}
-
-// index returns the index of the top-most occurrence of n in the stack, or -1
-// if n is not present.
-func (s *nodeStack) index(n *Node) int {
- for i := len(*s) - 1; i >= 0; i-- {
- if (*s)[i] == n {
- return i
- }
- }
- return -1
-}
-
-// contains returns whether a is within s.
-func (s *nodeStack) contains(a atom.Atom) bool {
- for _, n := range *s {
- if n.DataAtom == a && n.Namespace == "" {
- return true
- }
- }
- return false
-}
-
-// insert inserts a node at the given index.
-func (s *nodeStack) insert(i int, n *Node) {
- (*s) = append(*s, nil)
- copy((*s)[i+1:], (*s)[i:])
- (*s)[i] = n
-}
-
-// remove removes a node from the stack. It is a no-op if n is not present.
-func (s *nodeStack) remove(n *Node) {
- i := s.index(n)
- if i == -1 {
- return
- }
- copy((*s)[i:], (*s)[i+1:])
- j := len(*s) - 1
- (*s)[j] = nil
- *s = (*s)[:j]
-}
-
-type insertionModeStack []insertionMode
-
-func (s *insertionModeStack) pop() (im insertionMode) {
- i := len(*s)
- im = (*s)[i-1]
- *s = (*s)[:i-1]
- return im
-}
-
-func (s *insertionModeStack) top() insertionMode {
- if i := len(*s); i > 0 {
- return (*s)[i-1]
- }
- return nil
-}
diff --git a/etcd/vendor/golang.org/x/net/html/parse.go b/etcd/vendor/golang.org/x/net/html/parse.go
deleted file mode 100644
index 291c91908d..0000000000
--- a/etcd/vendor/golang.org/x/net/html/parse.go
+++ /dev/null
@@ -1,2460 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package html
-
-import (
- "errors"
- "fmt"
- "io"
- "strings"
-
- a "golang.org/x/net/html/atom"
-)
-
-// A parser implements the HTML5 parsing algorithm:
-// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
-type parser struct {
- // tokenizer provides the tokens for the parser.
- tokenizer *Tokenizer
- // tok is the most recently read token.
- tok Token
- // Self-closing tags like are treated as start tags, except that
- // hasSelfClosingToken is set while they are being processed.
- hasSelfClosingToken bool
- // doc is the document root element.
- doc *Node
- // The stack of open elements (section 12.2.4.2) and active formatting
- // elements (section 12.2.4.3).
- oe, afe nodeStack
- // Element pointers (section 12.2.4.4).
- head, form *Node
- // Other parsing state flags (section 12.2.4.5).
- scripting, framesetOK bool
- // The stack of template insertion modes
- templateStack insertionModeStack
- // im is the current insertion mode.
- im insertionMode
- // originalIM is the insertion mode to go back to after completing a text
- // or inTableText insertion mode.
- originalIM insertionMode
- // fosterParenting is whether new elements should be inserted according to
- // the foster parenting rules (section 12.2.6.1).
- fosterParenting bool
- // quirks is whether the parser is operating in "quirks mode."
- quirks bool
- // fragment is whether the parser is parsing an HTML fragment.
- fragment bool
- // context is the context element when parsing an HTML fragment
- // (section 12.4).
- context *Node
-}
-
-func (p *parser) top() *Node {
- if n := p.oe.top(); n != nil {
- return n
- }
- return p.doc
-}
-
-// Stop tags for use in popUntil. These come from section 12.2.4.2.
-var (
- defaultScopeStopTags = map[string][]a.Atom{
- "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
- "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
- "svg": {a.Desc, a.ForeignObject, a.Title},
- }
-)
-
-type scope int
-
-const (
- defaultScope scope = iota
- listItemScope
- buttonScope
- tableScope
- tableRowScope
- tableBodyScope
- selectScope
-)
-
-// popUntil pops the stack of open elements at the highest element whose tag
-// is in matchTags, provided there is no higher element in the scope's stop
-// tags (as defined in section 12.2.4.2). It returns whether or not there was
-// such an element. If there was not, popUntil leaves the stack unchanged.
-//
-// For example, the set of stop tags for table scope is: "html", "table". If
-// the stack was:
-// ["html", "body", "font", "table", "b", "i", "u"]
-// then popUntil(tableScope, "font") would return false, but
-// popUntil(tableScope, "i") would return true and the stack would become:
-// ["html", "body", "font", "table", "b"]
-//
-// If an element's tag is in both the stop tags and matchTags, then the stack
-// will be popped and the function returns true (provided, of course, there was
-// no higher element in the stack that was also in the stop tags). For example,
-// popUntil(tableScope, "table") returns true and leaves:
-// ["html", "body", "font"]
-func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
- if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
- p.oe = p.oe[:i]
- return true
- }
- return false
-}
-
-// indexOfElementInScope returns the index in p.oe of the highest element whose
-// tag is in matchTags that is in scope. If no matching element is in scope, it
-// returns -1.
-func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
- for i := len(p.oe) - 1; i >= 0; i-- {
- tagAtom := p.oe[i].DataAtom
- if p.oe[i].Namespace == "" {
- for _, t := range matchTags {
- if t == tagAtom {
- return i
- }
- }
- switch s {
- case defaultScope:
- // No-op.
- case listItemScope:
- if tagAtom == a.Ol || tagAtom == a.Ul {
- return -1
- }
- case buttonScope:
- if tagAtom == a.Button {
- return -1
- }
- case tableScope:
- if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template {
- return -1
- }
- case selectScope:
- if tagAtom != a.Optgroup && tagAtom != a.Option {
- return -1
- }
- default:
- panic("unreachable")
- }
- }
- switch s {
- case defaultScope, listItemScope, buttonScope:
- for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
- if t == tagAtom {
- return -1
- }
- }
- }
- }
- return -1
-}
-
-// elementInScope is like popUntil, except that it doesn't modify the stack of
-// open elements.
-func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
- return p.indexOfElementInScope(s, matchTags...) != -1
-}
-
-// clearStackToContext pops elements off the stack of open elements until a
-// scope-defined element is found.
-func (p *parser) clearStackToContext(s scope) {
- for i := len(p.oe) - 1; i >= 0; i-- {
- tagAtom := p.oe[i].DataAtom
- switch s {
- case tableScope:
- if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template {
- p.oe = p.oe[:i+1]
- return
- }
- case tableRowScope:
- if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template {
- p.oe = p.oe[:i+1]
- return
- }
- case tableBodyScope:
- if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template {
- p.oe = p.oe[:i+1]
- return
- }
- default:
- panic("unreachable")
- }
- }
-}
-
-// parseGenericRawTextElements implements the generic raw text element parsing
-// algorithm defined in 12.2.6.2.
-// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
-// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part
-// officially, need to make tokenizer consider both states.
-func (p *parser) parseGenericRawTextElement() {
- p.addElement()
- p.originalIM = p.im
- p.im = textIM
-}
-
-// generateImpliedEndTags pops nodes off the stack of open elements as long as
-// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc.
-// If exceptions are specified, nodes with that name will not be popped off.
-func (p *parser) generateImpliedEndTags(exceptions ...string) {
- var i int
-loop:
- for i = len(p.oe) - 1; i >= 0; i-- {
- n := p.oe[i]
- if n.Type != ElementNode {
- break
- }
- switch n.DataAtom {
- case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc:
- for _, except := range exceptions {
- if n.Data == except {
- break loop
- }
- }
- continue
- }
- break
- }
-
- p.oe = p.oe[:i+1]
-}
-
-// addChild adds a child node n to the top element, and pushes n onto the stack
-// of open elements if it is an element node.
-func (p *parser) addChild(n *Node) {
- if p.shouldFosterParent() {
- p.fosterParent(n)
- } else {
- p.top().AppendChild(n)
- }
-
- if n.Type == ElementNode {
- p.oe = append(p.oe, n)
- }
-}
-
-// shouldFosterParent returns whether the next node to be added should be
-// foster parented.
-func (p *parser) shouldFosterParent() bool {
- if p.fosterParenting {
- switch p.top().DataAtom {
- case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
- return true
- }
- }
- return false
-}
-
-// fosterParent adds a child node according to the foster parenting rules.
-// Section 12.2.6.1, "foster parenting".
-func (p *parser) fosterParent(n *Node) {
- var table, parent, prev, template *Node
- var i int
- for i = len(p.oe) - 1; i >= 0; i-- {
- if p.oe[i].DataAtom == a.Table {
- table = p.oe[i]
- break
- }
- }
-
- var j int
- for j = len(p.oe) - 1; j >= 0; j-- {
- if p.oe[j].DataAtom == a.Template {
- template = p.oe[j]
- break
- }
- }
-
- if template != nil && (table == nil || j > i) {
- template.AppendChild(n)
- return
- }
-
- if table == nil {
- // The foster parent is the html element.
- parent = p.oe[0]
- } else {
- parent = table.Parent
- }
- if parent == nil {
- parent = p.oe[i-1]
- }
-
- if table != nil {
- prev = table.PrevSibling
- } else {
- prev = parent.LastChild
- }
- if prev != nil && prev.Type == TextNode && n.Type == TextNode {
- prev.Data += n.Data
- return
- }
-
- parent.InsertBefore(n, table)
-}
-
-// addText adds text to the preceding node if it is a text node, or else it
-// calls addChild with a new text node.
-func (p *parser) addText(text string) {
- if text == "" {
- return
- }
-
- if p.shouldFosterParent() {
- p.fosterParent(&Node{
- Type: TextNode,
- Data: text,
- })
- return
- }
-
- t := p.top()
- if n := t.LastChild; n != nil && n.Type == TextNode {
- n.Data += text
- return
- }
- p.addChild(&Node{
- Type: TextNode,
- Data: text,
- })
-}
-
-// addElement adds a child element based on the current token.
-func (p *parser) addElement() {
- p.addChild(&Node{
- Type: ElementNode,
- DataAtom: p.tok.DataAtom,
- Data: p.tok.Data,
- Attr: p.tok.Attr,
- })
-}
-
-// Section 12.2.4.3.
-func (p *parser) addFormattingElement() {
- tagAtom, attr := p.tok.DataAtom, p.tok.Attr
- p.addElement()
-
- // Implement the Noah's Ark clause, but with three per family instead of two.
- identicalElements := 0
-findIdenticalElements:
- for i := len(p.afe) - 1; i >= 0; i-- {
- n := p.afe[i]
- if n.Type == scopeMarkerNode {
- break
- }
- if n.Type != ElementNode {
- continue
- }
- if n.Namespace != "" {
- continue
- }
- if n.DataAtom != tagAtom {
- continue
- }
- if len(n.Attr) != len(attr) {
- continue
- }
- compareAttributes:
- for _, t0 := range n.Attr {
- for _, t1 := range attr {
- if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
- // Found a match for this attribute, continue with the next attribute.
- continue compareAttributes
- }
- }
- // If we get here, there is no attribute that matches a.
- // Therefore the element is not identical to the new one.
- continue findIdenticalElements
- }
-
- identicalElements++
- if identicalElements >= 3 {
- p.afe.remove(n)
- }
- }
-
- p.afe = append(p.afe, p.top())
-}
-
-// Section 12.2.4.3.
-func (p *parser) clearActiveFormattingElements() {
- for {
- if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode {
- return
- }
- }
-}
-
-// Section 12.2.4.3.
-func (p *parser) reconstructActiveFormattingElements() {
- n := p.afe.top()
- if n == nil {
- return
- }
- if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
- return
- }
- i := len(p.afe) - 1
- for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
- if i == 0 {
- i = -1
- break
- }
- i--
- n = p.afe[i]
- }
- for {
- i++
- clone := p.afe[i].clone()
- p.addChild(clone)
- p.afe[i] = clone
- if i == len(p.afe)-1 {
- break
- }
- }
-}
-
-// Section 12.2.5.
-func (p *parser) acknowledgeSelfClosingTag() {
- p.hasSelfClosingToken = false
-}
-
-// An insertion mode (section 12.2.4.1) is the state transition function from
-// a particular state in the HTML5 parser's state machine. It updates the
-// parser's fields depending on parser.tok (where ErrorToken means EOF).
-// It returns whether the token was consumed.
-type insertionMode func(*parser) bool
-
-// setOriginalIM sets the insertion mode to return to after completing a text or
-// inTableText insertion mode.
-// Section 12.2.4.1, "using the rules for".
-func (p *parser) setOriginalIM() {
- if p.originalIM != nil {
- panic("html: bad parser state: originalIM was set twice")
- }
- p.originalIM = p.im
-}
-
-// Section 12.2.4.1, "reset the insertion mode".
-func (p *parser) resetInsertionMode() {
- for i := len(p.oe) - 1; i >= 0; i-- {
- n := p.oe[i]
- last := i == 0
- if last && p.context != nil {
- n = p.context
- }
-
- switch n.DataAtom {
- case a.Select:
- if !last {
- for ancestor, first := n, p.oe[0]; ancestor != first; {
- ancestor = p.oe[p.oe.index(ancestor)-1]
- switch ancestor.DataAtom {
- case a.Template:
- p.im = inSelectIM
- return
- case a.Table:
- p.im = inSelectInTableIM
- return
- }
- }
- }
- p.im = inSelectIM
- case a.Td, a.Th:
- // TODO: remove this divergence from the HTML5 spec.
- //
- // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
- p.im = inCellIM
- case a.Tr:
- p.im = inRowIM
- case a.Tbody, a.Thead, a.Tfoot:
- p.im = inTableBodyIM
- case a.Caption:
- p.im = inCaptionIM
- case a.Colgroup:
- p.im = inColumnGroupIM
- case a.Table:
- p.im = inTableIM
- case a.Template:
- // TODO: remove this divergence from the HTML5 spec.
- if n.Namespace != "" {
- continue
- }
- p.im = p.templateStack.top()
- case a.Head:
- // TODO: remove this divergence from the HTML5 spec.
- //
- // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
- p.im = inHeadIM
- case a.Body:
- p.im = inBodyIM
- case a.Frameset:
- p.im = inFramesetIM
- case a.Html:
- if p.head == nil {
- p.im = beforeHeadIM
- } else {
- p.im = afterHeadIM
- }
- default:
- if last {
- p.im = inBodyIM
- return
- }
- continue
- }
- return
- }
-}
-
-const whitespace = " \t\r\n\f"
-
-// Section 12.2.6.4.1.
-func initialIM(p *parser) bool {
- switch p.tok.Type {
- case TextToken:
- p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
- if len(p.tok.Data) == 0 {
- // It was all whitespace, so ignore it.
- return true
- }
- case CommentToken:
- p.doc.AppendChild(&Node{
- Type: CommentNode,
- Data: p.tok.Data,
- })
- return true
- case DoctypeToken:
- n, quirks := parseDoctype(p.tok.Data)
- p.doc.AppendChild(n)
- p.quirks = quirks
- p.im = beforeHTMLIM
- return true
- }
- p.quirks = true
- p.im = beforeHTMLIM
- return false
-}
-
-// Section 12.2.6.4.2.
-func beforeHTMLIM(p *parser) bool {
- switch p.tok.Type {
- case DoctypeToken:
- // Ignore the token.
- return true
- case TextToken:
- p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
- if len(p.tok.Data) == 0 {
- // It was all whitespace, so ignore it.
- return true
- }
- case StartTagToken:
- if p.tok.DataAtom == a.Html {
- p.addElement()
- p.im = beforeHeadIM
- return true
- }
- case EndTagToken:
- switch p.tok.DataAtom {
- case a.Head, a.Body, a.Html, a.Br:
- p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
- return false
- default:
- // Ignore the token.
- return true
- }
- case CommentToken:
- p.doc.AppendChild(&Node{
- Type: CommentNode,
- Data: p.tok.Data,
- })
- return true
- }
- p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
- return false
-}
-
-// Section 12.2.6.4.3.
-func beforeHeadIM(p *parser) bool {
- switch p.tok.Type {
- case TextToken:
- p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
- if len(p.tok.Data) == 0 {
- // It was all whitespace, so ignore it.
- return true
- }
- case StartTagToken:
- switch p.tok.DataAtom {
- case a.Head:
- p.addElement()
- p.head = p.top()
- p.im = inHeadIM
- return true
- case a.Html:
- return inBodyIM(p)
- }
- case EndTagToken:
- switch p.tok.DataAtom {
- case a.Head, a.Body, a.Html, a.Br:
- p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
- return false
- default:
- // Ignore the token.
- return true
- }
- case CommentToken:
- p.addChild(&Node{
- Type: CommentNode,
- Data: p.tok.Data,
- })
- return true
- case DoctypeToken:
- // Ignore the token.
- return true
- }
-
- p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
- return false
-}
-
-// Section 12.2.6.4.4.
-func inHeadIM(p *parser) bool {
- switch p.tok.Type {
- case TextToken:
- s := strings.TrimLeft(p.tok.Data, whitespace)
- if len(s) < len(p.tok.Data) {
- // Add the initial whitespace to the current node.
- p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
- if s == "" {
- return true
- }
- p.tok.Data = s
- }
- case StartTagToken:
- switch p.tok.DataAtom {
- case a.Html:
- return inBodyIM(p)
- case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta:
- p.addElement()
- p.oe.pop()
- p.acknowledgeSelfClosingTag()
- return true
- case a.Noscript:
- if p.scripting {
- p.parseGenericRawTextElement()
- return true
- }
- p.addElement()
- p.im = inHeadNoscriptIM
- // Don't let the tokenizer go into raw text mode when scripting is disabled.
- p.tokenizer.NextIsNotRawText()
- return true
- case a.Script, a.Title:
- p.addElement()
- p.setOriginalIM()
- p.im = textIM
- return true
- case a.Noframes, a.Style:
- p.parseGenericRawTextElement()
- return true
- case a.Head:
- // Ignore the token.
- return true
- case a.Template:
- // TODO: remove this divergence from the HTML5 spec.
- //
- // We don't handle all of the corner cases when mixing foreign
- // content (i.e. or ) with . Without this
- // early return, we can get into an infinite loop, possibly because
- // of the "TODO... further divergence" a little below.
- //
- // As a workaround, if we are mixing foreign content and templates,
- // just ignore the rest of the HTML. Foreign content is rare and a
- // relatively old HTML feature. Templates are also rare and a
- // relatively new HTML feature. Their combination is very rare.
- for _, e := range p.oe {
- if e.Namespace != "" {
- p.im = ignoreTheRemainingTokens
- return true
- }
- }
-
- p.addElement()
- p.afe = append(p.afe, &scopeMarker)
- p.framesetOK = false
- p.im = inTemplateIM
- p.templateStack = append(p.templateStack, inTemplateIM)
- return true
- }
- case EndTagToken:
- switch p.tok.DataAtom {
- case a.Head:
- p.oe.pop()
- p.im = afterHeadIM
- return true
- case a.Body, a.Html, a.Br:
- p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
- return false
- case a.Template:
- if !p.oe.contains(a.Template) {
- return true
- }
- // TODO: remove this further divergence from the HTML5 spec.
- //
- // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
- p.generateImpliedEndTags()
- for i := len(p.oe) - 1; i >= 0; i-- {
- if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
- p.oe = p.oe[:i]
- break
- }
- }
- p.clearActiveFormattingElements()
- p.templateStack.pop()
- p.resetInsertionMode()
- return true
- default:
- // Ignore the token.
- return true
- }
- case CommentToken:
- p.addChild(&Node{
- Type: CommentNode,
- Data: p.tok.Data,
- })
- return true
- case DoctypeToken:
- // Ignore the token.
- return true
- }
-
- p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
- return false
-}
-
-// Section 12.2.6.4.5.
-func inHeadNoscriptIM(p *parser) bool {
- switch p.tok.Type {
- case DoctypeToken:
- // Ignore the token.
- return true
- case StartTagToken:
- switch p.tok.DataAtom {
- case a.Html:
- return inBodyIM(p)
- case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style:
- return inHeadIM(p)
- case a.Head:
- // Ignore the token.
- return true
- case a.Noscript:
- // Don't let the tokenizer go into raw text mode even when a
- // tag is in "in head noscript" insertion mode.
- p.tokenizer.NextIsNotRawText()
- // Ignore the token.
- return true
- }
- case EndTagToken:
- switch p.tok.DataAtom {
- case a.Noscript, a.Br:
- default:
- // Ignore the token.
- return true
- }
- case TextToken:
- s := strings.TrimLeft(p.tok.Data, whitespace)
- if len(s) == 0 {
- // It was all whitespace.
- return inHeadIM(p)
- }
- case CommentToken:
- return inHeadIM(p)
- }
- p.oe.pop()
- if p.top().DataAtom != a.Head {
- panic("html: the new current node will be a head element.")
- }
- p.im = inHeadIM
- if p.tok.DataAtom == a.Noscript {
- return true
- }
- return false
-}
-
-// Section 12.2.6.4.6.
-func afterHeadIM(p *parser) bool {
- switch p.tok.Type {
- case TextToken:
- s := strings.TrimLeft(p.tok.Data, whitespace)
- if len(s) < len(p.tok.Data) {
- // Add the initial whitespace to the current node.
- p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
- if s == "" {
- return true
- }
- p.tok.Data = s
- }
- case StartTagToken:
- switch p.tok.DataAtom {
- case a.Html:
- return inBodyIM(p)
- case a.Body:
- p.addElement()
- p.framesetOK = false
- p.im = inBodyIM
- return true
- case a.Frameset:
- p.addElement()
- p.im = inFramesetIM
- return true
- case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
- p.oe = append(p.oe, p.head)
- defer p.oe.remove(p.head)
- return inHeadIM(p)
- case a.Head:
- // Ignore the token.
- return true
- }
- case EndTagToken:
- switch p.tok.DataAtom {
- case a.Body, a.Html, a.Br:
- // Drop down to creating an implied tag.
- case a.Template:
- return inHeadIM(p)
- default:
- // Ignore the token.
- return true
- }
- case CommentToken:
- p.addChild(&Node{
- Type: CommentNode,
- Data: p.tok.Data,
- })
- return true
- case DoctypeToken:
- // Ignore the token.
- return true
- }
-
- p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
- p.framesetOK = true
- return false
-}
-
-// copyAttributes copies attributes of src not found on dst to dst.
-func copyAttributes(dst *Node, src Token) {
- if len(src.Attr) == 0 {
- return
- }
- attr := map[string]string{}
- for _, t := range dst.Attr {
- attr[t.Key] = t.Val
- }
- for _, t := range src.Attr {
- if _, ok := attr[t.Key]; !ok {
- dst.Attr = append(dst.Attr, t)
- attr[t.Key] = t.Val
- }
- }
-}
-
-// Section 12.2.6.4.7.
-func inBodyIM(p *parser) bool {
- switch p.tok.Type {
- case TextToken:
- d := p.tok.Data
- switch n := p.oe.top(); n.DataAtom {
- case a.Pre, a.Listing:
- if n.FirstChild == nil {
- // Ignore a newline at the start of a block.
- if d != "" && d[0] == '\r' {
- d = d[1:]
- }
- if d != "" && d[0] == '\n' {
- d = d[1:]
- }
- }
- }
- d = strings.Replace(d, "\x00", "", -1)
- if d == "" {
- return true
- }
- p.reconstructActiveFormattingElements()
- p.addText(d)
- if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
- // There were non-whitespace characters inserted.
- p.framesetOK = false
- }
- case StartTagToken:
- switch p.tok.DataAtom {
- case a.Html:
- if p.oe.contains(a.Template) {
- return true
- }
- copyAttributes(p.oe[0], p.tok)
- case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
- return inHeadIM(p)
- case a.Body:
- if p.oe.contains(a.Template) {
- return true
- }
- if len(p.oe) >= 2 {
- body := p.oe[1]
- if body.Type == ElementNode && body.DataAtom == a.Body {
- p.framesetOK = false
- copyAttributes(body, p.tok)
- }
- }
- case a.Frameset:
- if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
- // Ignore the token.
- return true
- }
- body := p.oe[1]
- if body.Parent != nil {
- body.Parent.RemoveChild(body)
- }
- p.oe = p.oe[:1]
- p.addElement()
- p.im = inFramesetIM
- return true
- case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
- p.popUntil(buttonScope, a.P)
- p.addElement()
- case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
- p.popUntil(buttonScope, a.P)
- switch n := p.top(); n.DataAtom {
- case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
- p.oe.pop()
- }
- p.addElement()
- case a.Pre, a.Listing:
- p.popUntil(buttonScope, a.P)
- p.addElement()
- // The newline, if any, will be dealt with by the TextToken case.
- p.framesetOK = false
- case a.Form:
- if p.form != nil && !p.oe.contains(a.Template) {
- // Ignore the token
- return true
- }
- p.popUntil(buttonScope, a.P)
- p.addElement()
- if !p.oe.contains(a.Template) {
- p.form = p.top()
- }
- case a.Li:
- p.framesetOK = false
- for i := len(p.oe) - 1; i >= 0; i-- {
- node := p.oe[i]
- switch node.DataAtom {
- case a.Li:
- p.oe = p.oe[:i]
- case a.Address, a.Div, a.P:
- continue
- default:
- if !isSpecialElement(node) {
- continue
- }
- }
- break
- }
- p.popUntil(buttonScope, a.P)
- p.addElement()
- case a.Dd, a.Dt:
- p.framesetOK = false
- for i := len(p.oe) - 1; i >= 0; i-- {
- node := p.oe[i]
- switch node.DataAtom {
- case a.Dd, a.Dt:
- p.oe = p.oe[:i]
- case a.Address, a.Div, a.P:
- continue
- default:
- if !isSpecialElement(node) {
- continue
- }
- }
- break
- }
- p.popUntil(buttonScope, a.P)
- p.addElement()
- case a.Plaintext:
- p.popUntil(buttonScope, a.P)
- p.addElement()
- case a.Button:
- p.popUntil(defaultScope, a.Button)
- p.reconstructActiveFormattingElements()
- p.addElement()
- p.framesetOK = false
- case a.A:
- for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
- if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
- p.inBodyEndTagFormatting(a.A, "a")
- p.oe.remove(n)
- p.afe.remove(n)
- break
- }
- }
- p.reconstructActiveFormattingElements()
- p.addFormattingElement()
- case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
- p.reconstructActiveFormattingElements()
- p.addFormattingElement()
- case a.Nobr:
- p.reconstructActiveFormattingElements()
- if p.elementInScope(defaultScope, a.Nobr) {
- p.inBodyEndTagFormatting(a.Nobr, "nobr")
- p.reconstructActiveFormattingElements()
- }
- p.addFormattingElement()
- case a.Applet, a.Marquee, a.Object:
- p.reconstructActiveFormattingElements()
- p.addElement()
- p.afe = append(p.afe, &scopeMarker)
- p.framesetOK = false
- case a.Table:
- if !p.quirks {
- p.popUntil(buttonScope, a.P)
- }
- p.addElement()
- p.framesetOK = false
- p.im = inTableIM
- return true
- case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
- p.reconstructActiveFormattingElements()
- p.addElement()
- p.oe.pop()
- p.acknowledgeSelfClosingTag()
- if p.tok.DataAtom == a.Input {
- for _, t := range p.tok.Attr {
- if t.Key == "type" {
- if strings.ToLower(t.Val) == "hidden" {
- // Skip setting framesetOK = false
- return true
- }
- }
- }
- }
- p.framesetOK = false
- case a.Param, a.Source, a.Track:
- p.addElement()
- p.oe.pop()
- p.acknowledgeSelfClosingTag()
- case a.Hr:
- p.popUntil(buttonScope, a.P)
- p.addElement()
- p.oe.pop()
- p.acknowledgeSelfClosingTag()
- p.framesetOK = false
- case a.Image:
- p.tok.DataAtom = a.Img
- p.tok.Data = a.Img.String()
- return false
- case a.Textarea:
- p.addElement()
- p.setOriginalIM()
- p.framesetOK = false
- p.im = textIM
- case a.Xmp:
- p.popUntil(buttonScope, a.P)
- p.reconstructActiveFormattingElements()
- p.framesetOK = false
- p.parseGenericRawTextElement()
- case a.Iframe:
- p.framesetOK = false
- p.parseGenericRawTextElement()
- case a.Noembed:
- p.parseGenericRawTextElement()
- case a.Noscript:
- if p.scripting {
- p.parseGenericRawTextElement()
- return true
- }
- p.reconstructActiveFormattingElements()
- p.addElement()
- // Don't let the tokenizer go into raw text mode when scripting is disabled.
- p.tokenizer.NextIsNotRawText()
- case a.Select:
- p.reconstructActiveFormattingElements()
- p.addElement()
- p.framesetOK = false
- p.im = inSelectIM
- return true
- case a.Optgroup, a.Option:
- if p.top().DataAtom == a.Option {
- p.oe.pop()
- }
- p.reconstructActiveFormattingElements()
- p.addElement()
- case a.Rb, a.Rtc:
- if p.elementInScope(defaultScope, a.Ruby) {
- p.generateImpliedEndTags()
- }
- p.addElement()
- case a.Rp, a.Rt:
- if p.elementInScope(defaultScope, a.Ruby) {
- p.generateImpliedEndTags("rtc")
- }
- p.addElement()
- case a.Math, a.Svg:
- p.reconstructActiveFormattingElements()
- if p.tok.DataAtom == a.Math {
- adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
- } else {
- adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
- }
- adjustForeignAttributes(p.tok.Attr)
- p.addElement()
- p.top().Namespace = p.tok.Data
- if p.hasSelfClosingToken {
- p.oe.pop()
- p.acknowledgeSelfClosingTag()
- }
- return true
- case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
- // Ignore the token.
- default:
- p.reconstructActiveFormattingElements()
- p.addElement()
- }
- case EndTagToken:
- switch p.tok.DataAtom {
- case a.Body:
- if p.elementInScope(defaultScope, a.Body) {
- p.im = afterBodyIM
- }
- case a.Html:
- if p.elementInScope(defaultScope, a.Body) {
- p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
- return false
- }
- return true
- case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
- p.popUntil(defaultScope, p.tok.DataAtom)
- case a.Form:
- if p.oe.contains(a.Template) {
- i := p.indexOfElementInScope(defaultScope, a.Form)
- if i == -1 {
- // Ignore the token.
- return true
- }
- p.generateImpliedEndTags()
- if p.oe[i].DataAtom != a.Form {
- // Ignore the token.
- return true
- }
- p.popUntil(defaultScope, a.Form)
- } else {
- node := p.form
- p.form = nil
- i := p.indexOfElementInScope(defaultScope, a.Form)
- if node == nil || i == -1 || p.oe[i] != node {
- // Ignore the token.
- return true
- }
- p.generateImpliedEndTags()
- p.oe.remove(node)
- }
- case a.P:
- if !p.elementInScope(buttonScope, a.P) {
- p.parseImpliedToken(StartTagToken, a.P, a.P.String())
- }
- p.popUntil(buttonScope, a.P)
- case a.Li:
- p.popUntil(listItemScope, a.Li)
- case a.Dd, a.Dt:
- p.popUntil(defaultScope, p.tok.DataAtom)
- case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
- p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
- case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
- p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data)
- case a.Applet, a.Marquee, a.Object:
- if p.popUntil(defaultScope, p.tok.DataAtom) {
- p.clearActiveFormattingElements()
- }
- case a.Br:
- p.tok.Type = StartTagToken
- return false
- case a.Template:
- return inHeadIM(p)
- default:
- p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data)
- }
- case CommentToken:
- p.addChild(&Node{
- Type: CommentNode,
- Data: p.tok.Data,
- })
- case ErrorToken:
- // TODO: remove this divergence from the HTML5 spec.
- if len(p.templateStack) > 0 {
- p.im = inTemplateIM
- return false
- }
- for _, e := range p.oe {
- switch e.DataAtom {
- case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
- a.Thead, a.Tr, a.Body, a.Html:
- default:
- return true
- }
- }
- }
-
- return true
-}
-
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
- // This is the "adoption agency" algorithm, described at
- // https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
- // TODO: this is a fairly literal line-by-line translation of that algorithm.
- // Once the code successfully parses the comprehensive test suite, we should
- // refactor this code to be more idiomatic.
-
- // Steps 1-2
- if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 {
- p.oe.pop()
- return
- }
-
- // Steps 3-5. The outer loop.
- for i := 0; i < 8; i++ {
- // Step 6. Find the formatting element.
- var formattingElement *Node
- for j := len(p.afe) - 1; j >= 0; j-- {
- if p.afe[j].Type == scopeMarkerNode {
- break
- }
- if p.afe[j].DataAtom == tagAtom {
- formattingElement = p.afe[j]
- break
- }
- }
- if formattingElement == nil {
- p.inBodyEndTagOther(tagAtom, tagName)
- return
- }
-
- // Step 7. Ignore the tag if formatting element is not in the stack of open elements.
- feIndex := p.oe.index(formattingElement)
- if feIndex == -1 {
- p.afe.remove(formattingElement)
- return
- }
- // Step 8. Ignore the tag if formatting element is not in the scope.
- if !p.elementInScope(defaultScope, tagAtom) {
- // Ignore the tag.
- return
- }
-
- // Step 9. This step is omitted because it's just a parse error but no need to return.
-
- // Steps 10-11. Find the furthest block.
- var furthestBlock *Node
- for _, e := range p.oe[feIndex:] {
- if isSpecialElement(e) {
- furthestBlock = e
- break
- }
- }
- if furthestBlock == nil {
- e := p.oe.pop()
- for e != formattingElement {
- e = p.oe.pop()
- }
- p.afe.remove(e)
- return
- }
-
- // Steps 12-13. Find the common ancestor and bookmark node.
- commonAncestor := p.oe[feIndex-1]
- bookmark := p.afe.index(formattingElement)
-
- // Step 14. The inner loop. Find the lastNode to reparent.
- lastNode := furthestBlock
- node := furthestBlock
- x := p.oe.index(node)
- // Step 14.1.
- j := 0
- for {
- // Step 14.2.
- j++
- // Step. 14.3.
- x--
- node = p.oe[x]
- // Step 14.4. Go to the next step if node is formatting element.
- if node == formattingElement {
- break
- }
- // Step 14.5. Remove node from the list of active formatting elements if
- // inner loop counter is greater than three and node is in the list of
- // active formatting elements.
- if ni := p.afe.index(node); j > 3 && ni > -1 {
- p.afe.remove(node)
- // If any element of the list of active formatting elements is removed,
- // we need to take care whether bookmark should be decremented or not.
- // This is because the value of bookmark may exceed the size of the
- // list by removing elements from the list.
- if ni <= bookmark {
- bookmark--
- }
- continue
- }
- // Step 14.6. Continue the next inner loop if node is not in the list of
- // active formatting elements.
- if p.afe.index(node) == -1 {
- p.oe.remove(node)
- continue
- }
- // Step 14.7.
- clone := node.clone()
- p.afe[p.afe.index(node)] = clone
- p.oe[p.oe.index(node)] = clone
- node = clone
- // Step 14.8.
- if lastNode == furthestBlock {
- bookmark = p.afe.index(node) + 1
- }
- // Step 14.9.
- if lastNode.Parent != nil {
- lastNode.Parent.RemoveChild(lastNode)
- }
- node.AppendChild(lastNode)
- // Step 14.10.
- lastNode = node
- }
-
- // Step 15. Reparent lastNode to the common ancestor,
- // or for misnested table nodes, to the foster parent.
- if lastNode.Parent != nil {
- lastNode.Parent.RemoveChild(lastNode)
- }
- switch commonAncestor.DataAtom {
- case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
- p.fosterParent(lastNode)
- default:
- commonAncestor.AppendChild(lastNode)
- }
-
- // Steps 16-18. Reparent nodes from the furthest block's children
- // to a clone of the formatting element.
- clone := formattingElement.clone()
- reparentChildren(clone, furthestBlock)
- furthestBlock.AppendChild(clone)
-
- // Step 19. Fix up the list of active formatting elements.
- if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
- // Move the bookmark with the rest of the list.
- bookmark--
- }
- p.afe.remove(formattingElement)
- p.afe.insert(bookmark, clone)
-
- // Step 20. Fix up the stack of open elements.
- p.oe.remove(formattingElement)
- p.oe.insert(p.oe.index(furthestBlock)+1, clone)
- }
-}
-
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
-// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) {
- for i := len(p.oe) - 1; i >= 0; i-- {
- // Two element nodes have the same tag if they have the same Data (a
- // string-typed field). As an optimization, for common HTML tags, each
- // Data string is assigned a unique, non-zero DataAtom (a uint32-typed
- // field), since integer comparison is faster than string comparison.
- // Uncommon (custom) tags get a zero DataAtom.
- //
- // The if condition here is equivalent to (p.oe[i].Data == tagName).
- if (p.oe[i].DataAtom == tagAtom) &&
- ((tagAtom != 0) || (p.oe[i].Data == tagName)) {
- p.oe = p.oe[:i]
- break
- }
- if isSpecialElement(p.oe[i]) {
- break
- }
- }
-}
-
-// Section 12.2.6.4.8.
-func textIM(p *parser) bool {
- switch p.tok.Type {
- case ErrorToken:
- p.oe.pop()
- case TextToken:
- d := p.tok.Data
- if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
- // Ignore a newline at the start of a