diff --git a/go.mod b/go.mod index be3d3aae61..8785fded17 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/Netflix/go-expect v0.0.0-20210722184520-ef0bf57d82b3 github.com/alecthomas/jsonschema v0.0.0-20210526225647-edb03dcab7bc github.com/buildpacks/pack v0.24.0 - github.com/cloudevents/sdk-go/v2 v2.8.0 + github.com/cloudevents/sdk-go/v2 v2.10.1 github.com/containerd/containerd v1.6.6 github.com/containers/image/v5 v5.19.1 github.com/coreos/go-semver v0.3.0 @@ -41,15 +41,15 @@ require ( golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.1.0 - k8s.io/api v0.23.5 - k8s.io/apimachinery v0.23.5 + k8s.io/api v0.23.9 + k8s.io/apimachinery v0.23.9 k8s.io/client-go v1.5.2 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - knative.dev/client v0.31.1 - knative.dev/eventing v0.31.3-0.20220802083815-e345f5f3695d - knative.dev/hack v0.0.0-20220629135029-9e09abcd61f0 - knative.dev/pkg v0.0.0-20220412134708-e325df66cb51 - knative.dev/serving v0.31.1-0.20220630164831-69a88e92b069 + knative.dev/client v0.34.0 + knative.dev/eventing v0.34.2 + knative.dev/hack v0.0.0-20220823140917-8d1e4ccf9dc3 + knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 + knative.dev/serving v0.34.1 ) require ( @@ -188,12 +188,12 @@ require ( go.opencensus.io v0.23.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.20.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect @@ -209,12 +209,12 @@ require ( gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect gopkg.in/src-d/go-git.v4 v4.13.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/apiextensions-apiserver v0.23.4 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.23.9 // indirect k8s.io/cli-runtime v0.23.4 // indirect - k8s.io/klog/v2 v2.60.1-0.20220317184644-43cc75f9ae89 // indirect + k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f // indirect k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect - knative.dev/networking v0.0.0-20220412163509-1145ec58c8be // indirect + knative.dev/networking v0.0.0-20220818010248-e51df7cdf571 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/kustomize/api v0.10.1 // indirect sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect diff --git a/go.sum b/go.sum index ac5822b20d..f59cfe0fb4 100644 --- a/go.sum +++ b/go.sum @@ -594,8 +594,9 @@ github.com/cloudevents/sdk-go/sql/v2 v2.8.0 h1:gWednxJHL0Ycf93XeEFyQxYj81A7b4eNw github.com/cloudevents/sdk-go/sql/v2 v2.8.0/go.mod h1:u9acNJbhmi1wnDJro4PEAqbr4N1LTCyEUClErxbPS1A= github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY= github.com/cloudevents/sdk-go/v2 v2.5.0/go.mod h1:nlXhgFkf0uTopxmRXalyMwS2LG70cRGPrxzmjJgSG0U= -github.com/cloudevents/sdk-go/v2 v2.8.0 h1:kmRaLbsafZmidZ0rZ6h7WOMqCkRMcVTLV5lxV/HKQ9Y= 
github.com/cloudevents/sdk-go/v2 v2.8.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs= +github.com/cloudevents/sdk-go/v2 v2.10.1 h1:qNFovJ18fWOd8Q9ydWJPk1oiFudXyv1GxJIP7MwPjuM= +github.com/cloudevents/sdk-go/v2 v2.10.1/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -2780,8 +2781,9 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -2791,8 +2793,9 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.20.0 h1:N4oPlghZwYG55MlU6LXk/Zp00FVNE9X9wrYO8CEs4lc= go.uber.org/zap v1.20.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= goa.design/goa/v3 v3.0.9/go.mod h1:oJR8VOFa4HV7wCQv4XhPtvyknz0B3VFFRUWDdEmI0FI= goa.design/goa/v3 v3.1.3/go.mod h1:GEog3KvHosQPKrrZSlpXDSnm7PpmzZEiy3mLxI/FtXM= @@ -3059,8 +3062,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -3740,8 +3744,9 @@ gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw= gorm.io/driver/postgres v1.0.0/go.mod h1:wtMFcOzmuA5QigNsgEIb7O5lhvH1tHAF1RbWmLWV4to= gorm.io/driver/postgres v1.0.2/go.mod h1:FvRSYfBI9jEp6ZSjlpS9qNcSjxwYxFc03UOTrHdvvYA= @@ -3775,8 +3780,8 @@ k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= k8s.io/apiextensions-apiserver v0.20.7/go.mod h1:rBGJeRYoDJi1jJFHPA4QWXV6YX/5scZfSdkuMSgWoyA= k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= k8s.io/apiextensions-apiserver v0.22.5/go.mod h1:tIXeZ0BrDxUb1PoAz+tgOz43Zi1Bp4BEEqVtUccMJbE= -k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU= -k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= +k8s.io/apiextensions-apiserver v0.23.9 h1:q6X/HhgKo7/Up1p1TeNGNAwqcaBcxaxjxxwXa/5ht+E= +k8s.io/apiextensions-apiserver v0.23.9/go.mod h1:uu79PjF1T6YbfFqL5kVTmEdxb40Z0eHM7MfHDHz9dho= k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0= k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= @@ -3788,7 +3793,7 @@ k8s.io/apiserver v0.20.7/go.mod h1:7gbB7UjDdP1/epYBGnIUE6jWY4Wpz99cZ7igfDa9rv4= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc= +k8s.io/apiserver v0.23.9/go.mod h1:vIXpgCnHep34bP/y+wGhYdn1NgxAvWtntxfEjst0e74= k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY= k8s.io/cli-runtime v0.23.4 h1:C3AFQmo4TK4dlVPLOI62gtHEHu0OfA2Cp4UVRZ1JXns= k8s.io/cli-runtime v0.23.4/go.mod h1:7KywUNTUibmHPqmpDFuRO1kc9RhsufHv2lkjCm2YZyM= @@ -3802,8 +3807,7 @@ k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NI k8s.io/code-generator v0.20.7/go.mod h1:i6FmG+QxaLxvJsezvZp0q/gAEzzOz3U53KFibghWToU= k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.22.5/go.mod h1:sbdWCOVob+KaQ5O7xs8PNNaCTpbWVqNgA6EPwLOmRNk= -k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= +k8s.io/code-generator v0.23.9/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.17.4/go.mod 
h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= k8s.io/component-base v0.19.7/go.mod h1:YX8spPBgwl3I6UGcSdQiEMAqRMSUsGQOW7SEr4+Qa3U= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= @@ -3813,7 +3817,7 @@ k8s.io/component-base v0.20.7/go.mod h1:878UWprXC07P2CWFg+jjvTfxJSlkHp1v2m1MTkNQ k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= -k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E= +k8s.io/component-base v0.23.9/go.mod h1:WUNtIRIMd9WBS2r5LCSNZoh6f/Uh+8O+aGuZUG5t428= k8s.io/controller-manager v0.21.0/go.mod h1:Ohy0GRNRKPVjB8C8G+dV+4aPn26m8HYUI6ejloUBvUA= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -3833,7 +3837,7 @@ k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20220307231824-4627b89bbf1b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20220613173612-397b4ae3bce7/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -3847,8 +3851,8 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.60.1-0.20220317184644-43cc75f9ae89 h1:bUNlsw5yb353zbKMj8srOr6V2Ajhz1VkTKonP1L8r2o= -k8s.io/klog/v2 v2.60.1-0.20220317184644-43cc75f9ae89/go.mod h1:N3kgBtsFxMb4nQ0eBDgbHEt/dtxBuTkSFQ+7K5OUoz4= +k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f h1:dltw7bAn8bCrQ2CmzzhgoieUZEbWqrvIGVdHGioP5nY= +k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= @@ -3879,36 +3883,37 @@ k8s.io/utils v0.0.0-20220127004650-9b3446523e65/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= knative.dev/caching v0.0.0-20210803185815-4e553d2275a0/go.mod h1:Vs+HND39+KKaIQp9M3m3Jmt4YtznpitDQ3n53gxbDYQ= -knative.dev/caching v0.0.0-20220412163508-8b5c244b8182/go.mod h1:BFtnxIjI27VMV52u4vHhplij9j5PbQRXFlDMv7EMjbM= -knative.dev/client v0.31.1 h1:69BfGRYqc9Uc94aJjhqTvyMCAqDPjFZc78G2EhKDCMA= 
-knative.dev/client v0.31.1/go.mod h1:393rr9RYzIuy7LVyvfoCucSeg/XsXfLq64taSRnukmE= +knative.dev/caching v0.0.0-20220818010648-9df7bb739739/go.mod h1:q5//FJ59aFRK42YiLSaxgBzH18DBhrtSc7UWapwXT9Q= +knative.dev/client v0.34.0 h1:CY62Bd/sodq8aeL6dYGYnDCvpe39qiPGXJHtR4Rktbk= +knative.dev/client v0.34.0/go.mod h1:mquhJiwkZytOKzLjRu3K+5HWRM6YAplWfvGUbCFZI4M= +knative.dev/control-protocol v0.0.0-20220818153549-f18dbde7d9bd/go.mod h1:vO3Xc0k0h6fFVsVG9kNMUMcVKG7MAx7jMbZDvgSuzwI= knative.dev/eventing v0.25.0/go.mod h1:8jIsrnSONPgv+m63OTzpwZQJiQASYl77C3llCyYlBMU= -knative.dev/eventing v0.31.0/go.mod h1:XgJY27IxyBjmu/mz53cVlz+oMPPzzRaVXlPmWKCqEd8= -knative.dev/eventing v0.31.3-0.20220802083815-e345f5f3695d h1:FpXtHZNEVbbXsqkcA2+JPg1WIqsc7/eBu0Slo5hy2pg= -knative.dev/eventing v0.31.3-0.20220802083815-e345f5f3695d/go.mod h1:XgJY27IxyBjmu/mz53cVlz+oMPPzzRaVXlPmWKCqEd8= +knative.dev/eventing v0.34.1/go.mod h1:6UnNnPrEUNAM9PfCpf7L8N7G/1vq+HQlpOjzndY6ryw= +knative.dev/eventing v0.34.2 h1:JaY8WhpFQuwjI6PYvme3KS0kvX677F6rH16mI6IDYqk= +knative.dev/eventing v0.34.2/go.mod h1:6UnNnPrEUNAM9PfCpf7L8N7G/1vq+HQlpOjzndY6ryw= knative.dev/hack v0.0.0-20210622141627-e28525d8d260/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20211203062838-e11ac125e707/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20220128200847-51a42b2eb63e/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/hack v0.0.0-20220411131823-6ffd8417de7c/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/hack v0.0.0-20220629135029-9e09abcd61f0 h1:Vk/ZoMI94uQyaaLd4IG7Tgy6fFkH9tbMTCDe126a1+w= -knative.dev/hack v0.0.0-20220629135029-9e09abcd61f0/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20220815132133-e9a8475f4329/go.mod h1:t/azP8I/Cygaw+87O7rkAPrNRjCelmtfSzWzu/9TM7I= +knative.dev/hack v0.0.0-20220823140917-8d1e4ccf9dc3 h1:umaeMRecA0g5g48L9tnEAkTBIitr9eKWMyJYo9YttAA= +knative.dev/hack v0.0.0-20220823140917-8d1e4ccf9dc3/go.mod h1:t/azP8I/Cygaw+87O7rkAPrNRjCelmtfSzWzu/9TM7I= knative.dev/hack/schema v0.0.0-20210622141627-e28525d8d260/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= -knative.dev/hack/schema v0.0.0-20220411131823-6ffd8417de7c/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= +knative.dev/hack/schema v0.0.0-20220823140917-8d1e4ccf9dc3/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= knative.dev/networking v0.0.0-20210803181815-acdfd41c575c/go.mod h1:UA9m1M3rGssy63gVwjSh7CYoWTKZNO8cnY9QsIu7tyo= -knative.dev/networking v0.0.0-20220412163509-1145ec58c8be h1:MmwR4SfwlXgt/jnjronkTTOKBrwN1mP/VNhHH08pIoc= -knative.dev/networking v0.0.0-20220412163509-1145ec58c8be/go.mod h1:6OZIUimxPelIIudzHWRd+Lc7ippC5t+DC8CsZKCOjcI= +knative.dev/networking v0.0.0-20220818010248-e51df7cdf571 h1:Lu/TsJjxg1p+2CMr2LNHEdEFBNHYjDoZv2f1QZoM8jg= +knative.dev/networking v0.0.0-20220818010248-e51df7cdf571/go.mod h1:m3ataWRwmbHjOY9sCFvcDWRNLVITxVl0fH0RxdCa4jE= knative.dev/pkg v0.0.0-20210803160015-21eb4c167cc5/go.mod h1:RPk5txNA3apR9X40D4MpUOP9/VqOG8CrtABWfOwGVS4= knative.dev/pkg v0.0.0-20211215065729-552319d4f55b/go.mod h1:hrD91/shO1o4KMZa4oWhnbRPmVJhvq86TLy/STF/qf8= knative.dev/pkg v0.0.0-20220104185830-52e42b760b54/go.mod h1:189cvGP0mwpqwZGFrLk5WuERIsNI/J6HuQ1CIX7SXxY= knative.dev/pkg v0.0.0-20220131144930-f4b57aef0006/go.mod h1:bZMFTPDPHV3wXuiQ09UJuEGYYQnfpe81MCxNvsMAiJk= -knative.dev/pkg v0.0.0-20220412134708-e325df66cb51 h1:4AmaxeY7+r/PYYz3HS9pMY21Mw3ykO6STLFEk2FoJ2s= -knative.dev/pkg v0.0.0-20220412134708-e325df66cb51/go.mod 
h1:j2MeD8s+JoCu1vegX80GbRXV/xd20Jm1NznxBYtVXiM= +knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 h1:GNmzHVaUo3zoi/wtIN71LPQaWy6DdoYzmb+GIq2s4fw= +knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15/go.mod h1:YLjXbkQLlGHok+u0FLfMbBHFzY9WGu3GHhnrptoAy8I= knative.dev/reconciler-test v0.0.0-20210803183715-b61cc77c06f6/go.mod h1:+Kovy+G5zXZNcuO/uB+zfo37vFKZzsLIlWezt/nKMz0= -knative.dev/reconciler-test v0.0.0-20220412165608-994f0c3fab62/go.mod h1:BZai3XTZ7+A3HSwgm5cSgwS/Oc/9mOZJrAk+TtESABA= +knative.dev/reconciler-test v0.0.0-20220818122349-177f8264c28c/go.mod h1:A437yxlDVDVKQv779WlB9Nj9lWAMoOKHQXFXls24Sps= knative.dev/serving v0.25.0/go.mod h1:24E4fVyViFnz8aAaafzdrYKB7CAsQr4FMU7QXoIE6CI= -knative.dev/serving v0.31.0/go.mod h1:ObA3YEL77+M60xu4T3cUSpD+AX5eZN6Ww0pHg8iA6NE= -knative.dev/serving v0.31.1-0.20220630164831-69a88e92b069 h1:cCJJEu2rNF526l8o+9iUPqdtJUfhyWqqgZky6Lpueso= -knative.dev/serving v0.31.1-0.20220630164831-69a88e92b069/go.mod h1:dbb5Zcvev8SpzPdFQH/PGr7BAkNUHxsLFeEvu9309VM= +knative.dev/serving v0.34.0/go.mod h1:IyfedOBq3KzcD5dZONjbix2BfS0jOwDq5td8UE9CjCk= +knative.dev/serving v0.34.1 h1:AKZk/oEWrtVrllTlp5L6uSX9sfJg5aERUtWeXMv8g2E= +knative.dev/serving v0.34.1/go.mod h1:IyfedOBq3KzcD5dZONjbix2BfS0jOwDq5td8UE9CjCk= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -3930,7 +3935,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQb sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= diff --git a/third_party/VENDOR-LICENSE/k8s.io/utils/internal/third_party/forked/golang/net/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/utils/internal/third_party/forked/golang/LICENSE similarity index 100% rename from third_party/VENDOR-LICENSE/k8s.io/utils/internal/third_party/forked/golang/net/LICENSE rename to third_party/VENDOR-LICENSE/k8s.io/utils/internal/third_party/forked/golang/LICENSE diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go index ee2d51abc6..ea8fbfbb4d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -97,6 +97,7 @@ type ceClient struct { receiverMu sync.Mutex eventDefaulterFns []EventDefaulter pollGoroutines int + blockingCallback bool } func (c *ceClient) applyOptions(opts ...Option) error { @@ -248,14 +249,22 @@ func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { continue } - // Do not block on the invoker. 
- wg.Add(1) - go func() { + callback := func() { if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) } - wg.Done() - }() + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } } }() } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go index d0fe9dbaa9..938478162b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -8,6 +8,7 @@ package client import ( "context" "fmt" + "github.com/cloudevents/sdk-go/v2/binding" ) @@ -113,3 +114,15 @@ func WithInboundContextDecorator(dec func(context.Context, binding.Message) cont return nil } } + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. +// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go index 0f18314827..8fc449ed94 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -73,7 +73,7 @@ func (e Event) Data() []byte { } // DataAs attempts to populate the provided data object with the event payload. -// data should be a pointer type. +// obj should be a pointer type. func (e Event) DataAs(obj interface{}) error { data := e.Data() diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go index 7ca5ad2715..06204b2a1f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -157,7 +157,14 @@ func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ... buf := new(bytes.Buffer) buf.ReadFrom(message.BodyReader) errorStr := buf.String() - err = NewResult(res.StatusCode, "%s", errorStr) + // If the error is not wrapped, then append the original error string. 
+ if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } } } } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go index fb7bcd27ef..71e7346f30 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -6,8 +6,11 @@ package http import ( + "bytes" "context" "errors" + "io" + "io/ioutil" "net/http" "net/url" "time" @@ -53,6 +56,24 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam retry := 0 results := make([]protocol.Result, 0) + var ( + body []byte + err error + ) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + for { msg, result := p.doOnce(req) @@ -90,6 +111,8 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam } DoBackoff: + resetBody(req, body) + // Wait for the correct amount of backoff time. // total tries = retry + 1 @@ -103,3 +126,20 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam results = append(results, result) } } + +// reset body to allow it to be read multiple times, e.g. when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index b0814e7c9b..3ba05276f1 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,12 @@ Releases ======== +v1.8.0 (2022-02-28) +=================== + +- `Combine`: perform zero allocations when there are no errors. + + v1.7.0 (2021-05-06) =================== diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index faa0a05946..f45af149c1 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -103,7 +103,7 @@ // if err != nil { // return err // } -// defer multierr.AppendInvoke(err, multierr.Close(conn)) +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) // // ... // } // @@ -372,6 +372,14 @@ func inspect(errors []error) (res inspectResult) { // fromSlice converts the given list of errors into a single error. func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + res := inspect(errors) switch res.Count { case 0: @@ -381,8 +389,13 @@ func fromSlice(errors []error) error { return errors[res.FirstErrorIdx] case len(errors): if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} + // Error list is flat. Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. 
+ out := make([]error, len(errors)) + copy(out, errors) + return &multiError{errors: out} } } diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index bfc493c80e..1793b08c89 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,6 +3,27 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. +* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + ## 1.20.0 (4 Jan 2022) Enhancements: diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go index 3567a9a1e6..8f86c430f0 100644 --- a/vendor/go.uber.org/zap/level.go +++ b/vendor/go.uber.org/zap/level.go @@ -86,6 +86,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { return a } +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + // Enabled implements the zapcore.LevelEnabler interface, which allows the // AtomicLevel to be used in place of traditional static levels. func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index f116bd936f..087c742228 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -24,9 +24,9 @@ import ( "fmt" "io/ioutil" "os" - "runtime" "strings" + "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/zapcore" ) @@ -259,8 +259,10 @@ func (log *Logger) clone() *Logger { } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. const callerSkipOffset = 2 // Check the level first to reduce the cost of disabled log calls. @@ -307,42 +309,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Thread the error output through to the CheckedEntry. ce.ErrorOutput = log.errorOutput - if log.addCaller { - frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) - if !defined { + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. 
We'll share information between these two. + stackDepth := stacktraceFirst + if addStack { + stackDepth = stacktraceFull + } + stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } + return ce + } - ce.Entry.Caller = zapcore.EntryCaller{ - Defined: defined, + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, PC: frame.PC, File: frame.File, Line: frame.Line, Function: frame.Function, } } - if log.addStack.Enabled(ce.Entry.Level) { - ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String - } - return ce -} + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() -// getCallerFrame gets caller frame. The argument skip is the number of stack -// frames to ascend, with 0 identifying the caller of getCallerFrame. The -// boolean ok is false if it was not possible to recover the information. -// -// Note: This implementation is similar to runtime.Caller, but it returns the whole frame. -func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { - const skipOffset = 2 // skip getCallerFrame and Callers - - pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc) - if numFrames < 1 { - return + stackfmt := newStackFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() } - frame, _ = runtime.CallersFrames(pc).Next() - return frame, frame.PC != 0 + return ce } diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go index 0cf8c1ddff..3d187fa566 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -24,62 +24,153 @@ import ( "runtime" "sync" + "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" ) -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } +var _stacktracePool = sync.Pool{ + New: func() interface{} { + return &stacktrace{ + storage: make([]uintptr, 64), + } + }, +} + +type stacktrace struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// stacktraceDepth specifies how deep of a stack trace should be captured. +type stacktraceDepth int + +const ( + // stacktraceFirst captures only the first frame. + stacktraceFirst stacktraceDepth = iota + + // stacktraceFull captures the entire call stack, allocating more + // storage for it if needed. + stacktraceFull ) -func takeStacktrace(skip int) string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Callers and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. 
- numFrames = runtime.Callers(skip+2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) +// captureStacktrace captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// captureStacktrace. +// +// The caller must call Free on the returned stacktrace after using it. +func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { + stack := _stacktracePool.Get().(*stacktrace) + + switch depth { + case stacktraceFirst: + stack.pcs = stack.storage[:1] + case stacktraceFull: + stack.pcs = stack.storage } - i := 0 - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if i != 0 { - buffer.AppendByte('\n') + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. + if depth == stacktraceFull { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] } + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *stacktrace) Free() { + st.frames = nil + st.pcs = nil + _stacktracePool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *stacktrace) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *stacktrace) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +func takeStacktrace(skip int) string { + stack := captureStacktrace(skip+1, stacktraceFull) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + stackfmt.FormatStack(stack) return buffer.String() } -type programCounters struct { - pcs []uintptr +// stackFormatter formats a stack trace into a readable string representation. +type stackFormatter struct { + b *buffer.Buffer + nonEmpty bool // whehther we've written at least one frame already +} + +// newStackFormatter builds a new stackFormatter. 
+func newStackFormatter(b *buffer.Buffer) stackFormatter { + return stackFormatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *stackFormatter) FormatStack(stack *stacktrace) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } } -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} +// FormatFrame formats the given frame. +func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) } diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 505c714ac4..c5d751b821 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -364,7 +364,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final := enc.clone() final.buf.AppendByte('{') - if final.LevelKey != "" { + if final.LevelKey != "" && final.EncodeLevel != nil { final.addKey(final.LevelKey) cur := final.buf.Len() final.EncodeLevel(ent.Level, final) diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go index e575c9f432..56e88dc0c8 100644 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -55,6 +55,18 @@ const ( _maxLevel = FatalLevel ) +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + // String returns a lower-case ASCII representation of the log level. func (l Level) String() string { switch l { diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e968..0000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
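For reference, the level-parsing helpers introduced by the zap v1.21.0 bump above (zapcore.ParseLevel and zap.ParseAtomicLevel) can be exercised as in the sketch below; this snippet is illustrative only and not part of the patch, and the production-config wiring is an assumption rather than anything this repository does.

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// zapcore.ParseLevel accepts lower-case or all-caps level names ("debug", "INFO", ...).
	lvl, err := zapcore.ParseLevel("debug")
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // prints "debug"

	// zap.ParseAtomicLevel wraps the same parsing in an AtomicLevel,
	// which can be adjusted at runtime.
	alvl, err := zap.ParseAtomicLevel("INFO")
	if err != nil {
		panic(err)
	}

	// Assumed wiring for illustration: plug the parsed AtomicLevel into a
	// standard production config and build a logger from it.
	cfg := zap.NewProductionConfig()
	cfg.Level = alvl
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("logger configured from a text level")
}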
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 9857fe53d3..4c0850a45a 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -8,22 +8,35 @@ package errgroup import ( "context" + "fmt" "sync" ) +type token struct{} + // A Group is a collection of goroutines working on subtasks that are part of // the same overall task. // -// A zero Group is valid and does not cancel on error. +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. type Group struct { cancel func() wg sync.WaitGroup + sem chan token + errOnce sync.Once err error } +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + // WithContext returns a new Group and an associated Context derived from ctx. // // The derived Context is canceled the first time a function passed to Go @@ -45,14 +58,48 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. // // The first call to return a non-nil error cancels the group; its error will be // returned by Wait. func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + g.wg.Add(1) go func() { - defer g.wg.Done() + defer g.done() if err := f(); err != nil { g.errOnce.Do(func() { @@ -63,4 +110,23 @@ func (g *Group) Go(f func() error) { }) } }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) } diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index df36e3a30f..0173b6982e 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + 
} + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc05..268558a0d6 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index a9c945e1d0..d45cbe1720 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -28,12 +28,14 @@ Historical context is available here: Semantic versioning is used in this repository. It contains several Go modules with different levels of stability: - `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags -- `k8s.io/tools` - no stable API yet (may change eventually), `tools/v0.Y.Z` tags - `examples` - no stable API, no tags, no intention to ever stabilize Exempt from the API stability guarantee are items (packages, functions, etc.) which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those -may still change in incompatible ways or get removed entirely. +may still change in incompatible ways or get removed entirely. This can only +be used for code that is used in tests to avoid situations where non-test +code from two different Kubernetes dependencies depends on incompatible +releases of klog because an experimental API was changed. 
---- diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 33743ffb8d..65ac479ab6 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -34,18 +34,6 @@ import ( // mutex locking. var ( - // contextualLoggingEnabled controls whether contextual logging is - // active. Disabling it may have some small performance benefit. - contextualLoggingEnabled = true - - // globalLogger is the global Logger chosen by users of klog, nil if - // none is available. - globalLogger *Logger - - // globalLoggerOptions contains the options that were supplied for - // globalLogger. - globalLoggerOptions loggerOptions - // klogLogger is used as fallback for logging through the normal klog code // when no Logger is set. klogLogger logr.Logger = logr.New(&klogger{}) @@ -80,27 +68,17 @@ func SetLogger(logger logr.Logger) { // Supporting direct calls is recommended because it avoids the overhead of // routing log entries through klogr into klog and then into the actual Logger // backend. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - globalLogger = &logger - globalLoggerOptions = loggerOptions{} + logging.logger = &logger + logging.loggerOptions = loggerOptions{} for _, opt := range opts { - opt(&globalLoggerOptions) + opt(&logging.loggerOptions) } } // ContextualLogger determines whether the logger passed to // SetLoggerWithOptions may also get called directly. Such a logger cannot rely // on verbosity checking in klog. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func ContextualLogger(enabled bool) LoggerOption { return func(o *loggerOptions) { o.contextualLogger = enabled @@ -108,11 +86,6 @@ func ContextualLogger(enabled bool) LoggerOption { } // FlushLogger provides a callback for flushing data buffered by the logger. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func FlushLogger(flush func()) LoggerOption { return func(o *loggerOptions) { o.flush = flush @@ -121,11 +94,6 @@ func FlushLogger(flush func()) LoggerOption { // LoggerOption implements the functional parameter paradigm for // SetLoggerWithOptions. -// -// Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. type LoggerOption func(o *loggerOptions) type loggerOptions struct { @@ -139,8 +107,8 @@ type loggerOptions struct { // Modifying the logger is not thread-safe and should be done while no other // goroutines invoke log calls, usually during program initialization. func ClearLogger() { - globalLogger = nil - globalLoggerOptions = loggerOptions{} + logging.logger = nil + logging.loggerOptions = loggerOptions{} } // EnableContextualLogging controls whether contextual logging is enabled. @@ -151,25 +119,15 @@ func ClearLogger() { // to avoid the additional overhead for contextual logging. // // This must be called during initialization before goroutines are started. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. 
func EnableContextualLogging(enabled bool) { - contextualLoggingEnabled = enabled + logging.contextualLoggingEnabled = enabled } // FromContext retrieves a logger set by the caller or, if not set, // falls back to the program's global logger (a Logger instance or klog // itself). -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func FromContext(ctx context.Context) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { if logger, err := logr.FromContext(ctx); err == nil { return logger } @@ -181,11 +139,6 @@ func FromContext(ctx context.Context) Logger { // TODO can be used as a last resort by code that has no means of // receiving a logger from its caller. FromContext or an explicit logger // parameter should be used instead. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func TODO() Logger { return Background() } @@ -194,16 +147,11 @@ func TODO() Logger { // that logger was initialized by the program and not by code that should // better receive a logger via its parameters. TODO can be used as a temporary // solution for such code. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func Background() Logger { - if globalLoggerOptions.contextualLogger { - // Is non-nil because globalLoggerOptions.contextualLogger is + if logging.loggerOptions.contextualLogger { + // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. - return *globalLogger + return *logging.logger } return klogLogger @@ -211,13 +159,8 @@ func Background() Logger { // LoggerWithValues returns logger.WithValues(...kv) when // contextual logging is enabled, otherwise the logger. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func LoggerWithValues(logger Logger, kv ...interface{}) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithValues(kv...) } return logger @@ -225,13 +168,8 @@ func LoggerWithValues(logger Logger, kv ...interface{}) Logger { // LoggerWithName returns logger.WithName(name) when contextual logging is // enabled, otherwise the logger. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func LoggerWithName(logger Logger, name string) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithName(name) } return logger @@ -239,13 +177,8 @@ func LoggerWithName(logger Logger, name string) Logger { // NewContext returns logr.NewContext(ctx, logger) when // contextual logging is enabled, otherwise ctx. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func NewContext(ctx context.Context, logger Logger) context.Context { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logr.NewContext(ctx, logger) } return ctx diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go index 43cd08190f..602c3ed9e6 100644 --- a/vendor/k8s.io/klog/v2/imports.go +++ b/vendor/k8s.io/klog/v2/imports.go @@ -24,35 +24,15 @@ import ( // without directly importing it. // Logger in this package is exactly the same as logr.Logger. 
-// -// Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. type Logger = logr.Logger // LogSink in this package is exactly the same as logr.LogSink. -// -// Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. type LogSink = logr.LogSink // Runtimeinfo in this package is exactly the same as logr.RuntimeInfo. -// -// Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. type RuntimeInfo = logr.RuntimeInfo var ( // New is an alias for logr.New. - // - // Experimental - // - // Notice: This variable is EXPERIMENTAL and may be changed or removed in a - // later release. New = logr.New ) diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md new file mode 100644 index 0000000000..03d692c8f8 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/README.md @@ -0,0 +1,7 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. + +This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular +dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog). diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go new file mode 100644 index 0000000000..b8b6af5c81 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. 
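The new internal clock package is a copy of k8s.io/utils/clock, vendored only to break the klog -> k8s.io/utils -> klog import cycle; application code keeps importing the public k8s.io/utils/clock, which exposes the same interfaces. A rough sketch of the injection pattern it enables, assuming the public package and its testing helpers rather than the internal copy:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
	clocktesting "k8s.io/utils/clock/testing"
)

// ageOf reports how long ago ts happened, reading time through an injected
// clock so tests can control it.
func ageOf(c clock.PassiveClock, ts time.Time) time.Duration {
	return c.Since(ts)
}

func main() {
	// Production code passes the real clock backed by time.Now.
	fmt.Println(ageOf(clock.RealClock{}, time.Now().Add(-time.Minute)))

	// Tests pass a fake clock that only advances when told to.
	start := time.Date(2022, 8, 1, 0, 0, 0, 0, time.UTC)
	fake := clocktesting.NewFakePassiveClock(start)
	fake.SetTime(start.Add(30 * time.Second))
	fmt.Println(ageOf(fake, start)) // 30s
}
```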
+type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). +func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go new file mode 100644 index 0000000000..f27bd14472 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go @@ -0,0 +1,42 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbg provides some helper code for call traces. +package dbg + +import ( + "runtime" +) + +// Stacks is a wrapper for runtime.Stack that attempts to recover the data for +// all goroutines or the calling one. +func Stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index d897313682..f85d7ccf83 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -20,6 +20,8 @@ import ( "bytes" "fmt" "strconv" + + "github.com/go-logr/logr" ) // WithValues implements LogSink.WithValues. The old key/value pairs are @@ -44,53 +46,49 @@ func WithValues(oldKV, newKV []interface{}) []interface{} { return kv } -// TrimDuplicates deduplicates elements provided in multiple key/value tuple -// slices, whilst maintaining the distinction between where the items are -// contained. -func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { - // maintain a map of all seen keys - seenKeys := map[interface{}]struct{}{} - // build the same number of output slices as inputs - outs := make([][]interface{}, len(kvLists)) - // iterate over the input slices backwards, as 'later' kv specifications - // of the same key will take precedence over earlier ones - for i := len(kvLists) - 1; i >= 0; i-- { - // initialise this output slice - outs[i] = []interface{}{} - // obtain a reference to the kvList we are processing - // and make sure it has an even number of entries - kvList := kvLists[i] - if len(kvList)%2 != 0 { - kvList = append(kvList, missingValue) - } +// MergeKVs deduplicates elements provided in two key/value slices. +// +// Keys in each slice are expected to be unique, so duplicates can only occur +// when the first and second slice contain the same key. When that happens, the +// key/value pair from the second slice is used. The first slice must be well-formed +// (= even key/value pairs). The second one may have a missing value, in which +// case the special "missing value" is added to the result. +func MergeKVs(first, second []interface{}) []interface{} { + maxLength := len(first) + (len(second)+1)/2*2 + if maxLength == 0 { + // Nothing to do at all. + return nil + } - // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for - // slices that have an even number of elements. - // We add (len(kvList) % 2) here to handle the case where there is an - // odd number of elements in a kvList. - // If there is an odd number, then the last element in the slice will - // have the value 'null'. 
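MergeKVs replaces TrimDuplicates in the klogr code path further down in this diff; the user-visible effect is that a key supplied at the log call site overrides the same key from WithValues and is emitted only once. A small sketch of that behaviour (the rendered output shape is illustrative and depends on klog flags):

```go
package main

import (
	"k8s.io/klog/v2"
)

func main() {
	defer klog.Flush()

	// WithValues stores pod="fallback"; the call site passes pod="kube-dns".
	logger := klog.NewKlogr().WithValues("pod", "fallback", "node", "worker-1")

	// With the merge semantics above, the call-site value wins and the key
	// appears once, e.g.: "syncing" pod="kube-dns" node="worker-1"
	logger.Info("syncing", "pod", "kube-dns")
}
```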
- for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { - k := kvList[i2] - // if we have already seen this key, do not include it again - if _, ok := seenKeys[k]; ok { - continue - } - // make a note that we've observed a new key - seenKeys[k] = struct{}{} - // attempt to obtain the value of the key - var v interface{} - // i2+1 should only ever be out of bounds if we handling the first - // iteration over a slice with an odd number of elements - if i2+1 < len(kvList) { - v = kvList[i2+1] - } - // add this KV tuple to the *start* of the output list to maintain - // the original order as we are iterating over the slice backwards - outs[i] = append([]interface{}{k, v}, outs[i]...) + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + return second + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + merged := make([]interface{}, 0, maxLength) + for i := 0; i+1 < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue } + merged = append(merged, key, first[i+1]) } - return outs + merged = append(merged, second...) + if len(merged)%2 != 0 { + merged = append(merged, missingValue) + } + return merged } const missingValue = "(MISSING)" @@ -111,10 +109,10 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. - if k, ok := k.(string); ok { + if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(k) + b.WriteString(sK) } else { b.WriteString(fmt.Sprintf("%s", k)) } @@ -131,6 +129,24 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { writeStringValue(b, true, v) case error: writeStringValue(b, true, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, true, value) + default: + writeStringValue(b, false, fmt.Sprintf("%+v", v)) + } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided // to format byte slices with "%+q". The advantages of that are: @@ -163,6 +179,18 @@ func StringerToString(s fmt.Stringer) (ret string) { return } +// MarshalerToValue invokes a marshaler and catches +// panics. 
+func MarshalerToValue(m logr.Marshaler) (ret interface{}) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = m.MarshalLog() + return +} + // ErrorToString converts an error to a string, // handling panics if they occur. func ErrorToString(err error) (ret string) { diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index db58f8baa6..2c218f698c 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -77,6 +77,8 @@ func KRef(namespace, name string) ObjectRef { } // KObjs returns slice of ObjectRef from an slice of ObjectMeta +// +// DEPRECATED: Use KObjSlice instead, it has better performance. func KObjs(arg interface{}) []ObjectRef { s := reflect.ValueOf(arg) if s.Kind() != reflect.Slice { @@ -92,3 +94,65 @@ func KObjs(arg interface{}) []ObjectRef { } return objectRefs } + +// KObjSlice takes a slice of objects that implement the KMetadata interface +// and returns an object that gets logged as a slice of ObjectRef values or a +// string containing those values, depending on whether the logger prefers text +// output or structured output. +// +// An error string is logged when KObjSlice is not passed a suitable slice. +// +// Processing of the argument is delayed until the value actually gets logged, +// in contrast to KObjs where that overhead is incurred regardless of whether +// the result is needed. +func KObjSlice(arg interface{}) interface{} { + return kobjSlice{arg: arg} +} + +type kobjSlice struct { + arg interface{} +} + +var _ fmt.Stringer = kobjSlice{} +var _ logr.Marshaler = kobjSlice{} + +func (ks kobjSlice) String() string { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return fmt.Sprintf("%v", objectRefs) +} + +func (ks kobjSlice) MarshalLog() interface{} { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return objectRefs +} + +func (ks kobjSlice) process() ([]interface{}, error) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as nil. + return nil, nil + case reflect.Slice: + // Okay, handle below. + default: + return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg) + } + objectRefs := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + item := s.Index(i).Interface() + if item == nil { + objectRefs = append(objectRefs, nil) + } else if v, ok := item.(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item) + } + } + return objectRefs, nil +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index bb6f64be49..652fadcd4e 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -41,6 +41,10 @@ // // -logtostderr=true // Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. // -alsologtostderr=false // Logs are written to standard error as well as to files. // -stderrthreshold=ERROR @@ -91,9 +95,10 @@ import ( "github.com/go-logr/logr" "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" "k8s.io/klog/v2/internal/serialize" "k8s.io/klog/v2/internal/severity" - "k8s.io/utils/clock" ) // severityValue identifies the sort of log: info, warning etc. 
It also implements @@ -242,6 +247,10 @@ func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() + return m.serialize() +} + +func (m *moduleSpec) serialize() string { var b bytes.Buffer for i, f := range m.filter { if i > 0 { @@ -263,6 +272,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of // Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { + filter, err := parseModuleSpec(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +func parseModuleSpec(value string) ([]modulePat, error) { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { @@ -271,15 +291,15 @@ func (m *moduleSpec) Set(value string) error { } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax + return nil, errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") + return nil, errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { - return errors.New("negative value for vmodule level") + return nil, errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. @@ -287,10 +307,7 @@ func (m *moduleSpec) Set(value string) error { // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + return filter, nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters @@ -404,19 +421,19 @@ func InitFlags(flagset *flag.FlagSet) { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory (no effect when -logtostderr=true)") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file (no effect when -logtostderr=true)") flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. 
"+ "If the value is 0, the maximum file size is unlimited.") flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files (no effect when -logtostderr=true)") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") } @@ -426,8 +443,20 @@ func Flush() { logging.lockAndFlushAll() } -// loggingT collects all the global state of the logging setup. -type loggingT struct { +// settings collects global settings. +type settings struct { + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled bool + + // logger is the global Logger chosen by users of klog, nil if + // none is available. + logger *Logger + + // loggerOptions contains the options that were supplied for + // globalLogger. + loggerOptions loggerOptions + // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. @@ -437,26 +466,14 @@ type loggingT struct { // Level flag. Handled atomically. stderrThreshold severityValue // The -stderrthreshold flag. - // bufferCache maintains the free list. It uses its own mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - bufferCache buffer.Buffers + // Access to all of the following fields must be protected via a mutex. - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex // file holds writer for each of the log types. 
file [severity.NumSeverity]flushSyncWriter - // flushD holds a flushDaemon that frequently flushes log file buffers. - flushD *flushDaemon // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. It may be read safely // using sync.LoadInt32, but is only modified under mu. @@ -496,7 +513,48 @@ type loggingT struct { filter LogFilter } -var logging loggingT +// deepCopy creates a copy that doesn't share anything with the original +// instance. +func (s settings) deepCopy() settings { + // vmodule is a slice and would be shared, so we have copy it. + filter := make([]modulePat, len(s.vmodule.filter)) + for i := range s.vmodule.filter { + filter[i] = s.vmodule.filter[i] + } + s.vmodule.filter = filter + + return s +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + settings + + // bufferCache maintains the free list. It uses its own mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + bufferCache buffer.Buffers + + // flushD holds a flushDaemon that frequently flushes log file buffers. + // Uses its own mutex. + flushD *flushDaemon + + // mu protects the remaining elements of this structure and the fields + // in settingsT which need a mutex lock. + mu sync.Mutex + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level +} + +var logging = loggingT{ + settings: settings{ + contextualLoggingEnabled: true, + }, +} // setVState sets a consistent state for V logging. // l.mu is held. @@ -520,6 +578,55 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool var timeNow = time.Now // Stubbed out for testing. +// CaptureState gathers information about all current klog settings. +// The result can be used to restore those settings. +func CaptureState() State { + logging.mu.Lock() + defer logging.mu.Unlock() + return &state{ + settings: logging.settings.deepCopy(), + flushDRunning: logging.flushD.isRunning(), + maxSize: MaxSize, + } +} + +// State stores a snapshot of klog settings. It gets created with CaptureState +// and can be used to restore the entire state. Modifying individual settings +// is supported via the command line flags. +type State interface { + // Restore restore the entire state. It may get called more than once. + Restore() +} + +type state struct { + settings + + flushDRunning bool + maxSize uint64 +} + +func (s *state) Restore() { + // This needs to be done before mutex locking. + if s.flushDRunning && !logging.flushD.isRunning() { + // This is not quite accurate: StartFlushDaemon might + // have been called with some different interval. 
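CaptureState and Restore, introduced in the settings refactor above, give tests a supported way to snapshot and roll back the global klog configuration. A hedged sketch of the intended usage in a Go test (the flag value and test name are made up):

```go
package example

import (
	"flag"
	"testing"

	"k8s.io/klog/v2"
)

func TestWithHighVerbosity(t *testing.T) {
	// Snapshot every klog setting and restore it when the test finishes,
	// so the verbosity change below cannot leak into other tests.
	state := klog.CaptureState()
	t.Cleanup(state.Restore)

	var fs flag.FlagSet
	klog.InitFlags(&fs)
	if err := fs.Set("v", "5"); err != nil {
		t.Fatal(err)
	}

	klog.V(5).InfoS("only visible while verbosity is raised")
}
```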
+ interval := s.flushInterval + if interval == 0 { + interval = flushInterval + } + logging.flushD.run(interval) + } else if !s.flushDRunning && logging.flushD.isRunning() { + logging.flushD.stop() + } + + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.settings = s.settings + logging.setVState(s.verbosity, s.vmodule.filter, true) + MaxSize = s.maxSize +} + /* header formats a log header as defined by the C++ implementation. It returns a buffer containing the formatted header and the user's file and line number. @@ -688,7 +795,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) + l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. l.bufferCache.PutBuffer(b) } @@ -757,7 +864,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) + buf.Write(dbg.Stacks(false)) } } data := buf.Bytes() @@ -765,7 +872,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { - globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) + logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) } else { log.WithCallDepth(depth + 3).Info(string(data)) } @@ -822,12 +929,15 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf OsExit(1) } // Dump all goroutine stacks before exiting. - trace := stacks(true) - // Write the stack trace for all goroutines to the stderr. - if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { - os.Stderr.Write(trace) + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) } + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. @@ -847,25 +957,6 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that @@ -1077,9 +1168,9 @@ func (f *flushDaemon) isRunning() bool { return f.stopC != nil } -// StopFlushDaemon stops the flush daemon, if running. 
+// StopFlushDaemon stops the flush daemon, if running, and flushes once. // This prevents klog from leaking goroutines on shutdown. After stopping -// the daemon, you can still manually flush buffers by calling Flush(). +// the daemon, you can still manually flush buffers again by calling Flush(). func StopFlushDaemon() { logging.flushD.stop() } @@ -1109,8 +1200,8 @@ func (l *loggingT) flushAll() { file.Sync() // ignore error } } - if globalLoggerOptions.flush != nil { - globalLoggerOptions.flush() + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() } } @@ -1158,7 +1249,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) return len(b), nil } @@ -1196,10 +1287,10 @@ type Verbose struct { } func newVerbose(level Level, b bool) Verbose { - if globalLogger == nil { + if logging.logger == nil { return Verbose{b, nil} } - v := globalLogger.V(int(level)) + v := logging.logger.V(int(level)) return Verbose{b, &v} } @@ -1318,7 +1409,7 @@ func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) } // InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. @@ -1347,37 +1438,37 @@ func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(severity.InfoLog, globalLogger, logging.filter, args...) + logging.print(severity.InfoLog, logging.logger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(severity.InfoLog, globalLogger, logging.filter, args...) + logging.println(severity.InfoLog, logging.logger, logging.filter, args...) } // InfolnDepth acts as Infoln but uses depth to determine which call frame to log. // InfolnDepth(0, "msg") is the same as Infoln("msg"). func InfolnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) 
+ logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...) } // InfofDepth acts as Infof but uses depth to determine which call frame to log. // InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). func InfofDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1389,79 +1480,79 @@ func InfofDepth(depth int, format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(severity.WarningLog, globalLogger, logging.filter, args...) + logging.print(severity.WarningLog, logging.logger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(severity.WarningLog, globalLogger, logging.filter, args...) + logging.println(severity.WarningLog, logging.logger, logging.filter, args...) } // WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. // WarninglnDepth(0, "msg") is the same as Warningln("msg"). func WarninglnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) } // WarningfDepth acts as Warningf but uses depth to determine which call frame to log. // WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). func WarningfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) + logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) 
} // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. // ErrorlnDepth(0, "msg") is the same as Errorln("msg"). func ErrorlnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) } // ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. // ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). func ErrorfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1474,52 +1565,63 @@ func ErrorfDepth(depth int, format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls OsExit(255). +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. +// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. 
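The reworked Fatal documentation in this hunk suggests composing an error log, Flush, and an explicit exit when a caller wants control over stack dumps instead of klog's full goroutine dump. A rough sketch of that pattern (Config, loadConfig and the config path are hypothetical placeholders):

```go
package main

import (
	"os"

	"k8s.io/klog/v2"
)

// Config and loadConfig stand in for whatever the application actually loads.
type Config struct{ Path string }

func loadConfig(path string) (*Config, error) { return &Config{Path: path}, nil }

// mustLoadConfig logs the error with context, flushes buffered log data and
// exits, instead of relying on klog.Fatalf and its goroutine stack dumps.
func mustLoadConfig(path string) *Config {
	cfg, err := loadConfig(path)
	if err != nil {
		klog.ErrorS(err, "Failed to load configuration", "path", path)
		klog.Flush()
		os.Exit(1)
	}
	return cfg
}

func main() {
	cfg := mustLoadConfig("/etc/demo/config.yaml")
	klog.InfoS("configuration loaded", "path", cfg.Path)
	klog.Flush()
}
```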
func Fatal(args ...interface{}) { - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. // FatallnDepth(0, "msg") is the same as Fatalln("msg"). func FatallnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. // FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). func FatalfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1530,41 +1632,41 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. // ExitlnDepth(0, "msg") is the same as Exitln("msg"). 
func ExitlnDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // ExitfDepth acts as Exitf but uses depth to determine which call frame to log. // ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). func ExitfDepth(depth int, format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index cdb3834fa1..027a4014af 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -25,11 +25,6 @@ import ( // NewKlogr returns a logger that is functionally identical to // klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The // difference is that it uses a simpler implementation. -// -// Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. func NewKlogr() Logger { return New(&klogger{}) } @@ -48,11 +43,11 @@ func (l *klogger) Init(info logr.RuntimeInfo) { } func (l klogger) Info(level int, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) + V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l klogger) Enabled(level int) bool { @@ -60,11 +55,11 @@ func (l klogger) Enabled(level int) bool { } func (l klogger) Error(err error, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) + ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go new file mode 100644 index 0000000000..fd4db44072 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package golang_lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/k8s.io/utils/lru/lru.go b/vendor/k8s.io/utils/lru/lru.go new file mode 100644 index 0000000000..5d0077abfb --- /dev/null +++ b/vendor/k8s.io/utils/lru/lru.go @@ -0,0 +1,79 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package lru + +import ( + "sync" + + groupcache "k8s.io/utils/internal/third_party/forked/golang/golang-lru" +) + +type Key = groupcache.Key + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + cache *groupcache.Cache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) *Cache { + return &Cache{ + cache: groupcache.New(size), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.cache.Get(key) +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Remove(key) +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.cache.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Clear() +} diff --git a/vendor/knative.dev/client/pkg/eventing/v1/client.go b/vendor/knative.dev/client/pkg/eventing/v1/client.go index 0d6effb636..ec2e06ba08 100644 --- a/vendor/knative.dev/client/pkg/eventing/v1/client.go +++ b/vendor/knative.dev/client/pkg/eventing/v1/client.go @@ -19,14 +19,13 @@ import ( "fmt" "time" - "knative.dev/client/pkg/config" - - "k8s.io/client-go/util/retry" - apis_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/util/retry" + "knative.dev/client/pkg/config" + v1 "knative.dev/eventing/pkg/apis/duck/v1" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" "knative.dev/eventing/pkg/client/clientset/versioned/scheme" clientv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1" @@ -38,6 +37,7 @@ import ( ) type TriggerUpdateFunc func(origTrigger *eventingv1.Trigger) (*eventingv1.Trigger, error) +type BrokerUpdateFunc func(origBroker *eventingv1.Broker) (*eventingv1.Broker, error) // KnEventingClient to Eventing Sources. 
All methods are relative to the // namespace specified during construction @@ -64,6 +64,10 @@ type KnEventingClient interface { DeleteBroker(ctx context.Context, name string, timeout time.Duration) error // ListBrokers returns list of broker CRDs ListBrokers(ctx context.Context) (*eventingv1.BrokerList, error) + // UpdateBroker is used to update an instance of broker + UpdateBroker(ctx context.Context, broker *eventingv1.Broker) error + // UpdateBrokerWithRetry is used to update an instance of broker + UpdateBrokerWithRetry(ctx context.Context, name string, updateFunc BrokerUpdateFunc, nrRetries int) error } // KnEventingClient is a combination of Sources client interface and namespace @@ -349,6 +353,45 @@ func (c *knEventingClient) ListBrokers(ctx context.Context) (*eventingv1.BrokerL return brokerListNew, nil } +// UpdateBroker is used to update an instance of broker +func (c *knEventingClient) UpdateBroker(ctx context.Context, broker *eventingv1.Broker) error { + _, err := c.client.Brokers(c.namespace).Update(ctx, broker, meta_v1.UpdateOptions{}) + if err != nil { + return kn_errors.GetError(err) + } + return nil +} + +func (c *knEventingClient) UpdateBrokerWithRetry(ctx context.Context, name string, updateFunc BrokerUpdateFunc, nrRetries int) error { + return updateBrokerWithRetry(ctx, c, name, updateFunc, nrRetries) +} + +func updateBrokerWithRetry(ctx context.Context, c KnEventingClient, name string, updateFunc BrokerUpdateFunc, nrRetries int) error { + b := config.DefaultRetry + b.Steps = nrRetries + updateBrokerFunc := func() error { + return updateBroker(ctx, c, name, updateFunc) + } + err := retry.RetryOnConflict(b, updateBrokerFunc) + return err +} + +func updateBroker(ctx context.Context, c KnEventingClient, name string, updateFunc BrokerUpdateFunc) error { + broker, err := c.GetBroker(ctx, name) + if err != nil { + return err + } + if broker.GetDeletionTimestamp() != nil { + return fmt.Errorf("can't update broker %s because it has been marked for deletion", name) + } + updatedBroker, err := updateFunc(broker.DeepCopy()) + if err != nil { + return err + } + + return c.UpdateBroker(ctx, updatedBroker) +} + // BrokerBuilder is for building the broker type BrokerBuilder struct { broker *eventingv1.Broker @@ -363,6 +406,13 @@ func NewBrokerBuilder(name string) *BrokerBuilder { }} } +// NewBrokerBuilderFromExisting returns broker builder from original broker +func NewBrokerBuilderFromExisting(broker *eventingv1.Broker) *BrokerBuilder { + return &BrokerBuilder{ + broker: broker, + } +} + // WithGvk add the GVK coordinates for read tests func (b *BrokerBuilder) WithGvk() *BrokerBuilder { _ = updateEventingGVK(b.broker) @@ -387,6 +437,87 @@ func (b *BrokerBuilder) Class(class string) *BrokerBuilder { return b } +// DlSink for the broker builder +func (b *BrokerBuilder) DlSink(dlSink *duckv1.Destination) *BrokerBuilder { + empty := duckv1.Destination{} + if dlSink == nil || *dlSink == empty { + return b + } + if b.broker.Spec.Delivery == nil { + b.broker.Spec.Delivery = &v1.DeliverySpec{} + } + b.broker.Spec.Delivery.DeadLetterSink = dlSink + return b +} + +// Retry for the broker builder +func (b *BrokerBuilder) Retry(retry *int32) *BrokerBuilder { + if retry == nil || *retry == 0 { + return b + } + if b.broker.Spec.Delivery == nil { + b.broker.Spec.Delivery = &v1.DeliverySpec{} + } + b.broker.Spec.Delivery.Retry = retry + return b +} + +// Timeout for the broker builder +func (b *BrokerBuilder) Timeout(timeout *string) *BrokerBuilder { + if timeout == nil || *timeout == "" { + return 
b + } + if b.broker.Spec.Delivery == nil { + b.broker.Spec.Delivery = &v1.DeliverySpec{} + } + b.broker.Spec.Delivery.Timeout = timeout + return b +} + +// BackoffPolicy for the broker builder +func (b *BrokerBuilder) BackoffPolicy(policyType *v1.BackoffPolicyType) *BrokerBuilder { + if policyType == nil || *policyType == "" { + return b + } + if b.broker.Spec.Delivery == nil { + b.broker.Spec.Delivery = &v1.DeliverySpec{} + } + b.broker.Spec.Delivery.BackoffPolicy = policyType + return b +} + +// BackoffDelay for the broker builder +func (b *BrokerBuilder) BackoffDelay(backoffDelay *string) *BrokerBuilder { + if backoffDelay == nil || *backoffDelay == "" { + return b + } + if b.broker.Spec.Delivery == nil { + b.broker.Spec.Delivery = &v1.DeliverySpec{} + } + b.broker.Spec.Delivery.BackoffDelay = backoffDelay + return b +} + +// RetryAfterMax for the broker builder +func (b *BrokerBuilder) RetryAfterMax(max *string) *BrokerBuilder { + if max == nil || *max == "" { + return b + } + if b.broker.Spec.Delivery == nil { + b.broker.Spec.Delivery = &v1.DeliverySpec{} + } + b.broker.Spec.Delivery.RetryAfterMax = max + return b + +} + +// Config for the broker builder +func (b *BrokerBuilder) Config(config *duckv1.KReference) *BrokerBuilder { + b.broker.Spec.Config = config + return b + +} + // Build to return an instance of broker object func (b *BrokerBuilder) Build() *eventingv1.Broker { return b.broker diff --git a/vendor/knative.dev/client/pkg/eventing/v1/client_mock.go b/vendor/knative.dev/client/pkg/eventing/v1/client_mock.go index d4df618643..e8cea8559f 100644 --- a/vendor/knative.dev/client/pkg/eventing/v1/client_mock.go +++ b/vendor/knative.dev/client/pkg/eventing/v1/client_mock.go @@ -104,7 +104,7 @@ func (c *MockKnEventingClient) ListTriggers(context.Context) (*eventingv1.Trigge return call.Result[0].(*eventingv1.TriggerList), mock.ErrorOrNil(call.Result[1]) } -// UpdateTrigger records a call for ListTriggers with the expected result and error (nil if none) +// UpdateTrigger records a call for UpdateTrigger with the expected result and error (nil if none) func (sr *EventingRecorder) UpdateTrigger(trigger interface{}, err error) { sr.r.Add("UpdateTrigger", []interface{}{trigger}, []interface{}{err}) } @@ -163,6 +163,20 @@ func (c *MockKnEventingClient) ListBrokers(context.Context) (*eventingv1.BrokerL return call.Result[0].(*eventingv1.BrokerList), mock.ErrorOrNil(call.Result[1]) } +// UpdateBroker records a call for UpdateBroker with the expected result and error (nil if none) +func (sr *EventingRecorder) UpdateBroker(broker *eventingv1.Broker, err error) { + sr.r.Add("UpdateBroker", []interface{}{broker}, []interface{}{err}) +} + +func (c *MockKnEventingClient) UpdateBroker(ctx context.Context, broker *eventingv1.Broker) error { + call := c.recorder.r.VerifyCall("UpdateBroker") + return mock.ErrorOrNil(call.Result[0]) +} + +func (c *MockKnEventingClient) UpdateBrokerWithRetry(ctx context.Context, name string, updateFunc BrokerUpdateFunc, nrRetries int) error { + return updateBrokerWithRetry(ctx, c, name, updateFunc, nrRetries) +} + // Validate validates whether every recorded action has been called func (sr *EventingRecorder) Validate() { sr.r.CheckThatAllRecordedMethodsHaveBeenCalled() diff --git a/vendor/knative.dev/client/pkg/kn/flags/podspec.go b/vendor/knative.dev/client/pkg/kn/flags/podspec.go index f79efab060..760941486b 100644 --- a/vendor/knative.dev/client/pkg/kn/flags/podspec.go +++ b/vendor/knative.dev/client/pkg/kn/flags/podspec.go @@ -40,6 +40,11 @@ type PodSpecFlags 
struct { Command []string Arg []string + LivenessProbe string + LivenessProbeOpts string + ReadinessProbe string + ReadinessProbeOpts string + ExtraContainers string Resources ResourceOptions @@ -137,18 +142,20 @@ func (p *PodSpecFlags) AddFlags(flagset *pflag.FlagSet) []string { flagNames = append(flagNames, "env-file") flagset.StringArrayVarP(&p.Mount, "mount", "", []string{}, - "Mount a ConfigMap (prefix cm: or config-map:), a Secret (prefix secret: or sc:), or an existing Volume (without any prefix) on the specified directory. "+ - "Example: --mount /mydir=cm:myconfigmap, --mount /mydir=secret:mysecret, or --mount /mydir=myvolume. "+ - "When a configmap or a secret is specified, a corresponding volume is automatically generated. "+ - "You can specify a volume subpath by following the volume name with slash separated path. "+ + "Mount a ConfigMap (prefix cm: or config-map:), a Secret (prefix secret: or sc:), an EmptyDir (prefix ed: or emptyDir:), "+ + "a PersistentVolumeClaim (prefix pvc: or persistentVolumeClaim) or an existing Volume (without any prefix) on the specified directory. "+ + "Example: --mount /mydir=cm:myconfigmap, --mount /mydir=secret:mysecret, --mount /mydir=emptyDir:myvol "+ + "or --mount /mydir=myvolume. When a configmap or a secret is specified, a corresponding volume is "+ + "automatically generated. You can specify a volume subpath by following the volume name with slash separated path. "+ "Example: --mount /mydir=cm:myconfigmap/subpath/to/be/mounted. "+ "You can use this flag multiple times. "+ "For unmounting a directory, append \"-\", e.g. --mount /mydir-, which also removes any auto-generated volume.") flagNames = append(flagNames, "mount") flagset.StringArrayVarP(&p.Volume, "volume", "", []string{}, - "Add a volume from a ConfigMap (prefix cm: or config-map:) or a Secret (prefix secret: or sc:). "+ - "Example: --volume myvolume=cm:myconfigmap or --volume myvolume=secret:mysecret. "+ + "Add a volume from a ConfigMap (prefix cm: or config-map:) a Secret (prefix secret: or sc:), "+ + "an EmptyDir (prefix ed: or emptyDir:) or a PersistentVolumeClaim (prefix pvc: or persistentVolumeClaim). "+ + "Example: --volume myvolume=cm:myconfigmap, --volume myvolume=secret:mysecret or --volume emptyDir:myvol:size=1Gi,type=Memory. "+ "You can use this flag multiple times. "+ "To unset a ConfigMap/Secret reference, append \"-\" to the name, e.g. --volume myvolume-.") flagNames = append(flagNames, "volume") @@ -175,6 +182,24 @@ func (p *PodSpecFlags) AddFlags(flagset *pflag.FlagSet) []string { "Example: --containers ./containers.yaml or --containers -.") flagNames = append(flagNames, "containers") + // Probes + commonProbeDescription := "Supported probe types are HTTGet, Exec and TCPSocket. " + + "Format: [http,https]:host:port:path, exec:cmd[,cmd,...], tcp:host:port." + commonProbeOptsDesc := "Common opts (comma separated, case insensitive): InitialDelaySeconds=, FailureThreshold=, " + + "SuccessThreshold=, PeriodSeconds=, TimeoutSeconds=" + flagset.StringVarP(&p.LivenessProbe, "probe-liveness", "", "", "Add liveness probe to Service deployment. "+ + commonProbeDescription) + flagNames = append(flagNames, "probe-liveness") + flagset.StringVarP(&p.LivenessProbeOpts, "probe-liveness-opts", "", "", "Add common options to liveness probe. "+ + commonProbeOptsDesc) + flagNames = append(flagNames, "probe-liveness-opts") + flagset.StringVarP(&p.ReadinessProbe, "probe-readiness", "", "", "Add readiness probe to Service deployment. 
"+ + commonProbeDescription) + flagNames = append(flagNames, "probe-readiness") + flagset.StringVarP(&p.ReadinessProbeOpts, "probe-readiness-opts", "", "", "Add common options to readiness probe. "+ + commonProbeOptsDesc) + flagNames = append(flagNames, "probe-liveness-opts") + flagset.StringSliceVar(&p.Resources.Limits, "limit", nil, @@ -354,5 +379,29 @@ func (p *PodSpecFlags) ResolvePodSpec(podSpec *corev1.PodSpec, flags *pflag.Flag UpdateContainers(podSpec, fromFile.Containers) } + if flags.Changed("probe-liveness") { + if err := UpdateLivenessProbe(podSpec, p.LivenessProbe); err != nil { + return err + } + } + + if flags.Changed("probe-liveness-opts") { + if err := UpdateLivenessProbeOpts(podSpec, p.LivenessProbeOpts); err != nil { + return err + } + } + + if flags.Changed("probe-readiness") { + if err := UpdateReadinessProbe(podSpec, p.ReadinessProbe); err != nil { + return err + } + } + + if flags.Changed("probe-readiness-opts") { + if err := UpdateReadinessProbeOpts(podSpec, p.ReadinessProbeOpts); err != nil { + return err + } + } + return nil } diff --git a/vendor/knative.dev/client/pkg/kn/flags/podspec_helper.go b/vendor/knative.dev/client/pkg/kn/flags/podspec_helper.go index e45824049f..b59f35daa1 100644 --- a/vendor/knative.dev/client/pkg/kn/flags/podspec_helper.go +++ b/vendor/knative.dev/client/pkg/kn/flags/podspec_helper.go @@ -20,6 +20,9 @@ import ( "strconv" "strings" + "k8s.io/apimachinery/pkg/util/intstr" + + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/yaml" corev1 "k8s.io/api/core/v1" @@ -35,6 +38,8 @@ type VolumeSourceType int const ( ConfigMapVolumeSourceType VolumeSourceType = iota SecretVolumeSourceType + EmptyDirVolumeSourceType + PVCVolumeSourceType PortFormatErr = "the port specification '%s' is not valid. Please provide in the format 'NAME:PORT', where 'NAME' is optional. Examples: '--port h2c:8080' , '--port 8080'." 
) @@ -307,6 +312,60 @@ func UpdateContainers(spec *corev1.PodSpec, containers []corev1.Container) { } } +// UpdateLivenessProbe updates container liveness probe based on provided string +func UpdateLivenessProbe(spec *corev1.PodSpec, probeString string) error { + c := containerOfPodSpec(spec) + handler, err := resolveProbeHandler(probeString) + if err != nil { + return err + } + if c.LivenessProbe == nil { + c.LivenessProbe = &corev1.Probe{} + } + c.LivenessProbe.ProbeHandler = *handler + return nil +} + +// UpdateLivenessProbeOpts updates container liveness probe commons options based on provided string +func UpdateLivenessProbeOpts(spec *corev1.PodSpec, probeString string) error { + c := containerOfPodSpec(spec) + if c.LivenessProbe == nil { + c.LivenessProbe = &corev1.Probe{} + } + err := resolveProbeOptions(c.LivenessProbe, probeString) + if err != nil { + return err + } + return nil +} + +// UpdateReadinessProbe updates container readiness probe based on provided string +func UpdateReadinessProbe(spec *corev1.PodSpec, probeString string) error { + c := containerOfPodSpec(spec) + handler, err := resolveProbeHandler(probeString) + if err != nil { + return err + } + if c.ReadinessProbe == nil { + c.ReadinessProbe = &corev1.Probe{} + } + c.ReadinessProbe.ProbeHandler = *handler + return nil +} + +// UpdateReadinessProbeOpts updates container readiness probe commons options based on provided string +func UpdateReadinessProbeOpts(spec *corev1.PodSpec, probeString string) error { + c := containerOfPodSpec(spec) + if c.ReadinessProbe == nil { + c.ReadinessProbe = &corev1.Probe{} + } + err := resolveProbeOptions(c.ReadinessProbe, probeString) + if err != nil { + return err + } + return nil +} + // UpdateImagePullPolicy updates the pull policy for the given revision template func UpdateImagePullPolicy(spec *corev1.PodSpec, imagePullPolicy string) error { container := containerOfPodSpec(spec) @@ -466,6 +525,10 @@ func updateVolume(volume *corev1.Volume, info *volumeSourceInfo) error { case SecretVolumeSourceType: volume.ConfigMap = nil volume.Secret = &corev1.SecretVolumeSource{SecretName: info.volumeSourceName} + case EmptyDirVolumeSourceType: + volume.EmptyDir = &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMedium(info.emptyDirMemoryType), SizeLimit: info.emptyDirSize} + case PVCVolumeSourceType: + volume.PersistentVolumeClaim = &corev1.PersistentVolumeClaimVolumeSource{ClaimName: info.volumeSourceName} default: return fmt.Errorf("Invalid VolumeSourceType") } @@ -486,8 +549,7 @@ func updateVolumeMountsFromMap(volumeMounts []corev1.VolumeMount, toUpdate *util if !existsVolumeNameInVolumes(name, volumes) { return nil, fmt.Errorf("There is no volume matched with %q", name) } - - volumeMount.ReadOnly = true + volumeMount.ReadOnly = isReadOnlyVolume(name, volumes) volumeMount.Name = name volumeMount.SubPath = volumeMountInfo.SubPath set[volumeMount.MountPath] = true @@ -501,7 +563,7 @@ func updateVolumeMountsFromMap(volumeMounts []corev1.VolumeMount, toUpdate *util if !set[mountPath] { volumeMounts = append(volumeMounts, corev1.VolumeMount{ Name: name, - ReadOnly: true, + ReadOnly: isReadOnlyVolume(name, volumes), MountPath: mountPath, SubPath: volumeMountInfo.SubPath, }) @@ -579,38 +641,67 @@ func removeVolumes(volumes []corev1.Volume, toRemove []string, volumeMounts []co // ======================================================================================= type volumeSourceInfo struct { - volumeSourceType VolumeSourceType - volumeSourceName string + volumeSourceType VolumeSourceType + 
volumeSourceName string + emptyDirMemoryType string + emptyDirSize *resource.Quantity } func newVolumeSourceInfoWithSpecString(spec string) (*volumeSourceInfo, error) { - slices := strings.SplitN(spec, ":", 2) - if len(slices) != 2 { - return nil, fmt.Errorf("argument requires a value that contains the : character; got %q", spec) + slices := strings.SplitN(spec, ":", 3) + if len(slices) < 2 { + return nil, fmt.Errorf("argument requires a value that contains the : character; got %q, %q", spec, slices) } - var volumeSourceType VolumeSourceType + if len(slices) == 2 { + var volumeSourceType VolumeSourceType + + typeString := strings.TrimSpace(slices[0]) + volumeSourceName := strings.TrimSpace(slices[1]) + + switch typeString { + case "config-map", "cm": + volumeSourceType = ConfigMapVolumeSourceType + case "secret", "sc": + volumeSourceType = SecretVolumeSourceType + case "emptyDir", "ed": + volumeSourceType = EmptyDirVolumeSourceType + case "persistentVolumeClaim", "pvc": + volumeSourceType = PVCVolumeSourceType + default: + return nil, fmt.Errorf("unsupported volume source type \"%q\"; supported volume source types are \"config-map\" and \"secret\"", slices[0]) + } - typeString := strings.TrimSpace(slices[0]) - volumeSourceName := strings.TrimSpace(slices[1]) + if len(volumeSourceName) == 0 { + return nil, fmt.Errorf("the name of %s cannot be an empty string", volumeSourceType) + } - switch typeString { - case "config-map", "cm": - volumeSourceType = ConfigMapVolumeSourceType - case "secret", "sc": - volumeSourceType = SecretVolumeSourceType - default: - return nil, fmt.Errorf("unsupported volume source type \"%q\"; supported volume source types are \"config-map\" and \"secret\"", slices[0]) - } + return &volumeSourceInfo{ + volumeSourceType: volumeSourceType, + volumeSourceName: volumeSourceName, + }, nil + } else { + typeString := strings.TrimSpace(slices[0]) + switch typeString { + case "config-map", "cm", "secret", "sc", "persistentVolumeClaim", "pvc": + return nil, fmt.Errorf("incorrect mount details for type %q", typeString) + case "emptyDir", "ed": + volName := slices[1] + edType, edSize, err := getEmptyDirTypeAndSize(slices[2]) + if err != nil { + return nil, err + } + return &volumeSourceInfo{ + volumeSourceType: EmptyDirVolumeSourceType, + volumeSourceName: volName, + emptyDirMemoryType: edType, + emptyDirSize: edSize, + }, nil + default: + return nil, fmt.Errorf("unsupported volume type \"%q\"; supported volume types are \"config-map or cm\", \"secret or sc\", \"volume or vo\", and \"emptyDir or ed\"", slices[0]) + } - if len(volumeSourceName) == 0 { - return nil, fmt.Errorf("the name of %s cannot be an empty string", volumeSourceType) } - - return &volumeSourceInfo{ - volumeSourceType: volumeSourceType, - volumeSourceName: volumeSourceName, - }, nil } func (vol *volumeSourceInfo) getCanonicalName() string { @@ -649,6 +740,15 @@ func (vol *volumeSourceInfo) createEnvFromSource() *corev1.EnvFromSource { // ======================================================================================= +func isReadOnlyVolume(volumeName string, volumes []corev1.Volume) bool { + for _, volume := range volumes { + if volume.Name == volumeName { + return volume.EmptyDir == nil + } + } + return true +} + func existsVolumeNameInVolumes(volumeName string, volumes []corev1.Volume) bool { for _, volume := range volumes { if volume.Name == volumeName { @@ -714,9 +814,27 @@ func reviseVolumeInfoAndMountsToUpdate(mountsToUpdate *util.OrderedMap, volumesT }) mountInfo.VolumeName = generatedName 
mountsToUpdateRevised.Set(path, mountInfo) - + case "emptyDir", "ed": + generatedName := util.GenerateVolumeName(path) + mountInfo := getMountInfo(slices[1]) + volumeSourceInfoByName.Set(generatedName, &volumeSourceInfo{ + volumeSourceType: EmptyDirVolumeSourceType, + volumeSourceName: slices[1], + emptyDirMemoryType: "", + }) + mountInfo.VolumeName = generatedName + mountsToUpdateRevised.Set(path, mountInfo) + case "persistentVolumeClaim", "pvc": + generatedName := util.GenerateVolumeName(path) + mountInfo := getMountInfo(slices[1]) + volumeSourceInfoByName.Set(generatedName, &volumeSourceInfo{ + volumeSourceType: PVCVolumeSourceType, + volumeSourceName: mountInfo.VolumeName, + }) + mountInfo.VolumeName = generatedName + mountsToUpdateRevised.Set(path, mountInfo) default: - return nil, nil, fmt.Errorf("unsupported volume type \"%q\"; supported volume types are \"config-map or cm\", \"secret or sc\", and \"volume or vo\"", slices[0]) + return nil, nil, fmt.Errorf("unsupported volume type \"%q\"; supported volume types are \"config-map or cm\", \"secret or sc\", \"volume or vo\", and \"emptyDir or ed\"", slices[0]) } } } @@ -733,6 +851,61 @@ func reviseVolumeInfoAndMountsToUpdate(mountsToUpdate *util.OrderedMap, volumesT return volumeSourceInfoByName, mountsToUpdateRevised, nil } +func getEmptyDirTypeAndSize(value string) (string, *resource.Quantity, error) { + slices := strings.SplitN(value, ",", 2) + formatErr := fmt.Errorf("incorrect format to specify emptyDir type") + repeatErrStr := "cannot repeat the key %q" + var dirType string + var size *resource.Quantity + switch len(slices) { + case 0: + return "", nil, nil + case 1: + typeSizeSlices := strings.SplitN(slices[0], "=", 2) + if len(typeSizeSlices) < 2 { + return "", nil, formatErr + } + switch strings.ToLower(typeSizeSlices[0]) { + case "type": + dirType = typeSizeSlices[1] + case "size": + quantity, err := resource.ParseQuantity(typeSizeSlices[1]) + if err != nil { + return "", nil, formatErr + } + size = &quantity + default: + return "", nil, formatErr + } + case 2: + for _, slice := range slices { + typeSizeSlices := strings.SplitN(slice, "=", 2) + if len(typeSizeSlices) < 2 { + return "", nil, formatErr + } + switch strings.ToLower(typeSizeSlices[0]) { + case "type": + if dirType != "" { + return "", nil, fmt.Errorf(repeatErrStr, "type") + } + dirType = typeSizeSlices[1] + case "size": + if size != nil { + return "", nil, fmt.Errorf(repeatErrStr, "size") + } + quantity, err := resource.ParseQuantity(typeSizeSlices[1]) + if err != nil { + return "", nil, formatErr + } + size = &quantity + default: + return "", nil, formatErr + } + } + } + return dirType, size, nil +} + func reviseVolumesToRemove(volumeMounts []corev1.VolumeMount, volumesToRemove []string, mountsToRemove []string) []string { for _, pathToRemove := range mountsToRemove { for _, volumeMount := range volumeMounts { @@ -762,3 +935,112 @@ func decodeContainersFromFile(filename string) (*corev1.PodSpec, error) { } return podSpec, nil } + +// ======================================================================================= +// Probes + +// resolveProbe parses probes as a string +// It's split into two functions: +// - resolveProbeOptions() -> common probe opts +// - resolveProbeHandler() -> probe handler [HTTPGet, Exec, TCPSocket] +// Format: +// - [http,https]:host:port:path +// - exec:cmd,cmd,... 
+// - tcp:host:port +// Common opts (comma separated, case insensitive): +// - InitialDelaySeconds=,FailureThreshold=, +// SuccessThreshold=,PeriodSeconds==,TimeoutSeconds= + +// resolveProbeOptions parses probe commons options +func resolveProbeOptions(probe *corev1.Probe, probeString string) error { + options := strings.Split(probeString, ",") + mappedOptions, err := util.MapFromArray(options, "=") + if err != nil { + return err + } + for k, v := range mappedOptions { + // Trim & verify value is convertible to int + intValue, err := strconv.ParseInt(strings.TrimSpace(v), 0, 32) + if err != nil { + return fmt.Errorf("not a nummeric value for parameter '%s'", k) + } + // Lower case param name mapping + switch strings.TrimSpace(strings.ToLower(k)) { + case "initialdelayseconds": + probe.InitialDelaySeconds = int32(intValue) + case "timeoutseconds": + probe.TimeoutSeconds = int32(intValue) + case "periodseconds": + probe.PeriodSeconds = int32(intValue) + case "successthreshold": + probe.SuccessThreshold = int32(intValue) + case "failurethreshold": + probe.FailureThreshold = int32(intValue) + default: + return fmt.Errorf("not a valid probe parameter name '%s'", k) + } + } + return nil +} + +// resolveProbeHandler parses probe handler options +func resolveProbeHandler(probeString string) (*corev1.ProbeHandler, error) { + if len(probeString) == 0 { + return nil, fmt.Errorf("no probe parameters detected") + } + probeParts := strings.Split(probeString, ":") + if len(probeParts) > 4 { + return nil, fmt.Errorf("too many probe parameters provided, please check the format") + } + var probeHandler *corev1.ProbeHandler + switch probeParts[0] { + case "http", "https": + if len(probeParts) != 4 { + return nil, fmt.Errorf("unexpected probe format, please use 'http:host:port:path'") + } + handler := corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{}, + } + if probeParts[0] == "https" { + handler.HTTPGet.Scheme = v1.URISchemeHTTPS + } + handler.HTTPGet.Host = probeParts[1] + if probeParts[2] != "" { + // Cosmetic fix to have default 'port: 0' instead of empty string 'port: ""' + handler.HTTPGet.Port = intstr.Parse(probeParts[2]) + } + handler.HTTPGet.Path = probeParts[3] + + probeHandler = &handler + case "exec": + if len(probeParts) != 2 { + return nil, fmt.Errorf("unexpected probe format, please use 'exec:[,,...]'") + } + if len(probeParts[1]) == 0 { + return nil, fmt.Errorf("at least one command parameter is required for Exec probe") + } + handler := corev1.ProbeHandler{ + Exec: &corev1.ExecAction{}, + } + cmd := strings.Split(probeParts[1], ",") + handler.Exec.Command = cmd + + probeHandler = &handler + case "tcp": + if len(probeParts) != 3 { + return nil, fmt.Errorf("unexpected probe format, please use 'tcp:host:port") + } + handler := corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{}, + } + handler.TCPSocket.Host = probeParts[1] + handler.TCPSocket.Port = intstr.Parse(probeParts[2]) + + probeHandler = &handler + default: + return nil, fmt.Errorf("unsupported probe type '%s'; supported types: http, https, exec, tcp", probeParts[0]) + } + return probeHandler, nil +} + +// ======================================================================================= diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go index bcff34b995..64c3fb2db3 100644 --- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go +++ 
b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go @@ -168,22 +168,6 @@ func ValidateAttributesNames(attrs map[string]string) (errs *apis.FieldError) { return errs } -func ValidateSingleAttributeMap(expr map[string]string) (errs *apis.FieldError) { - if len(expr) == 0 { - return nil - } - - if len(expr) != 1 { - return apis.ErrGeneric("Multiple items found, can have only one key-value", apis.CurrentField) - } - for attr := range expr { - if !validAttributeName.MatchString(attr) { - errs = errs.Also(apis.ErrInvalidKeyName(attr, apis.CurrentField, "Attribute name must start with a letter and can only contain lowercase alphanumeric").ViaKey(attr)) - } - } - return errs -} - func ValidateSubscriptionAPIFiltersList(ctx context.Context, filters []SubscriptionsAPIFilter) (errs *apis.FieldError) { if filters == nil || !feature.FromContext(ctx).IsEnabled(feature.NewTriggerFilters) { return nil @@ -221,11 +205,11 @@ func ValidateSubscriptionAPIFilter(ctx context.Context, filter *SubscriptionsAPI errs = errs.Also( ValidateOneOf(filter), ).Also( - ValidateSingleAttributeMap(filter.Exact).ViaField("exact"), + ValidateAttributesNames(filter.Exact).ViaField("exact"), ).Also( - ValidateSingleAttributeMap(filter.Prefix).ViaField("prefix"), + ValidateAttributesNames(filter.Prefix).ViaField("prefix"), ).Also( - ValidateSingleAttributeMap(filter.Suffix).ViaField("suffix"), + ValidateAttributesNames(filter.Suffix).ViaField("suffix"), ).Also( ValidateSubscriptionAPIFiltersList(ctx, filter.All).ViaField("all"), ).Also( diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go index 4bd637cbe0..ec80fea20e 100644 --- a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go +++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go @@ -90,10 +90,9 @@ type SubscriptionSpec struct { // etc.) Channel duckv1.KReference `json:"channel"` - // Subscriber is reference to (optional) function for processing events. + // Subscriber is reference to function for processing events. // Events from the Channel will be delivered here and replies are // sent to a Destination as specified by the Reply. - // +optional Subscriber *duckv1.Destination `json:"subscriber,omitempty"` // Reply specifies (optionally) how to handle events returned from diff --git a/vendor/knative.dev/hack/README.md b/vendor/knative.dev/hack/README.md index de565f7c54..89cae268af 100644 --- a/vendor/knative.dev/hack/README.md +++ b/vendor/knative.dev/hack/README.md @@ -12,29 +12,14 @@ This is a helper script to run the presubmit tests. To use it: 1. [optional] Define the function `build_tests()`. If you don't define this function, the default action for running the build tests is to: - - check markdown files - run `go build` on the entire repo - run `/hack/verify-codegen.sh` (if it exists) - check licenses in all go packages - The markdown link checker tool doesn't check `localhost` links by default. - Its configuration file, `markdown-link-check-config.json`, lives in the - `hack` directory. To override it, create a file with the same name, - containing the custom config in the `/test` directory. - - The markdown lint tool ignores long lines by default. Its configuration file, - `markdown-lint-config.rc`, lives in the `hack` repo. To override it, create a - file with the same name, containing the custom config in the `/test` - directory. - 1. 
[optional] Customize the default build test runner, if you're using it. Set the following environment variables if the default values don't fit your needs: - - `DISABLE_MD_LINTING`: Disable linting markdown files, defaults to 0 - (false). - - `DISABLE_MD_LINK_CHECK`: Disable checking links in markdown files, defaults - to 0 (false). - `PRESUBMIT_TEST_FAIL_FAST`: Fail the presubmit test immediately if a test fails, defaults to 0 (false). diff --git a/vendor/knative.dev/hack/e2e-tests.sh b/vendor/knative.dev/hack/e2e-tests.sh index 9178eac2cc..57222f32cb 100644 --- a/vendor/knative.dev/hack/e2e-tests.sh +++ b/vendor/knative.dev/hack/e2e-tests.sh @@ -137,13 +137,17 @@ CLOUD_PROVIDER="gke" function initialize() { local run_tests=0 local custom_flags=() + local parse_script_flags=0 E2E_SCRIPT="$(get_canonical_path "$0")" local e2e_script_command=( "${E2E_SCRIPT}" "--run-tests" ) + for i in "$@"; do + if [[ $i == "--run-tests" ]]; then parse_script_flags=1; fi + done + cd "${REPO_ROOT_DIR}" while [[ $# -ne 0 ]]; do local parameter=$1 - # TODO(chizhg): remove parse_flags logic if no repos are using it. # Try parsing flag as a custom one. if function_exists parse_flags; then parse_flags "$@" @@ -152,7 +156,10 @@ function initialize() { # Skip parsed flag (and possibly argument) and continue # Also save it to it's passed through to the test script for ((i=1;i<=skip;i++)); do - e2e_script_command+=("$1") + # Avoid double-parsing + if (( parse_script_flags )); then + e2e_script_command+=("$1") + fi shift done continue diff --git a/vendor/knative.dev/hack/infra-library.sh b/vendor/knative.dev/hack/infra-library.sh index a96210e2e1..0d0e002f4a 100644 --- a/vendor/knative.dev/hack/infra-library.sh +++ b/vendor/knative.dev/hack/infra-library.sh @@ -17,7 +17,7 @@ # This is a collection of functions for infra related setups, mainly # cluster provisioning. It doesn't do anything when called from command line. -source $(dirname "${BASH_SOURCE[0]}")/library.sh +source "$(dirname "${BASH_SOURCE[0]:-$0}")/library.sh" # Dumps the k8s api server metrics. Spins up a proxy, waits a little bit and # dumps the metrics to ${ARTIFACTS}/k8s.metrics.txt diff --git a/vendor/knative.dev/hack/library.sh b/vendor/knative.dev/hack/library.sh index 895800b31d..26cd00cb8c 100644 --- a/vendor/knative.dev/hack/library.sh +++ b/vendor/knative.dev/hack/library.sh @@ -40,7 +40,7 @@ fi readonly IS_PROW [[ ! -v REPO_ROOT_DIR ]] && REPO_ROOT_DIR="$(git rev-parse --show-toplevel)" readonly REPO_ROOT_DIR -readonly REPO_NAME="$(basename ${REPO_ROOT_DIR})" +readonly REPO_NAME="${REPO_NAME:-$(basename "${REPO_ROOT_DIR}")}" # Useful flags about the current OS IS_LINUX=0 @@ -56,10 +56,15 @@ readonly IS_LINUX readonly IS_OSX readonly IS_WINDOWS +export TMPDIR="${TMPDIR:-$(mktemp -u -t -d knative.XXXXXXXX)}" +mkdir -p "$TMPDIR" # Set ARTIFACTS to an empty temp dir if unset if [[ -z "${ARTIFACTS:-}" ]]; then - export ARTIFACTS="$(mktemp -d)" + ARTIFACTS="$(mktemp -u -t -d)" + export ARTIFACTS fi +mkdir -p "$ARTIFACTS" + # On a Prow job, redirect stderr to stdout so it's synchronously added to log (( IS_PROW )) && exec 2>&1 @@ -105,7 +110,7 @@ function make_banner() { echo -e "${border}\n${msg}\n${border}" # TODO(adrcunha): Remove once logs have timestamps on Prow # For details, see https://github.com/kubernetes/test-infra/issues/10100 - echo -e "$1$1$1$1 $(TZ='America/Los_Angeles' date)\n${border}" + echo -e "$1$1$1$1 $(TZ='UTC' date)\n${border}" } # Simple header for logging purposes. 
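Editorial aside, not part of the diff: the library.sh hunks below replace the old install-then-run `run_go_tool` flow with a `go_run` helper that executes a version-pinned package directly through `go run`, falling back to a private GOPATH when the ambient one is not writable. A minimal sketch of the calling convention, assuming `library.sh` has been sourced; the concrete invocations are illustrative only.

```bash
#!/usr/bin/env bash
# Sketch only: assumes knative.dev/hack/library.sh has been sourced so that
# go_run and run_go_tool are defined. Invocations below are hypothetical.

# go_run aborts unless the package path carries an explicit @version.
go_run gotest.tools/gotestsum@v1.8.0 --format testname -- ./...
go_run github.com/google/go-licenses@v1.2.0 check ./...

# run_go_tool survives only as a deprecated shim: it appends @latest when no
# version is given, ignores the now-unused tool-name argument, and forwards
# everything else to go_run.
run_go_tool knative.dev/test-infra/tools/kntest/cmd/kntest kntest \
  junit --suite=_go_tests --name=GoTests --err-msg="" --dest=/tmp/junit.xml
```

The same pattern is what `run_kntest`, `update_licenses`, `check_licenses`, and `hub_tool` switch to in the hunks that follow.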
@@ -429,7 +434,7 @@ function mktemp_with_extension() { function create_junit_xml() { local xml xml="$(mktemp_with_extension "${ARTIFACTS}"/junit_XXXXXXXX xml)" - echo "JUnit file ${xml} is created for reporting the test result" + echo "XML report for $1::$2 written to ${xml}" run_kntest junit --suite="$1" --name="$2" --err-msg="$3" --dest="${xml}" || return 1 } @@ -437,37 +442,38 @@ function create_junit_xml() { # Parameters: $1... - parameters to go test function report_go_test() { local go_test_args=( "$@" ) - # Install gotestsum if necessary. - run_go_tool gotest.tools/gotestsum gotestsum --help > /dev/null 2>&1 - # Capture the test output to the report file. - local report - report="$(mktemp)" - local xml + local logfile xml ansilog htmllog xml="$(mktemp_with_extension "${ARTIFACTS}"/junit_XXXXXXXX xml)" + # Keep the suffix, so files are related. + logfile="${xml/junit_/go_test_}" + logfile="${logfile/.xml/.jsonl}" echo "Running go test with args: ${go_test_args[*]}" - capture_output "${report}" gotestsum --format "${GO_TEST_VERBOSITY:-testname}" \ - --junitfile "${xml}" --junitfile-testsuite-name relative --junitfile-testcase-classname relative \ + go_run gotest.tools/gotestsum@v1.8.0 \ + --format "${GO_TEST_VERBOSITY:-testname}" \ + --junitfile "${xml}" \ + --junitfile-testsuite-name relative \ + --junitfile-testcase-classname relative \ + --jsonfile "${logfile}" \ -- "${go_test_args[@]}" - local failed=$? - echo "Finished run, return code is ${failed}" + local gotest_retcode=$? + echo "Finished run, return code is ${gotest_retcode}" echo "XML report written to ${xml}" - if [[ -n "$(grep '' "${xml}")" ]]; then - # XML report is empty, something's wrong; use the output as failure reason - create_junit_xml _go_tests "GoTests" "$(cat "${report}")" - fi - # Capture and report any race condition errors - local race_errors - race_errors="$(sed -n '/^WARNING: DATA RACE$/,/^==================$/p' "${report}")" - create_junit_xml _go_tests "DataRaceAnalysis" "${race_errors}" - if (( ! IS_PROW )); then - # Keep the suffix, so files are related. - local logfile=${xml/junit_/go_test_} - logfile=${logfile/.xml/.log} - cp "${report}" "${logfile}" - echo "Test log written to ${logfile}" - fi - return ${failed} + echo "Test log (JSONL) written to ${logfile}" + + ansilog="${logfile/.jsonl/-ansi.log}" + go_run github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@v2.3.1 \ + -input "${logfile}" \ + -showteststatus \ + -nofail > "$ansilog" + echo "Test log (ANSI) written to ${ansilog}" + + htmllog="${logfile/.jsonl/.html}" + go_run github.com/buildkite/terminal-to-html/v3/cmd/terminal-to-html@v3.6.1 \ + --preview < "$ansilog" > "$htmllog" + echo "Test log (HTML) written to ${htmllog}" + + return ${gotest_retcode} } # Install Knative Serving in the current cluster. @@ -543,24 +549,41 @@ function start_latest_eventing_sugar_controller() { start_knative_eventing_extension "${KNATIVE_EVENTING_SUGAR_CONTROLLER_RELEASE}" "knative-eventing" } +# Run a go utility without installing it. +# Parameters: $1 - tool package for go run. +# $2..$n - parameters passed to the tool. +function go_run() { + local package + package="$1" + if [[ "$package" != *@* ]]; then + abort 'Package for "go_run" needs to have @version' + fi + shift 1 + GORUN_PATH="${GORUN_PATH:-$(go env GOPATH)}" + # Some CI environments may have non-writable GOPATH + if ! 
[ -w "${GORUN_PATH}" ]; then + GORUN_PATH="$(mktemp -t -d -u gopath.XXXXXXXX)" + fi + export GORUN_PATH + GOPATH="${GORUN_PATH}" \ + GOFLAGS='' \ + GO111MODULE='' \ + go run "$package" "$@" +} + # Run a go tool, installing it first if necessary. # Parameters: $1 - tool package/dir for go install. # $2 - tool to run. # $3..$n - parameters passed to the tool. +# Deprecated: use go_run instead function run_go_tool() { local package=$1 - local tool=$2 - local install_failed=0 # If no `@version` is provided, default to adding `@latest` if [[ "$package" != *@* ]]; then package=$package@latest fi - if [[ -z "$(which ${tool})" ]]; then - GOFLAGS="" go install "$package" || install_failed=1 - fi - (( install_failed )) && return ${install_failed} shift 2 - ${tool} "$@" + go_run "$package" "$@" } # Add function call to trap @@ -623,7 +646,7 @@ function go_update_deps() { else group "Upgrading to release ${RELEASE}" fi - FLOATING_DEPS+=( $(run_go_tool knative.dev/test-infra/buoy buoy float ${REPO_ROOT_DIR}/go.mod "${buoyArgs[@]}") ) + FLOATING_DEPS+=( $(go_run knative.dev/test-infra/buoy@latest float ${REPO_ROOT_DIR}/go.mod "${buoyArgs[@]}") ) if [[ ${#FLOATING_DEPS[@]} > 0 ]]; then echo "Floating deps to ${FLOATING_DEPS[@]}" go get -d ${FLOATING_DEPS[@]} @@ -684,13 +707,10 @@ function go_mod_gopath_hack() { echo "${TMP_DIR}" } -# Run kntest tool, error out and ask users to install it if it's not currently installed. +# Run kntest tool # Parameters: $1..$n - parameters passed to the tool. function run_kntest() { - if [[ ! -x "$(command -v kntest)" ]]; then - echo "--- FAIL: kntest not installed, please clone knative test-infra repo and run \`go install ./tools/kntest/cmd/kntest\` to install it"; return 1; - fi - kntest "$@" + go_run knative.dev/test-infra/tools/kntest/cmd/kntest@latest "$@" } # Run go-licenses to update licenses. @@ -701,14 +721,16 @@ function update_licenses() { local dst=$1 local dir=$2 shift - run_go_tool github.com/google/go-licenses go-licenses save "${dir}" --save_path="${dst}" --force || \ + go_run github.com/google/go-licenses@v1.2.0 \ + save "${dir}" --save_path="${dst}" --force || \ { echo "--- FAIL: go-licenses failed to update licenses"; return 1; } } # Run go-licenses to check for forbidden licenses. function check_licenses() { # Check that we don't have any forbidden licenses. - run_go_tool github.com/google/go-licenses go-licenses check "${REPO_ROOT_DIR}/..." || \ + go_run github.com/google/go-licenses@v1.2.0 \ + check "${REPO_ROOT_DIR}/..." || \ { echo "--- FAIL: go-licenses failed the license check"; return 1; } } diff --git a/vendor/knative.dev/hack/presubmit-tests.sh b/vendor/knative.dev/hack/presubmit-tests.sh index 5f219d9a5d..d9549f51b2 100644 --- a/vendor/knative.dev/hack/presubmit-tests.sh +++ b/vendor/knative.dev/hack/presubmit-tests.sh @@ -185,7 +185,7 @@ function run_unit_tests() { # Default unit test runner that runs all go tests in the repo. function default_unit_test_runner() { - report_go_test -race -count 1 ./... + report_go_test -short -race -count 1 ./... } # Run integration tests. If there's no `integration_tests` function, run the diff --git a/vendor/knative.dev/hack/release.sh b/vendor/knative.dev/hack/release.sh index 0448a77525..6944310ac9 100644 --- a/vendor/knative.dev/hack/release.sh +++ b/vendor/knative.dev/hack/release.sh @@ -18,7 +18,6 @@ # See README.md for instructions on how to use it. source $(dirname "${BASH_SOURCE[0]}")/library.sh -set -x # Organization name in GitHub; defaults to Knative. 
readonly ORG_NAME="${ORG_NAME:-knative}" @@ -108,7 +107,8 @@ export GITHUB_TOKEN="" # Convenience function to run the hub tool. # Parameters: $1..$n - arguments to hub. function hub_tool() { - run_go_tool github.com/github/hub hub $@ + # Pinned to SHA because of https://github.com/github/hub/issues/2517 + go_run github.com/github/hub/v2@363513a "$@" } # Shortcut to "git push" that handles authentication. diff --git a/vendor/knative.dev/networking/pkg/apis/networking/register.go b/vendor/knative.dev/networking/pkg/apis/networking/register.go index 80751d127d..f7bdd81d7d 100644 --- a/vendor/knative.dev/networking/pkg/apis/networking/register.go +++ b/vendor/knative.dev/networking/pkg/apis/networking/register.go @@ -22,6 +22,9 @@ const ( // GroupName is the name for the networking API group. GroupName = "networking.internal.knative.dev" + // CertifcateUIDLabelKey is used to specify a label selector for informers listing ingress secrets. + CertificateUIDLabelKey = GroupName + "/certificate-uid" + // IngressLabelKey is the label key attached to underlying network programming // resources to indicate which Ingress triggered their creation. IngressLabelKey = GroupName + "/ingress" @@ -104,6 +107,12 @@ const ( // WildcardCertDomainLabelKey is the label key attached to a certificate to indicate the // domain for which it was issued. WildcardCertDomainLabelKey = PublicGroupName + "/wildcardDomain" + + // VisibilityLabelKey is the label to indicate visibility of Route + // and KServices. It can be an annotation too but since users are + // already using labels for domain, it probably best to keep this + // consistent. + VisibilityLabelKey = PublicGroupName + "/visibility" ) // Pseudo-constants diff --git a/vendor/knative.dev/networking/pkg/network.go b/vendor/knative.dev/networking/pkg/config/config.go similarity index 59% rename from vendor/knative.dev/networking/pkg/network.go rename to vendor/knative.dev/networking/pkg/config/config.go index fef8135305..30580161a8 100644 --- a/vendor/knative.dev/networking/pkg/network.go +++ b/vendor/knative.dev/networking/pkg/config/config.go @@ -1,11 +1,11 @@ /* -Copyright 2018 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,65 +14,49 @@ See the License for the specific language governing permissions and limitations under the License. */ -package pkg +package config import ( "bytes" "errors" "fmt" "io/ioutil" - "net/http" "net/url" "strings" "text/template" - lru "github.com/hashicorp/golang-lru" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/lru" cm "knative.dev/pkg/configmap" "sigs.k8s.io/yaml" ) -const ( - // ProbePath is the name of a path that activator, autoscaler and - // prober(used by KIngress generally) use for health check. - ProbePath = "/healthz" - - // ProbeHeaderName is the name of a header that can be added to - // requests to probe the knative networking layer. Requests - // with this header will not be passed to the user container or - // included in request metrics. 
- ProbeHeaderName = "K-Network-Probe" - - // ProxyHeaderName is the name of an internal header that activator - // uses to mark requests going through it. - ProxyHeaderName = "K-Proxy-Request" - - // HashHeaderName is the name of an internal header that Ingress controller - // uses to find out which version of the networking config is deployed. - HashHeaderName = "K-Network-Hash" - - // HashHeaderValue is the value that must appear in the HashHeaderName - // header in order for our network hash to be injected. - HashHeaderValue = "override" - - // OriginalHostHeader is used to avoid Istio host based routing rules - // in Activator. - // The header contains the original Host value that can be rewritten - // at the Queue proxy level back to be a host header. - OriginalHostHeader = "K-Original-Host" +var ( + templateCache *lru.Cache + + // Verify the default templates are valid. + _ = template.Must(template.New("domain-template").Parse(DefaultDomainTemplate)) + _ = template.Must(template.New("tag-template").Parse(DefaultTagTemplate)) +) +func init() { + // The only failure is due to negative size. + // Store ~10 latest templates per template type. + templateCache = lru.New(10 * 2) +} + +const ( // ConfigName is the name of the configmap containing all // customizations for networking features. - ConfigName = "config-network" + ConfigMapName = "config-network" - // DefaultIngressClassKey is the name of the configuration entry - // that specifies the default Ingress. - DefaultIngressClassKey = "ingress-class" + // DefaultDomainTemplate is the default golang template to use when + // constructing the Knative Route's Domain(host) + DefaultDomainTemplate = "{{.Name}}.{{.Namespace}}.{{.Domain}}" - // DefaultCertificateClassKey is the name of the configuration entry - // that specifies the default Certificate. - DefaultCertificateClassKey = "certificate-class" + // DefaultTagTemplate is the default golang template to use when + // constructing the Knative Route's tag names. + DefaultTagTemplate = "{{.Tag}}-{{.Name}}" // IstioIngressClassName value for specifying knative's Istio // Ingress reconciler. @@ -82,42 +66,13 @@ const ( // Certificate reconciler. CertManagerCertificateClassName = "cert-manager.certificate.networking.knative.dev" - // DomainTemplateKey is the name of the configuration entry that - // specifies the golang template string to use to construct the - // Knative service's DNS name. - DomainTemplateKey = "domain-template" - - // TagTemplateKey is the name of the configuration entry that - // specifies the golang template string to use to construct the - // hostname for a Route's tag. - TagTemplateKey = "tag-template" - - // RolloutDurationKey is the name of the configuration entry - // that specifies the default duration of the configuration rollout. - RolloutDurationKey = "rollout-duration" - - // NamespaceWildcardCertSelectorKey is the name of the configuration - // entry that specifies a LabelSelector to control which namespaces - // have a wildcard certificate provisioned for them. - NamespaceWildcardCertSelectorKey = "namespace-wildcard-cert-selector" - - // KubeProbeUAPrefix is the user agent prefix of the probe. - // Since K8s 1.8, prober requests have - // User-Agent = "kube-probe/{major-version}.{minor-version}". - KubeProbeUAPrefix = "kube-probe/" - - // KubeletProbeHeaderName is the name of the header supplied by kubelet - // probes. Istio with mTLS rewrites probes, but their probes pass a - // different user-agent. So we augment the probes with this header. 
- KubeletProbeHeaderName = "K-Kubelet-Probe" - - // DefaultDomainTemplate is the default golang template to use when - // constructing the Knative Route's Domain(host) - DefaultDomainTemplate = "{{.Name}}.{{.Namespace}}.{{.Domain}}" + // ServingInternalCertName is the name of secret contains certificates in serving + // system namespace. + ServingInternalCertName = "knative-serving-certs" +) - // DefaultTagTemplate is the default golang template to use when - // constructing the Knative Route's tag names. - DefaultTagTemplate = "{{.Tag}}-{{.Name}}" +// Config Keys +const ( // AutocreateClusterDomainClaimsKey is the key for the // AutocreateClusterDomainClaims property. @@ -127,90 +82,90 @@ const ( // that specifies enabling auto-TLS or not. AutoTLSKey = "auto-tls" - // HTTPProtocolKey is the name of the configuration entry that - // specifies the HTTP endpoint behavior of Knative ingress. - HTTPProtocolKey = "http-protocol" - - // UserAgentKey is the constant for header "User-Agent". - UserAgentKey = "User-Agent" - - // ActivatorUserAgent is the user-agent header value set in probe requests sent - // from activator. - ActivatorUserAgent = "Knative-Activator-Probe" + // DefaultCertificateClassKey is the name of the configuration entry + // that specifies the default Certificate. + DefaultCertificateClassKey = "certificate-class" - // QueueProxyUserAgent is the user-agent header value set in probe requests sent - // from queue-proxy. - QueueProxyUserAgent = "Knative-Queue-Proxy-Probe" + // DefaultExternalSchemeKey is the config for defining the scheme of external URLs. + DefaultExternalSchemeKey = "default-external-scheme" - // IngressReadinessUserAgent is the user-agent header value - // set in probe requests for Ingress status. - IngressReadinessUserAgent = "Knative-Ingress-Probe" + // DefaultIngressClassKey is the name of the configuration entry + // that specifies the default Ingress. + DefaultIngressClassKey = "ingress-class" - // AutoscalingUserAgent is the user-agent header value set in probe - // requests sent by autoscaling implementations. - AutoscalingUserAgent = "Knative-Autoscaling-Probe" + // DomainTemplateKey is the name of the configuration entry that + // specifies the golang template string to use to construct the + // Knative service's DNS name. + DomainTemplateKey = "domain-template" - // TagHeaderName is the name of the header entry which has a tag name as value. - // The tag name specifies which route was expected to be chosen by Ingress. - TagHeaderName = "Knative-Serving-Tag" + // EnableMeshPodAddressabilityKey is the config for enabling pod addressability in mesh. + EnableMeshPodAddressabilityKey = "enable-mesh-pod-addressability" - // DefaultRouteHeaderName is the name of the header entry - // identifying whether a request is routed via the default route or not. - // It has one of the string value "true" or "false". - DefaultRouteHeaderName = "Knative-Serving-Default-Route" + // HTTPProtocolKey is the name of the configuration entry that + // specifies the HTTP endpoint behavior of Knative ingress. + HTTPProtocolKey = "http-protocol" - // ProtoAcceptContent is the content type to be used when autoscaler scrapes metrics from the QP - ProtoAcceptContent = "application/protobuf" + // MeshCompatibilityModeKey is the config for selecting the mesh compatibility mode. + MeshCompatibilityModeKey = "mesh-compatibility-mode" - // FlushInterval controls the time when we flush the connection in the - // reverse proxies (Activator, QP). 
- // As of go1.16, a FlushInterval of 0 (the default) still flushes immediately - // when Content-Length is -1, which means the default works properly for - // streaming/websockets, without flushing more often than necessary for - // non-streaming requests. - FlushInterval = 0 + // NamespaceWildcardCertSelectorKey is the name of the configuration + // entry that specifies a LabelSelector to control which namespaces + // have a wildcard certificate provisioned for them. + NamespaceWildcardCertSelectorKey = "namespace-wildcard-cert-selector" - // VisibilityLabelKey is the label to indicate visibility of Route - // and KServices. It can be an annotation too but since users are - // already using labels for domain, it probably best to keep this - // consistent. - VisibilityLabelKey = "networking.knative.dev/visibility" + // RolloutDurationKey is the name of the configuration entry + // that specifies the default duration of the configuration rollout. + RolloutDurationKey = "rollout-duration" - // PassthroughLoadbalancingHeaderName is the name of the header that directs - // load balancers to not load balance the respective request but to - // send it to the request's target directly. - PassthroughLoadbalancingHeaderName = "K-Passthrough-Lb" + // TagTemplateKey is the name of the configuration entry that + // specifies the golang template string to use to construct the + // hostname for a Route's tag. + TagTemplateKey = "tag-template" - // EnableMeshPodAddressabilityKey is the config for enabling pod addressability in mesh. - EnableMeshPodAddressabilityKey = "enable-mesh-pod-addressability" + // InternalEncryptionKey is the name of the configuration whether + // internal traffic is encrypted or not. + InternalEncryptionKey = "internal-encryption" +) - // MeshCompatibilityModeKey is the config for selecting the mesh compatibility mode. - MeshCompatibilityModeKey = "mesh-compatibility-mode" +// HTTPProtocol indicates a type of HTTP endpoint behavior +// that Knative ingress could take. +type HTTPProtocol string - // DefaultExternalSchemeKey is the config for defining the scheme of external URLs. - DefaultExternalSchemeKey = "default-external-scheme" +const ( + // HTTPEnabled represents HTTP protocol is enabled in Knative ingress. + HTTPEnabled HTTPProtocol = "enabled" - // ActivatorCAKey is the config for the secret name, which stores CA public certificate used - // to sign the activator TLS certificate. - ActivatorCAKey = "activator-ca" + // HTTPDisabled represents HTTP protocol is disabled in Knative ingress. + HTTPDisabled HTTPProtocol = "disabled" - // ActivatorSANKey is the config for the SAN used to validate the activator TLS certificate. - ActivatorSANKey = "activator-san" + // HTTPRedirected represents HTTP connection is redirected to HTTPS in Knative ingress. + HTTPRedirected HTTPProtocol = "redirected" +) - // ActivatorCertKey is the config for the secret name, which stores certificates - // to serve the TLS traffic from ingress to activator. - ActivatorCertKey = "activator-cert-secret" +// MeshCompatibilityMode is one of enabled (always use ClusterIP), disabled +// (always use Pod IP), or auto (try PodIP, and fall back to ClusterIP if mesh +// is detected). +type MeshCompatibilityMode string - // QueueProxyCAKey is the config for the secret name, which stores CA public certificate used - // to sign the queue-proxy TLS certificate. 
- QueueProxyCAKey = "queue-proxy-ca" +const ( + // MeshCompatibilityModeEnabled instructs consumers of network plugins, such as + // Knative Serving, to use ClusterIP when connecting to pods. This is + // required when mesh is enabled (unless EnableMeshPodAddressability is set), + // but is less efficient. + MeshCompatibilityModeEnabled MeshCompatibilityMode = "enabled" - // QueueProxySANKey is the config for the SAN used to validate the queue-proxy TLS certificate. - QueueProxySANKey = "queue-proxy-san" + // MeshCompatibilityModeDisabled instructs consumers of network plugins, such as + // Knative Serving, to connect to individual Pod IPs. This is most efficient, + // but will only work with mesh enabled when EnableMeshPodAddressability is + // used. + MeshCompatibilityModeDisabled MeshCompatibilityMode = "disabled" - // QueueProxyCertKey is the config for the secret name, which stores certificates - // to serve the TLS traffic from activator to queue-proxy. - QueueProxyCertKey = "queue-proxy-cert-secret" + // MeshCompatibilityModeAuto instructs consumers of network plugins, such as + // Knative Serving, to heuristically determine whether to connect using the + // Cluster IP, or to ocnnect to individual Pod IPs. This is most efficient, + // determine whether mesh is enabled, and fall back from Direct Pod IP + // communication to Cluster IP as needed. + MeshCompatibilityModeAuto MeshCompatibilityMode = "auto" ) // DomainTemplateValues are the available properties people can choose from @@ -232,20 +187,6 @@ type TagTemplateValues struct { Tag string } -var ( - templateCache *lru.Cache - - // Verify the default templates are valid. - _ = template.Must(template.New("domain-template").Parse(DefaultDomainTemplate)) - _ = template.Must(template.New("tag-template").Parse(DefaultTagTemplate)) -) - -func init() { - // The only failure is due to negative size. - // Store ~10 latest templates per template type. - templateCache, _ = lru.New(10 * 2) -} - // Config contains the networking configuration defined in the // network config map. type Config struct { @@ -310,70 +251,10 @@ type Config struct { // not enabled. Defaults to "http". DefaultExternalScheme string - // ActivatorCA defines the secret name of the CA public certificate used to sign the activator TLS certificate. - // The traffic is not encrypted if ActivatorCA is empty. - ActivatorCA string - - // ActivatorSAN defines the SAN (Subject Alt Name) used to validate the activator TLS certificate. - // It is used only when ActivatorCA is specified. - ActivatorSAN string - - // ActivatorCertSecret defines the secret name of the server certificates to serve the TLS traffic from ingress to activator. - ActivatorCertSecret string - - // QueueProxyCA defines the secret name of the CA public certificate used to sign the queue-proxy TLS certificate. - // The traffic to queue-proxy is not encrypted if QueueProxyCA is empty. - QueueProxyCA string - - // QueueProxySAN defines the SAN (Subject Alt Name) used to validate the queue-proxy TLS certificate. - // It is used only when QueueProxyCA is specified. - QueueProxySAN string - - // QueueProxyCertSecret defines the secret name of the server certificates to serve the TLS traffic from activator to queue-proxy. - QueueProxyCertSecret string + // DefaultExternal specifies whether internal traffic is encrypted or not. + InternalEncryption bool } -// HTTPProtocol indicates a type of HTTP endpoint behavior -// that Knative ingress could take. 
-type HTTPProtocol string - -const ( - // HTTPEnabled represents HTTP protocol is enabled in Knative ingress. - HTTPEnabled HTTPProtocol = "enabled" - - // HTTPDisabled represents HTTP protocol is disabled in Knative ingress. - HTTPDisabled HTTPProtocol = "disabled" - - // HTTPRedirected represents HTTP connection is redirected to HTTPS in Knative ingress. - HTTPRedirected HTTPProtocol = "redirected" -) - -// MeshCompatibilityMode is one of enabled (always use ClusterIP), disabled -// (always use Pod IP), or auto (try PodIP, and fall back to ClusterIP if mesh -// is detected). -type MeshCompatibilityMode string - -const ( - // MeshCompatibilityModeEnabled instructs consumers of network plugins, such as - // Knative Serving, to use ClusterIP when connecting to pods. This is - // required when mesh is enabled (unless EnableMeshPodAddressability is set), - // but is less efficient. - MeshCompatibilityModeEnabled MeshCompatibilityMode = "enabled" - - // MeshCompatibilityModeDisabled instructs consumers of network plugins, such as - // Knative Serving, to connect to individual Pod IPs. This is most efficient, - // but will only work with mesh enabled when EnableMeshPodAddressability is - // used. - MeshCompatibilityModeDisabled MeshCompatibilityMode = "disabled" - - // MeshCompatibilityModeAuto instructs consumers of network plugins, such as - // Knative Serving, to heuristically determine whether to connect using the - // Cluster IP, or to ocnnect to individual Pod IPs. This is most efficient, - // determine whether mesh is enabled, and fall back from Direct Pod IP - // communication to Cluster IP as needed. - MeshCompatibilityModeAuto MeshCompatibilityMode = "auto" -) - func defaultConfig() *Config { return &Config{ DefaultIngressClass: IstioIngressClassName, @@ -386,20 +267,10 @@ func defaultConfig() *Config { AutocreateClusterDomainClaims: false, DefaultExternalScheme: "http", MeshCompatibilityMode: MeshCompatibilityModeAuto, - ActivatorCA: "", - ActivatorSAN: "", - ActivatorCertSecret: "", - QueueProxyCA: "", - QueueProxySAN: "", - QueueProxyCertSecret: "", + InternalEncryption: false, } } -// NewConfigFromConfigMap creates a Config from the supplied ConfigMap -func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { - return NewConfigFromMap(configMap.Data) -} - // NewConfigFromMap creates a Config from the supplied data. 
func NewConfigFromMap(data map[string]string) (*Config, error) { nc := defaultConfig() @@ -423,12 +294,7 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { cm.AsBool(AutocreateClusterDomainClaimsKey, &nc.AutocreateClusterDomainClaims), cm.AsBool(EnableMeshPodAddressabilityKey, &nc.EnableMeshPodAddressability), cm.AsString(DefaultExternalSchemeKey, &nc.DefaultExternalScheme), - cm.AsString(ActivatorCAKey, &nc.ActivatorCA), - cm.AsString(ActivatorSANKey, &nc.ActivatorSAN), - cm.AsString(ActivatorCertKey, &nc.ActivatorCertSecret), - cm.AsString(QueueProxyCAKey, &nc.QueueProxyCA), - cm.AsString(QueueProxySANKey, &nc.QueueProxySAN), - cm.AsString(QueueProxyCertKey, &nc.QueueProxyCertSecret), + cm.AsBool(InternalEncryptionKey, &nc.InternalEncryption), asMode(MeshCompatibilityModeKey, &nc.MeshCompatibilityMode), asLabelSelector(NamespaceWildcardCertSelectorKey, &nc.NamespaceWildcardCertSelector), ); err != nil { @@ -485,22 +351,6 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { return nil, fmt.Errorf("httpProtocol %s in config-network ConfigMap is not supported", data[HTTPProtocolKey]) } - if nc.ActivatorCA != "" && nc.ActivatorSAN == "" { - return nil, fmt.Errorf("%q must be set when %q was set", ActivatorSANKey, ActivatorCAKey) - } - - if nc.ActivatorCA == "" && nc.ActivatorSAN != "" { - return nil, fmt.Errorf("%q must be set when %q was set", ActivatorCAKey, ActivatorSANKey) - } - - if nc.QueueProxyCA != "" && nc.QueueProxySAN == "" { - return nil, fmt.Errorf("%q must be set when %q was set", QueueProxySANKey, QueueProxyCAKey) - } - - if nc.QueueProxyCA == "" && nc.QueueProxySAN != "" { - return nil, fmt.Errorf("%q must be set when %q was set", QueueProxyCAKey, QueueProxySANKey) - } - return nc, nil } @@ -571,82 +421,20 @@ func checkTagTemplate(t *template.Template) error { return t.Execute(ioutil.Discard, data) } -// IsKubeletProbe returns true if the request is a Kubernetes probe. -func IsKubeletProbe(r *http.Request) bool { - return strings.HasPrefix(r.Header.Get("User-Agent"), KubeProbeUAPrefix) || - r.Header.Get(KubeletProbeHeaderName) != "" -} - -// KnativeProbeHeader returns the value for key ProbeHeaderName in request headers. -func KnativeProbeHeader(r *http.Request) string { - return r.Header.Get(ProbeHeaderName) -} - -// KnativeProxyHeader returns the value for key ProxyHeaderName in request headers. -func KnativeProxyHeader(r *http.Request) string { - return r.Header.Get(ProxyHeaderName) -} - -// IsProbe returns true if the request is a Kubernetes probe or a Knative probe, -// i.e. non-empty ProbeHeaderName header. -func IsProbe(r *http.Request) bool { - return IsKubeletProbe(r) || KnativeProbeHeader(r) != "" -} - -// RewriteHostIn removes the `Host` header from the inbound (server) request -// and replaces it with our custom header. -// This is done to avoid Istio Host based routing, see #3870. -// Queue-Proxy will execute the reverse process. -func RewriteHostIn(r *http.Request) { - h := r.Host - r.Host = "" - r.Header.Del("Host") - // Don't overwrite an existing OriginalHostHeader. - if r.Header.Get(OriginalHostHeader) == "" { - r.Header.Set(OriginalHostHeader, h) - } -} - -// RewriteHostOut undoes the `RewriteHostIn` action. -// RewriteHostOut checks if network.OriginalHostHeader was set and if it was, -// then uses that as the r.Host (which takes priority over Request.Header["Host"]). -// If the request did not have the OriginalHostHeader header set, the request is untouched. 
-func RewriteHostOut(r *http.Request) { - if ohh := r.Header.Get(OriginalHostHeader); ohh != "" { - r.Host = ohh - r.Header.Del("Host") - r.Header.Del(OriginalHostHeader) - } -} - -// NameForPortNumber finds the name for a given port as defined by a Service. -func NameForPortNumber(svc *corev1.Service, portNumber int32) (string, error) { - for _, port := range svc.Spec.Ports { - if port.Port == portNumber { - return port.Name, nil - } - } - return "", fmt.Errorf("no port with number %d found", portNumber) -} - -// PortNumberForName resolves a given name to a portNumber as defined by an EndpointSubset. -func PortNumberForName(sub corev1.EndpointSubset, portName string) (int32, error) { - for _, subPort := range sub.Ports { - if subPort.Name == portName { - return subPort.Port, nil +// asLabelSelector returns a LabelSelector extracted from a given configmap key. +func asLabelSelector(key string, target **metav1.LabelSelector) cm.ParseFunc { + return func(data map[string]string) error { + if raw, ok := data[key]; ok { + if len(raw) > 0 { + var selector *metav1.LabelSelector + if err := yaml.Unmarshal([]byte(raw), &selector); err != nil { + return err + } + *target = selector + } } + return nil } - return 0, fmt.Errorf("no port for name %q found", portName) -} - -// IsPotentialMeshErrorResponse returns whether the HTTP response is compatible -// with having been caused by attempting direct connection when mesh was -// enabled. For example if we get a HTTP 404 status code it's safe to assume -// mesh is not enabled even if a probe was otherwise unsuccessful. This is -// useful to avoid falling back to ClusterIP when we see errors which are -// unrelated to mesh being enabled. -func IsPotentialMeshErrorResponse(resp *http.Response) bool { - return resp.StatusCode == http.StatusServiceUnavailable || resp.StatusCode == http.StatusBadGateway } // asMode parses the value at key as a MeshCompatibilityMode into the target, if it exists. @@ -663,19 +451,3 @@ func asMode(key string, target *MeshCompatibilityMode) cm.ParseFunc { return nil } } - -// asLabelSelector returns a LabelSelector extracted from a given configmap key. -func asLabelSelector(key string, target **metav1.LabelSelector) cm.ParseFunc { - return func(data map[string]string) error { - if raw, ok := data[key]; ok { - if len(raw) > 0 { - var selector *metav1.LabelSelector - if err := yaml.Unmarshal([]byte(raw), &selector); err != nil { - return err - } - *target = selector - } - } - return nil - } -} diff --git a/vendor/knative.dev/networking/pkg/doc.go b/vendor/knative.dev/networking/pkg/config/doc.go similarity index 93% rename from vendor/knative.dev/networking/pkg/doc.go rename to vendor/knative.dev/networking/pkg/config/doc.go index a0b73a258e..4d6130b243 100644 --- a/vendor/knative.dev/networking/pkg/doc.go +++ b/vendor/knative.dev/networking/pkg/config/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // Package network holds the typed objects that define the schemas for // configuring the knative/serving networking layer. 
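For orientation, a minimal sketch (not part of the vendored sources) of exercising the reworked map-based parsing above. The literal ConfigMap keys are assumptions inferred from the constant names in the diff; only `NewConfigFromMap`, its new package path, and the `Config` fields it fills are taken from the code itself.

```go
package main

import (
	"fmt"

	netcfg "knative.dev/networking/pkg/config"
)

func main() {
	// Keys below are assumed spellings of InternalEncryptionKey,
	// MeshCompatibilityModeKey and DefaultExternalSchemeKey.
	cfg, err := netcfg.NewConfigFromMap(map[string]string{
		"internal-encryption":     "true",
		"mesh-compatibility-mode": "auto",
		"default-external-scheme": "https",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.InternalEncryption, cfg.MeshCompatibilityMode, cfg.DefaultExternalScheme)
}
```

Note how the per-component CA/SAN/secret pairs collapse into the single boolean, which is also why the pairwise CA/SAN validation block above disappears.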
-package pkg +package config diff --git a/vendor/knative.dev/networking/pkg/zz_generated.deepcopy.go b/vendor/knative.dev/networking/pkg/config/zz_generated.deepcopy.go similarity index 99% rename from vendor/knative.dev/networking/pkg/zz_generated.deepcopy.go rename to vendor/knative.dev/networking/pkg/config/zz_generated.deepcopy.go index 1d2fd66400..276a4b9f87 100644 --- a/vendor/knative.dev/networking/pkg/zz_generated.deepcopy.go +++ b/vendor/knative.dev/networking/pkg/config/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package pkg +package config import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/knative.dev/networking/pkg/deprecated_config.go b/vendor/knative.dev/networking/pkg/deprecated_config.go new file mode 100644 index 0000000000..50c322d457 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/deprecated_config.go @@ -0,0 +1,213 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import ( + corev1 "k8s.io/api/core/v1" + "knative.dev/networking/pkg/config" +) + +const ( + // ConfigName is the name of the configmap containing all + // customizations for networking features. + // + // Deprecated: use knative.dev/networking/pkg/config.ConfigMapName + ConfigName = config.ConfigMapName + + // DefaultDomainTemplate is the default golang template to use when + // constructing the Knative Route's Domain(host) + // + // Deprecated: use knative.dev/networking/pkg/config.DefaultDomainTemplate + DefaultDomainTemplate = config.DefaultDomainTemplate + + // DefaultTagTemplate is the default golang template to use when + // constructing the Knative Route's tag names. + // + // Deprecated: use knative.dev/networking/pkg/config.DefaultTagTemplate + DefaultTagTemplate = config.DefaultTagTemplate + + // DefaultIngressClassKey is the name of the configuration entry + // that specifies the default Ingress. + // + // Deprecated: use knative.dev/networking/pkg/config.DefaultIngressClassKey + DefaultIngressClassKey = config.DefaultIngressClassKey + + // DefaultCertificateClassKey is the name of the configuration entry + // that specifies the default Certificate. + // + // Deprecated: use knative.dev/networking/pkg/config.DefaultCertificateClassKey + DefaultCertificateClassKey = config.DefaultCertificateClassKey + + // IstioIngressClassName value for specifying knative's Istio + // Ingress reconciler. + // + // Deprecated: use knative.dev/networking/pkg/config.IstioIngressClassName + IstioIngressClassName = config.IstioIngressClassName + + // CertManagerCertificateClassName value for specifying Knative's Cert-Manager + // Certificate reconciler. 
+ // + // Deprecated: use knative.dev/networking/pkg/config.CertManagerCertificateClassName + CertManagerCertificateClassName = config.CertManagerCertificateClassName + + // DomainTemplateKey is the name of the configuration entry that + // specifies the golang template string to use to construct the + // Knative service's DNS name. + // + // Deprecated: use knative.dev/networking/pkg/config.DomainTemplateKey + DomainTemplateKey = config.DomainTemplateKey + + // TagTemplateKey is the name of the configuration entry that + // specifies the golang template string to use to construct the + // hostname for a Route's tag. + // + // Deprecated: use knative.dev/networking/pkg/config.TagTemplateKey + TagTemplateKey = config.TagTemplateKey + + // RolloutDurationKey is the name of the configuration entry + // that specifies the default duration of the configuration rollout. + // + // Deprecated: use knative.dev/networking/pkg/config.RolloutDurationKey + RolloutDurationKey = config.RolloutDurationKey + + // NamespaceWildcardCertSelectorKey is the name of the configuration + // entry that specifies a LabelSelector to control which namespaces + // have a wildcard certificate provisioned for them. + // + // Deprecated: use knative.dev/networking/pkg/config.NamespaceWildcardCertSelectorKey + NamespaceWildcardCertSelectorKey = config.NamespaceWildcardCertSelectorKey + + // AutocreateClusterDomainClaimsKey is the key for the + // AutocreateClusterDomainClaims property. + // + // Deprecated: use knative.dev/networking/pkg/config.AutocreateClusterDomainClaimsKey + AutocreateClusterDomainClaimsKey = config.AutocreateClusterDomainClaimsKey + + // AutoTLSKey is the name of the configuration entry + // that specifies enabling auto-TLS or not. + // + // Deprecated: use knative.dev/networking/pkg/config.AutoTLSKey + AutoTLSKey = config.AutoTLSKey + + // HTTPProtocolKey is the name of the configuration entry that + // specifies the HTTP endpoint behavior of Knative ingress. + // + // Deprecated: use knative.dev/networking/pkg/config.HTTPProtocolKey + HTTPProtocolKey = config.HTTPProtocolKey + + // EnableMeshPodAddressabilityKey is the config for enabling pod addressability in mesh. + // + // Deprecated: use knative.dev/networking/pkg/config.EnableMeshPodAddressabilityKey + EnableMeshPodAddressabilityKey = config.EnableMeshPodAddressabilityKey + + // MeshCompatibilityModeKey is the config for selecting the mesh compatibility mode. + // + // Deprecated: use knative.dev/networking/pkg/config.MeshCompatibilityModeKey + MeshCompatibilityModeKey = config.MeshCompatibilityModeKey + + // DefaultExternalSchemeKey is the config for defining the scheme of external URLs. + // + // Deprecated: use knative.dev/networking/pkg/config.DefaultExternalSchemeKey + DefaultExternalSchemeKey = config.DefaultExternalSchemeKey +) + +// DomainTemplateValues are the available properties people can choose from +// in their Route's "DomainTemplate" golang template sting. +// We could add more over time - e.g. RevisionName if we thought that +// might be of interest to people. +// +// Deprecated: use knative.dev/networking/pkg/config.DomainTemplateValues +type DomainTemplateValues = config.DomainTemplateValues + +// TagTemplateValues are the available properties people can choose from +// in their Route's "TagTemplate" golang template sting. 
+// +// Deprecated: use knative.dev/networking/pkg/config.TagTemplateValues +type TagTemplateValues = config.TagTemplateValues + +// Config contains the networking configuration defined in the +// network config map. +// +// Deprecated: use knative.dev/networking/pkg/config.Config +type Config = config.Config + +// HTTPProtocol indicates a type of HTTP endpoint behavior +// that Knative ingress could take. +// +// Deprecated: use knative.dev/networking/pkg/config.HTTPProtocol +type HTTPProtocol = config.HTTPProtocol + +const ( + // HTTPEnabled represents HTTP protocol is enabled in Knative ingress. + // + // Deprecated: use knative.dev/networking/pkg/config.HTTPEnabled + HTTPEnabled HTTPProtocol = config.HTTPEnabled + + // HTTPDisabled represents HTTP protocol is disabled in Knative ingress. + // + // Deprecated: use knative.dev/networking/pkg/config.HTTPDisabled + HTTPDisabled HTTPProtocol = config.HTTPDisabled + + // HTTPRedirected represents HTTP connection is redirected to HTTPS in Knative ingress. + // + // Deprecated: use knative.dev/networking/pkg/config.HTTPRedirected + HTTPRedirected HTTPProtocol = config.HTTPRedirected +) + +// MeshCompatibilityMode is one of enabled (always use ClusterIP), disabled +// (always use Pod IP), or auto (try PodIP, and fall back to ClusterIP if mesh +// is detected). +// +// Deprecated: use knative.dev/networking/pkg/config.MeshCompatibilityMode +type MeshCompatibilityMode = config.MeshCompatibilityMode + +const ( + // MeshCompatibilityModeEnabled instructs consumers of network plugins, such as + // Knative Serving, to use ClusterIP when connecting to pods. This is + // required when mesh is enabled (unless EnableMeshPodAddressability is set), + // but is less efficient. + // + // Deprecated: Use knative.dev/networking/pkg/config/MeshCompatibilityModeEnabled + MeshCompatibilityModeEnabled MeshCompatibilityMode = config.MeshCompatibilityModeEnabled + + // MeshCompatibilityModeDisabled instructs consumers of network plugins, such as + // Knative Serving, to connect to individual Pod IPs. This is most efficient, + // but will only work with mesh enabled when EnableMeshPodAddressability is + // used. + // + // Deprecated: Use knative.dev/networking/pkg/config/MeshCompatibilityModeDisabled + MeshCompatibilityModeDisabled MeshCompatibilityMode = config.MeshCompatibilityModeDisabled + + // MeshCompatibilityModeAuto instructs consumers of network plugins, such as + // Knative Serving, to heuristically determine whether to connect using the + // Cluster IP, or to ocnnect to individual Pod IPs. This is most efficient, + // determine whether mesh is enabled, and fall back from Direct Pod IP + // communication to Cluster IP as needed. + // + // Deprecated: Use knative.dev/networking/pkg/config/MeshCompatibilityModeAuto + MeshCompatibilityModeAuto MeshCompatibilityMode = config.MeshCompatibilityModeAuto +) + +// NewConfigFromConfigMap creates a Config from the supplied ConfigMap +func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { + return NewConfigFromMap(configMap.Data) +} + +// NewConfigFromMap creates a Config from the supplied data. 
+// +// Deprecated: Use knative.dev/networking/pkg/config/NewConfigFromMap +var NewConfigFromMap = config.NewConfigFromMap diff --git a/vendor/knative.dev/networking/pkg/deprecated_header.go b/vendor/knative.dev/networking/pkg/deprecated_header.go new file mode 100644 index 0000000000..c2e98c2c31 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/deprecated_header.go @@ -0,0 +1,167 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import ( + "knative.dev/networking/pkg/http/header" +) + +const ( + // ProbeHeaderName is the name of a header that can be added to + // requests to probe the knative networking layer. Requests + // with this header will not be passed to the user container or + // included in request metrics. + // + // Deprecated: use knative.dev/networking/pkg/http/header.ProbeKey + ProbeHeaderName = header.ProbeKey + + // ProxyHeaderName is the name of an internal header that activator + // uses to mark requests going through it. + // + // Deprecated: use knative.dev/networking/pkg/http/header.ProxyKey + ProxyHeaderName = header.ProxyKey + + // HashHeaderName is the name of an internal header that Ingress controller + // uses to find out which version of the networking config is deployed. + // + // Deprecated: use knative.dev/networking/pkg/http/header.HashKey + HashHeaderName = header.HashKey + + // HashHeaderValue is the value that must appear in the HashHeaderName + // header in order for our network hash to be injected. + // + // Deprecated: use knative.dev/networking/pkg/http/header.HashValueOverride + HashHeaderValue = header.HashValueOverride + + // OriginalHostHeader is used to avoid Istio host based routing rules + // in Activator. + // The header contains the original Host value that can be rewritten + // at the Queue proxy level back to be a host header. + // + // Deprecated: use knative.dev/networking/pkg/http/header.OriginalHostKey + OriginalHostHeader = header.OriginalHostKey + + // KubeProbeUAPrefix is the user agent prefix of the probe. + // Since K8s 1.8, prober requests have + // User-Agent = "kube-probe/{major-version}.{minor-version}". + // + // Deprecated: use knative.dev/networking/pkg/http/header.KubeProbeUAPrefix + KubeProbeUAPrefix = header.KubeProbeUAPrefix + + // KubeletProbeHeaderName is the name of the header supplied by kubelet + // probes. Istio with mTLS rewrites probes, but their probes pass a + // different user-agent. So we augment the probes with this header. + // + // Deprecated: use knative.dev/networking/pkg/http/header.KubeletProbeKey + KubeletProbeHeaderName = header.KubeletProbeKey + + // UserAgentKey is the constant for header "User-Agent". + // + // Deprecated: use knative.dev/networking/pkg/http/header.UserAgentKey + UserAgentKey = header.UserAgentKey + + // ActivatorUserAgent is the user-agent header value set in probe requests sent + // from activator. 
+ // + // Deprecated: use knative.dev/networking/pkg/http/header.ActivatorUserAgent + ActivatorUserAgent = header.ActivatorUserAgent + + // QueueProxyUserAgent is the user-agent header value set in probe requests sent + // from queue-proxy. + // + // Deprecated: use knative.dev/networking/pkg/http/header.QueueProxyUserAgent + QueueProxyUserAgent = header.QueueProxyUserAgent + + // IngressReadinessUserAgent is the user-agent header value + // set in probe requests for Ingress status. + // + // Deprecated: use knative.dev/networking/pkg/http/header.IngressReadinessUserAgent + IngressReadinessUserAgent = header.IngressReadinessUserAgent + + // AutoscalingUserAgent is the user-agent header value set in probe + // requests sent by autoscaling implementations. + // + // Deprecated: use knative.dev/networking/pkg/http/header.AutoscalingUserAgent + AutoscalingUserAgent = header.AutoscalingUserAgent + + // TagHeaderName is the name of the header entry which has a tag name as value. + // The tag name specifies which route was expected to be chosen by Ingress. + // + // Deprecated: use knative.dev/networking/pkg/http/header.RouteTagKey + TagHeaderName = header.RouteTagKey + + // DefaultRouteHeaderName is the name of the header entry + // identifying whether a request is routed via the default route or not. + // It has one of the string value "true" or "false". + // + // Deprecated: use knative.dev/networking/pkg/http/header.DefaultRouteKey + DefaultRouteHeaderName = header.DefaultRouteKey + + // PassthroughLoadbalancingHeaderName is the name of the header that directs + // load balancers to not load balance the respective request but to + // send it to the request's target directly. + // + // Deprecated: use knative.dev/networking/pkg/http/header.PassthroughLoadbalancingKey + PassthroughLoadbalancingHeaderName = header.PassthroughLoadbalancingKey + + // ProtoAcceptContent is the content type to be used when autoscaler scrapes metrics from the QP + // + // Deprecated: use knative.dev/networking/pkg/http/header.ProtobufMIMEType + ProtoAcceptContent = header.ProtobufMIMEType + + // ProbeHeaderValue is the value used in 'K-Network-Probe' + // + // Deprecated: use knative.dev/networking/pkg/http/header.ProbeValue + ProbeHeaderValue = header.ProbeValue +) + +// IsKubeletProbe returns true if the request is a Kubernetes probe. +// +// Deprecated: use knative.dev/networking/pkg/http/header.IsKubeletProbe +var IsKubeletProbe = header.IsKubeletProbe + +// KnativeProbeHeader returns the value for key ProbeHeaderName in request headers. +// +// Deprecated: use knative.dev/networking/pkg/http/header.GetKnativeProbeValue +var KnativeProbeHeader = header.GetKnativeProbeValue + +// KnativeProxyHeader returns the value for key ProxyHeaderName in request headers. +// +// Deprecated: use knative.dev/networking/pkg/http/header.GetKnativeProxyValue +var KnativeProxyHeader = header.GetKnativeProxyValue + +// IsProbe returns true if the request is a Kubernetes probe or a Knative probe, +// i.e. non-empty ProbeHeaderName header. +// +// Deprecated: use knative.dev/networking/pkg/http/header.IsProbe +var IsProbe = header.IsProbe + +// RewriteHostIn removes the `Host` header from the inbound (server) request +// and replaces it with our custom header. +// This is done to avoid Istio Host based routing, see #3870. +// Queue-Proxy will execute the reverse process. 
+// +// Deprecated: use knative.dev/networking/pkg/http/header.RewriteHostIn +var RewriteHostIn = header.RewriteHostIn + +// RewriteHostOut undoes the `RewriteHostIn` action. +// RewriteHostOut checks if network.OriginalHostHeader was set and if it was, +// then uses that as the r.Host (which takes priority over Request.Header["Host"]). +// If the request did not have the OriginalHostHeader header set, the request is untouched. +// +// Deprecated: use knative.dev/networking/pkg/http/header.RewriteHostOut +var RewriteHostOut = header.RewriteHostOut diff --git a/vendor/knative.dev/networking/pkg/deprecated_http.go b/vendor/knative.dev/networking/pkg/deprecated_http.go new file mode 100644 index 0000000000..ad40005266 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/deprecated_http.go @@ -0,0 +1,116 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import ( + "knative.dev/networking/pkg/http" + "knative.dev/networking/pkg/http/probe" + "knative.dev/networking/pkg/http/proxy" + "knative.dev/networking/pkg/http/stats" +) + +const ( + // ProbePath is the name of a path that activator, autoscaler and + // prober(used by KIngress generally) use for health check. + // + // Deprecated: use knative.dev/networking/pkg/http.HealthCheckPath + ProbePath = http.HealthCheckPath + + // FlushInterval controls the time when we flush the connection in the + // reverse proxies (Activator, QP). + // As of go1.16, a FlushInterval of 0 (the default) still flushes immediately + // when Content-Length is -1, which means the default works properly for + // streaming/websockets, without flushing more often than necessary for + // non-streaming requests. + // + // Deprecated: use knative.dev/networking/pkg/http/proxy.FlushInterval + FlushInterval = proxy.FlushInterval +) + +type ( + // ReqEvent represents either an incoming or closed request. + // + // Deprecated: use knative.dev/networking/pkg/http/stats.ReqEvent + ReqEvent = stats.ReqEvent + + // ReqEventType denotes the type (incoming/closed) of a ReqEvent. + // + // Deprecated: use knative.dev/networking/pkg/http/stats.ReqEventType + ReqEventType = stats.ReqEventType + + // RequestStats collects statistics about requests as they flow in and out of the system. + // + // Deprecated: use knative.dev/networking/pkg/http/stats.RequestStats + RequestStats = stats.RequestStats + + // RequestStatsReport are the metrics reported from the the request stats collector + // at a given time. + // + // Deprecated: use knative.dev/networking/pkg/http/stats.RequestStatsReport + RequestStatsReport = stats.RequestStatsReport +) + +const ( + // ReqIn represents an incoming request + // + // Deprecated: use knative.dev/networking/pkg/http/stats.ReqIn + ReqIn = stats.ReqIn + + // ReqOut represents a finished request + // + // Deprecated: use knative.dev/networking/pkg/http/stats.ReqOut + ReqOut = stats.ReqOut + + // ProxiedIn represents an incoming request through a proxy. 
+ // + // Deprecated: use knative.dev/networking/pkg/http/stats.ProxiedIn + ProxiedIn = stats.ProxiedIn + + // ProxiedOut represents a finished proxied request. + // + // Deprecated: use knative.dev/networking/pkg/http/stats.ProxiedOut + ProxiedOut = stats.ProxiedOut +) + +var ( + // NewRequestStats builds a RequestStats instance, started at the given time. + // + // Deprecated: use knative.dev/networking/pkg/http/stats.NewRequestStats + NewRequestStats = stats.NewRequestStats + + // NewBufferPool creates a new BufferPool. This is only safe to use in the context + // of a httputil.ReverseProxy, as the buffers returned via Put are not cleaned + // explicitly. + // + // Deprecated: use knative.dev/networking/pkg/http/proxy.NewBufferPool + NewBufferPool = proxy.NewBufferPool + + // NewProbeHandler wraps a HTTP handler handling probing requests around the provided HTTP handler + // + // Deprecated: use knative.dev/networking/pkg/http/probe.NewHandler + NewProbeHandler = probe.NewHandler + + // IsPotentialMeshErrorResponse returns whether the HTTP response is compatible + // with having been caused by attempting direct connection when mesh was + // enabled. For example if we get a HTTP 404 status code it's safe to assume + // mesh is not enabled even if a probe was otherwise unsuccessful. This is + // useful to avoid falling back to ClusterIP when we see errors which are + // unrelated to mesh being enabled. + // + // Deprecated: use knative.dev/networking/pkg/http.IsPotentialMeshErrorResponse + IsPotentialMeshErrorResponse = http.IsPotentialMeshErrorResponse +) diff --git a/vendor/knative.dev/networking/pkg/deprecated_labels.go b/vendor/knative.dev/networking/pkg/deprecated_labels.go new file mode 100644 index 0000000000..0a70ca18b3 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/deprecated_labels.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import "knative.dev/networking/pkg/apis/networking" + +const ( + // VisibilityLabelKey is the label to indicate visibility of Route + // and KServices. It can be an annotation too but since users are + // already using labels for domain, it probably best to keep this + // consistent. + // + // Deprecated: use knative.dev/networking/pkg/apis/networking.VisibilityLabelKey + VisibilityLabelKey = networking.VisibilityLabelKey +) diff --git a/vendor/knative.dev/networking/pkg/deprecated_port.go b/vendor/knative.dev/networking/pkg/deprecated_port.go new file mode 100644 index 0000000000..8b218289ce --- /dev/null +++ b/vendor/knative.dev/networking/pkg/deprecated_port.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pkg + +import "knative.dev/networking/pkg/k8s" + +// NameForPortNumber finds the name for a given port as defined by a Service. +// +// Deprecated: use knative.dev/networking/pkg/k8s.NameForPortNumber +var NameForPortNumber = k8s.NameForPortNumber + +// PortNumberForName resolves a given name to a portNumber as defined by an EndpointSubset. +// +// Deprecated: use knative.dev/networking/pkg/k8s.PortNumberForName +var PortNumberForName = k8s.PortNumberForName diff --git a/vendor/knative.dev/networking/pkg/http/constants.go b/vendor/knative.dev/networking/pkg/http/constants.go new file mode 100644 index 0000000000..4012153de2 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/http/constants.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +const ( + // HealthCheckPath is the name of a path that activator, autoscaler and + // prober(used by KIngress generally) use for health check. + HealthCheckPath = "/healthz" +) diff --git a/vendor/knative.dev/networking/pkg/http/error.go b/vendor/knative.dev/networking/pkg/http/error.go new file mode 100644 index 0000000000..93878f3307 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/http/error.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import "net/http" + +// IsPotentialMeshErrorResponse returns whether the HTTP response is compatible +// with having been caused by attempting direct connection when mesh was +// enabled. For example if we get a HTTP 404 status code it's safe to assume +// mesh is not enabled even if a probe was otherwise unsuccessful. This is +// useful to avoid falling back to ClusterIP when we see errors which are +// unrelated to mesh being enabled. 
+func IsPotentialMeshErrorResponse(resp *http.Response) bool { + return resp.StatusCode == http.StatusServiceUnavailable || resp.StatusCode == http.StatusBadGateway +} diff --git a/vendor/knative.dev/networking/pkg/http/header/header.go b/vendor/knative.dev/networking/pkg/http/header/header.go new file mode 100644 index 0000000000..158fb06200 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/http/header/header.go @@ -0,0 +1,157 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package header + +import ( + "net/http" + "strings" +) + +// HashKey & Values +const ( + // HashKey is the name of an internal header that Ingress controller + // uses to find out which version of the networking config is deployed. + HashKey = "K-Network-Hash" + + // HashValueOverride is the value that must appear in the HashHeaderKey + // header in order for our network hash to be injected. + HashValueOverride = "override" +) + +// ProbeKey & Values +const ( + // ProbeKey is the name of a header that can be added to + // requests to probe the knative networking layer. Requests + // with this header will not be passed to the user container or + // included in request metrics. + ProbeKey = "K-Network-Probe" + + // ProbeValue is the value used in 'K-Network-Probe' + ProbeValue = "probe" +) + +const ( + // ProxyKey is the name of an internal header that activator + // uses to mark requests going through it. + ProxyKey = "K-Proxy-Request" + + // OriginalHostKey is used to avoid Istio host based routing rules + // in Activator. + // The header contains the original Host value that can be rewritten + // at the Queue proxy level back to be a host header. + OriginalHostKey = "K-Original-Host" + + // KubeletProbeKey is the name of the header supplied by kubelet + // probes. Istio with mTLS rewrites probes, but their probes pass a + // different user-agent. So we augment the probes with this header. + KubeletProbeKey = "K-Kubelet-Probe" + + // RouteTagKey is the name of the header entry which has a tag name as value. + // The tag name specifies which route was expected to be chosen by Ingress. + RouteTagKey = "Knative-Serving-Tag" + + // DefaultRouteKey is the name of the header entry + // identifying whether a request is routed via the default route or not. + // It has one of the string value "true" or "false". + DefaultRouteKey = "Knative-Serving-Default-Route" + + // PassthroughLoadbalancingKey is the name of the header that directs + // load balancers to not load balance the respective request but to + // send it to the request's target directly. + PassthroughLoadbalancingKey = "K-Passthrough-Lb" +) + +// User Agent Key & Values +const ( + // UserAgentKey is the constant for header "User-Agent". + UserAgentKey = "User-Agent" + + // KubeProbeUAPrefix is the user agent prefix of the probe. + // Since K8s 1.8, prober requests have + // User-Agent = "kube-probe/{major-version}.{minor-version}". 
+ KubeProbeUAPrefix = "kube-probe/" + + // ActivatorUserAgent is the user-agent header value set in probe requests sent + // from activator. + ActivatorUserAgent = "Knative-Activator-Probe" + + // QueueProxyUserAgent is the user-agent header value set in probe requests sent + // from queue-proxy. + QueueProxyUserAgent = "Knative-Queue-Proxy-Probe" + + // IngressReadinessUserAgent is the user-agent header value + // set in probe requests for Ingress status. + IngressReadinessUserAgent = "Knative-Ingress-Probe" + + // AutoscalingUserAgent is the user-agent header value set in probe + // requests sent by autoscaling implementations. + AutoscalingUserAgent = "Knative-Autoscaling-Probe" +) + +// Accept Content Values +const ( + // ProtobufMIMEType is a content type to be used when autoscaler scrapes metrics from the QP + ProtobufMIMEType = "application/protobuf" +) + +// KnativeProbeHeader returns the value for key ProbeHeaderName in request headers. +func GetKnativeProbeValue(r *http.Request) string { + return r.Header.Get(ProbeKey) +} + +// KnativeProxyHeader returns the value for key ProxyHeaderName in request headers. +func GetKnativeProxyValue(r *http.Request) string { + return r.Header.Get(ProxyKey) +} + +// IsProbe returns true if the request is a Kubernetes probe or a Knative probe, +// i.e. non-empty ProbeHeaderName header. +func IsProbe(r *http.Request) bool { + return IsKubeletProbe(r) || GetKnativeProbeValue(r) != "" +} + +// IsKubeletProbe returns true if the request is a Kubernetes probe. +func IsKubeletProbe(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("User-Agent"), KubeProbeUAPrefix) || + r.Header.Get(KubeletProbeKey) != "" +} + +// RewriteHostIn removes the `Host` header from the inbound (server) request +// and replaces it with our custom header. +// This is done to avoid Istio Host based routing, see #3870. +// Queue-Proxy will execute the reverse process. +func RewriteHostIn(r *http.Request) { + h := r.Host + r.Host = "" + r.Header.Del("Host") + // Don't overwrite an existing OriginalHostHeader. + if r.Header.Get(OriginalHostKey) == "" { + r.Header.Set(OriginalHostKey, h) + } +} + +// RewriteHostOut undoes the `RewriteHostIn` action. +// RewriteHostOut checks if network.OriginalHostHeader was set and if it was, +// then uses that as the r.Host (which takes priority over Request.Header["Host"]). +// If the request did not have the OriginalHostHeader header set, the request is untouched. +func RewriteHostOut(r *http.Request) { + if ohh := r.Header.Get(OriginalHostKey); ohh != "" { + r.Host = ohh + r.Header.Del("Host") + r.Header.Del(OriginalHostKey) + } +} diff --git a/vendor/knative.dev/networking/pkg/probe_handler.go b/vendor/knative.dev/networking/pkg/http/probe/handler.go similarity index 63% rename from vendor/knative.dev/networking/pkg/probe_handler.go rename to vendor/knative.dev/networking/pkg/http/probe/handler.go index 3283ef1976..c1d86b93af 100644 --- a/vendor/knative.dev/networking/pkg/probe_handler.go +++ b/vendor/knative.dev/networking/pkg/http/probe/handler.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,39 +14,38 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package pkg +package probe import ( "fmt" "net/http" -) -// ProbeHeaderValue is the value used in 'K-Network-Probe' -var ProbeHeaderValue = "probe" + "knative.dev/networking/pkg/http/header" +) type handler struct { next http.Handler } -// NewProbeHandler wraps a HTTP handler handling probing requests around the provided HTTP handler -func NewProbeHandler(next http.Handler) http.Handler { +// NewHandler wraps a HTTP handler handling probing requests around the provided HTTP handler +func NewHandler(next http.Handler) http.Handler { return &handler{next: next} } // ServeHTTP handles probing requests func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if ph := r.Header.Get(ProbeHeaderName); ph != ProbeHeaderValue { - r.Header.Del(HashHeaderName) + if ph := r.Header.Get(header.ProbeKey); ph != header.ProbeValue { + r.Header.Del(header.HashKey) h.next.ServeHTTP(w, r) return } - hh := r.Header.Get(HashHeaderName) + hh := r.Header.Get(header.HashKey) if hh == "" { - http.Error(w, fmt.Sprintf("a probe request must contain a non-empty %q header", HashHeaderName), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("a probe request must contain a non-empty %q header", header.HashKey), http.StatusBadRequest) return } - w.Header().Set(HashHeaderName, hh) + w.Header().Set(header.HashKey, hh) w.WriteHeader(http.StatusOK) } diff --git a/vendor/knative.dev/networking/pkg/bufferpool.go b/vendor/knative.dev/networking/pkg/http/proxy/bufferpool.go similarity index 96% rename from vendor/knative.dev/networking/pkg/bufferpool.go rename to vendor/knative.dev/networking/pkg/http/proxy/bufferpool.go index de257f059f..80d81f5dae 100644 --- a/vendor/knative.dev/networking/pkg/bufferpool.go +++ b/vendor/knative.dev/networking/pkg/http/proxy/bufferpool.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package pkg +package proxy import ( "net/http/httputil" diff --git a/vendor/knative.dev/networking/pkg/http/proxy/constants.go b/vendor/knative.dev/networking/pkg/http/proxy/constants.go new file mode 100644 index 0000000000..8d43e4425f --- /dev/null +++ b/vendor/knative.dev/networking/pkg/http/proxy/constants.go @@ -0,0 +1,28 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +const ( + + // FlushInterval controls the time when we flush the connection in the + // reverse proxies (Activator, QP). 
+ // As of go1.16, a FlushInterval of 0 (the default) still flushes immediately + // when Content-Length is -1, which means the default works properly for + // streaming/websockets, without flushing more often than necessary for + // non-streaming requests. + FlushInterval = 0 +) diff --git a/vendor/knative.dev/networking/pkg/stats.go b/vendor/knative.dev/networking/pkg/http/stats/request.go similarity index 97% rename from vendor/knative.dev/networking/pkg/stats.go rename to vendor/knative.dev/networking/pkg/http/stats/request.go index a42515ffc6..0fee36177f 100644 --- a/vendor/knative.dev/networking/pkg/stats.go +++ b/vendor/knative.dev/networking/pkg/http/stats/request.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package pkg +package stats import ( "sync" @@ -24,7 +24,6 @@ import ( ) // ReqEvent represents either an incoming or closed request. -// +k8s:deepcopy-gen=false type ReqEvent struct { // Time is the time the request event happened. Time time.Time @@ -55,7 +54,6 @@ func NewRequestStats(startedAt time.Time) *RequestStats { } // RequestStats collects statistics about requests as they flow in and out of the system. -// +k8s:deepcopy-gen=false type RequestStats struct { mux sync.Mutex @@ -72,7 +70,6 @@ type RequestStats struct { // RequestStatsReport are the metrics reported from the the request stats collector // at a given time. -// +k8s:deepcopy-gen=false type RequestStatsReport struct { // AverageConcurrency is the average concurrency over the reporting timeframe. // This is calculated via the utilization at a given concurrency. For example: diff --git a/vendor/knative.dev/networking/pkg/k8s/ports.go b/vendor/knative.dev/networking/pkg/k8s/ports.go new file mode 100644 index 0000000000..265a6b86bd --- /dev/null +++ b/vendor/knative.dev/networking/pkg/k8s/ports.go @@ -0,0 +1,43 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k8s + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +// NameForPortNumber finds the name for a given port as defined by a Service. +func NameForPortNumber(svc *corev1.Service, portNumber int32) (string, error) { + for _, port := range svc.Spec.Ports { + if port.Port == portNumber { + return port.Name, nil + } + } + return "", fmt.Errorf("no port with number %d found", portNumber) +} + +// PortNumberForName resolves a given name to a portNumber as defined by an EndpointSubset. 
+func PortNumberForName(sub corev1.EndpointSubset, portName string) (int32, error) { + for _, subPort := range sub.Ports { + if subPort.Name == portName { + return subPort.Port, nil + } + } + return 0, fmt.Errorf("no port for name %q found", portName) +} diff --git a/vendor/knative.dev/pkg/apis/contexts.go b/vendor/knative.dev/pkg/apis/contexts.go index b9835fc00c..a3550dcebc 100644 --- a/vendor/knative.dev/pkg/apis/contexts.go +++ b/vendor/knative.dev/pkg/apis/contexts.go @@ -85,14 +85,21 @@ func IsInUpdate(ctx context.Context) bool { return ctx.Value(inUpdateKey{}) != nil } -// IsInStatusUpdate checks whether the context is an Update. -func IsInStatusUpdate(ctx context.Context) bool { +// GetUpdatedSubresource returns the subresource being updated or "" if there +// is no subresource that's being updated. Examples are "status" for Status +// updates, or "scale" for scaling Deployment. +func GetUpdatedSubresource(ctx context.Context) string { value := ctx.Value(inUpdateKey{}) if value == nil { - return false + return "" } up := value.(*updatePayload) - return up.subresource == "status" + return up.subresource +} + +// IsInStatusUpdate checks whether the context is an Update. +func IsInStatusUpdate(ctx context.Context) bool { + return GetUpdatedSubresource(ctx) == "status" } // GetBaseline returns the baseline of the update, or nil when we diff --git a/vendor/knative.dev/pkg/apis/deprecated.go b/vendor/knative.dev/pkg/apis/deprecated.go index fb73306faa..8f07e71b31 100644 --- a/vendor/knative.dev/pkg/apis/deprecated.go +++ b/vendor/knative.dev/pkg/apis/deprecated.go @@ -36,12 +36,15 @@ func CheckDeprecated(ctx context.Context, obj interface{}) *FieldError { // CheckDeprecatedUpdate checks whether the provided named deprecated fields // are set in a context where deprecation is disallowed. // This is a json shallow check. We will recursively check inlined structs. -func CheckDeprecatedUpdate(ctx context.Context, obj, original interface{}) *FieldError { +func CheckDeprecatedUpdate(ctx context.Context, obj, original interface{}) (errs *FieldError) { if IsDeprecatedAllowed(ctx) { + // TODO: We should still run through the validation here, but do + // something like: + // defer func() { + // errs = errs.At(WarningLevel) + // }() return nil } - - var errs *FieldError objFields, objInlined := getPrefixedNamedFieldValues(deprecatedPrefix, obj) if nonZero(reflect.ValueOf(original)) { diff --git a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go index dcde449401..96638e5799 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
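`GetUpdatedSubresource` above generalizes the old status-only check; a hedged sketch (not vendored code) of how a hypothetical webhook callback might branch on it, using only the values named in its doc comment.

```go
package main

import (
	"context"
	"fmt"

	"knative.dev/pkg/apis"
)

func describeUpdate(ctx context.Context) {
	switch apis.GetUpdatedSubresource(ctx) {
	case "status":
		fmt.Println("status update") // same answer as apis.IsInStatusUpdate(ctx)
	case "scale":
		fmt.Println("scale subresource update")
	case "":
		fmt.Println("not a subresource update")
	default:
		fmt.Println("some other subresource")
	}
}

func main() {
	// Outside webhook plumbing there is no update payload on the context,
	// so this prints "not a subresource update".
	describeUpdate(context.Background())
}
```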
diff --git a/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go index 838f15a171..c845923b19 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go index 2e7d702c45..743081b012 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/apis/field_error.go b/vendor/knative.dev/pkg/apis/field_error.go index 5ea51c0e47..ddd4838af2 100644 --- a/vendor/knative.dev/pkg/apis/field_error.go +++ b/vendor/knative.dev/pkg/apis/field_error.go @@ -28,18 +28,54 @@ import ( // a problem with the current field itself. const CurrentField = "" +// DiagnosticLevel is used to signal the severity of a particular diagnostic +// in the form of a FieldError. +type DiagnosticLevel int + +const ( + // ErrorLevel is used to signify fatal/blocking diagnostics, e.g. those + // that should block admission in a validating admission webhook. + ErrorLevel DiagnosticLevel = iota + + // WarningLevel is used to signify information/non-blocking diagnostics, + // e.g. those that should be surfaced as warnings in a validating admission + // webhook. + WarningLevel +) + +func (dl DiagnosticLevel) String() string { + switch dl { + case ErrorLevel: + return "Error" + case WarningLevel: + return "Warning" + + default: + return fmt.Sprintf("<unknown: %d>", dl) + } +} + // FieldError is used to propagate the context of errors pertaining to // specific fields in a manner suitable for use in a recursive walk, so // that errors contain the appropriate field context. // FieldError methods are non-mutating. // +k8s:deepcopy-gen=true type FieldError struct { + // Message holds the main diagnostic message carried by this FieldError Message string - Paths []string + + // Paths holds a list of paths to which this diagnostic pertains + Paths []string + + // Level holds the severity of the diagnostic. + // If empty, this defaults to ErrorLevel. + Level DiagnosticLevel + // Details contains an optional longer payload. // +optional Details string - errors []FieldError + + errors []FieldError } // FieldError implements error @@ -60,6 +96,7 @@ func (fe *FieldError) ViaField(prefix ...string) *FieldError { // along using .Also(). newErr := &FieldError{ Message: fe.Message, + Level: fe.Level, Details: fe.Details, } @@ -107,6 +144,54 @@ func (fe *FieldError) ViaFieldKey(field, key string) *FieldError { return fe.ViaKey(key).ViaField(field) } +// At is a way to alter the level of the diagnostics held in this FieldError.
+// ErrMissingField("foo").At(WarningLevel) +func (fe *FieldError) At(l DiagnosticLevel) *FieldError { + if fe == nil { + return nil + } + // Copy over message and details, paths will be updated and errors come + // along using .Also(). + newErr := &FieldError{ + Message: fe.Message, + Level: l, + Details: fe.Details, + Paths: fe.Paths, + } + + for _, e := range fe.errors { + newErr = newErr.Also(e.At(l)) + } + return newErr +} + +// Filter is a way to access the set of diagnostics having a particular level. +// if err := x.Validate(ctx).Filter(ErrorLevel); err != nil { +// return err +// } +func (fe *FieldError) Filter(l DiagnosticLevel) *FieldError { + if fe == nil { + return nil + } + var newErr *FieldError + if l == fe.Level { + newErr = &FieldError{ + Message: fe.Message, + Level: fe.Level, + Details: fe.Details, + Paths: fe.Paths, + } + } + + for _, e := range fe.errors { + newErr = newErr.Also(e.Filter(l)) + } + if newErr.isEmpty() { + return nil + } + return newErr +} + // Also collects errors, returns a new collection of existing errors and new errors. func (fe *FieldError) Also(errs ...*FieldError) *FieldError { // Avoid doing any work, if we don't have to. @@ -154,6 +239,7 @@ func (fe *FieldError) normalized() []*FieldError { if fe.Message != "" { errors = append(errors, &FieldError{ Message: fe.Message, + Level: fe.Level, Paths: fe.Paths, Details: fe.Details, }) @@ -274,6 +360,9 @@ func merge(errs []*FieldError) []*FieldError { // Sort the flattened map. sort.Slice(newErrs, func(i, j int) bool { if newErrs[i].Message == newErrs[j].Message { + if newErrs[i].Details == newErrs[j].Details { + return newErrs[i].Level < newErrs[j].Level + } return newErrs[i].Details < newErrs[j].Details } return newErrs[i].Message < newErrs[j].Message @@ -285,7 +374,7 @@ func merge(errs []*FieldError) []*FieldError { // key returns the key using the fields .Message and .Details. func key(err *FieldError) string { - return fmt.Sprintf("%s-%s", err.Message, err.Details) + return fmt.Sprintf("%s-%s-%s", err.Level, err.Message, err.Details) } // Public helpers --- diff --git a/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go index 512ebea240..367d7941f8 100644 --- a/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/changeset/commit.go b/vendor/knative.dev/pkg/changeset/commit.go index 3668dfebd1..da38bd330d 100644 --- a/vendor/knative.dev/pkg/changeset/commit.go +++ b/vendor/knative.dev/pkg/changeset/commit.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,113 +17,63 @@ limitations under the License. package changeset import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "path/filepath" "regexp" - "strings" + "runtime/debug" + "strconv" + "sync" ) -const ( - commitIDFile = "HEAD" - koDataPathEnvName = "KO_DATA_PATH" - // packedRefsFile is a file containing a list of refs, used to compact the - // list of refs instead of storing them on the filesystem directly. 
- // See https://git-scm.com/docs/git-pack-refs - packedRefsFile = "packed-refs" +const Unknown = "unknown" + +var ( + shaRegexp = regexp.MustCompile(`^[a-f0-9]{40,64}$`) + rev string + once sync.Once + + readBuildInfo = debug.ReadBuildInfo ) -var commitIDRE = regexp.MustCompile(`^[a-f0-9]{40}$`) +// Get returns the 'vcs.revision' property from the embedded build information +// If there is no embedded information 'unknown' will be returned +// +// The result will have a '-dirty' suffix if the workspace was not clean +func Get() string { + once.Do(func() { + rev = get() + }) + + return rev +} -// Get tries to fetch the first 7 digitals of GitHub commit ID from HEAD file in -// KO_DATA_PATH. If it fails, it returns the error it gets. -func Get() (string, error) { - data, err := readFileFromKoData(commitIDFile) - if err != nil { - return "", err +func get() string { + info, ok := readBuildInfo() + if !ok { + return Unknown } - commitID := strings.TrimSpace(string(data)) - if rID := strings.TrimPrefix(commitID, "ref: "); rID != commitID { - // First try to read from the direct ref file - e.g. refs/heads/main - data, err := readFileFromKoData(rID) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - - // Ref file didn't exist - it might be contained in the packed-refs - // file. - var pferr error - data, pferr = findPackedRef(rID) - // Only return the sub-error if the packed-refs file exists, otherwise - // just let the original error return (e.g. treat it as if we didn't - // even attempt the operation). This is primarily to keep the error - // messages clean. - if pferr != nil { - if os.IsNotExist(pferr) { - return "", err - } - return "", pferr - } + + var revision string + var modified bool + + for _, s := range info.Settings { + switch s.Key { + case "vcs.revision": + revision = s.Value + case "vcs.modified": + modified, _ = strconv.ParseBool(s.Value) } - commitID = strings.TrimSpace(string(data)) - } - if commitIDRE.MatchString(commitID) { - return commitID[:7], nil } - return "", fmt.Errorf("%q is not a valid GitHub commit ID", commitID) -} -// readFileFromKoData tries to read data as string from the file with given name -// under KO_DATA_PATH then returns the content as string. The file is expected -// to be wrapped into the container from /kodata by ko. If it fails, returns -// the error it gets. -func readFileFromKoData(filename string) ([]byte, error) { - f, err := koDataFile(filename) - if err != nil { - return nil, err + if revision == "" { + return Unknown } - defer f.Close() - return ioutil.ReadAll(f) -} -// readFileFromKoData tries to open the file with given name under KO_DATA_PATH. -// The file is expected to be wrapped into the container from /kodata by ko. -// If it fails, returns the error it gets. -func koDataFile(filename string) (*os.File, error) { - koDataPath := os.Getenv(koDataPathEnvName) - if koDataPath == "" { - return nil, fmt.Errorf("%q does not exist or is empty", koDataPathEnvName) + if shaRegexp.MatchString(revision) { + revision = revision[:7] } - return os.Open(filepath.Join(koDataPath, filename)) -} -// findPackedRef searches the packed-ref file for ref values. -// This can happen if the # of refs in a repo grows too much - git will try -// and condense them into a file. 
-// See https://git-scm.com/docs/git-pack-refs -func findPackedRef(ref string) ([]byte, error) { - f, err := koDataFile(packedRefsFile) - if err != nil { - return nil, err + if modified { + revision += "-dirty" } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - // We only care about lines with ` ` pairs. - // Why this might happen: - // 1. Lines starting with ^ refer to unpeeled tag SHAs - // (e.g. the commits pointed to by annotated tags) - s := strings.Split(scanner.Text(), " ") - if len(s) != 2 { - continue - } - if ref == s[1] { - return []byte(s[0]), nil - } - } - return nil, fmt.Errorf("%q ref not found in packed-refs", ref) + + return revision } diff --git a/vendor/knative.dev/pkg/changeset/doc.go b/vendor/knative.dev/pkg/changeset/doc.go index c56f7ebfb4..9727f27d3f 100644 --- a/vendor/knative.dev/pkg/changeset/doc.go +++ b/vendor/knative.dev/pkg/changeset/doc.go @@ -14,10 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package changeset provides Knative utilities for fetching GitHub Commit ID -// from kodata directory. It requires GitHub HEAD file to be linked into -// Knative component source code via the following command: -// ln -s -r .git/HEAD ./cmd//kodata/ -// Then ko will build this file into $KO_DATA_PATH when building the container -// for a Knative component. +// Package changeset returns version control info that was embedded in the +// golang binary package changeset diff --git a/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/addressable/addressable.go b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/addressable/addressable.go index d40153e950..3bdfb44f0a 100644 --- a/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/addressable/addressable.go +++ b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/addressable/addressable.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/leaderelection/config.go b/vendor/knative.dev/pkg/leaderelection/config.go index 11010014a0..ba6e0c60ff 100644 --- a/vendor/knative.dev/pkg/leaderelection/config.go +++ b/vendor/knative.dev/pkg/leaderelection/config.go @@ -17,6 +17,7 @@ limitations under the License. package leaderelection import ( + "context" "fmt" "os" "strconv" @@ -88,6 +89,24 @@ type Config struct { LeaseNamesPrefixMapping map[string]string } +type lecfg struct{} + +// WithConfig associates a leader election configuration with the +// context. +func WithConfig(ctx context.Context, cfg *Config) context.Context { + return context.WithValue(ctx, lecfg{}, cfg) +} + +// GetConfig gets the leader election config from the provided +// context. +func GetConfig(ctx context.Context) *Config { + untyped := ctx.Value(lecfg{}) + if untyped == nil { + return nil + } + return untyped.(*Config) +} + func (c *Config) GetComponentConfig(name string) ComponentConfig { return ComponentConfig{ Component: name, diff --git a/vendor/knative.dev/pkg/logging/config.go b/vendor/knative.dev/pkg/logging/config.go index 5dbf0a4e49..a95dfe8fa5 100644 --- a/vendor/knative.dev/pkg/logging/config.go +++ b/vendor/knative.dev/pkg/logging/config.go @@ -17,6 +17,7 @@ limitations under the License. 
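With the kodata/HEAD plumbing gone, the rewritten `changeset.Get` above relies entirely on the VCS metadata that the Go toolchain (1.18+) stamps into binaries. A minimal sketch (not vendored code) of calling it; only `Get` and `Unknown` come from the diff, the build-flag remark is standard Go toolchain behavior.

```go
package main

import (
	"fmt"

	"knative.dev/pkg/changeset"
)

func main() {
	// Returns a short revision such as "1a2b3c4" (with a "-dirty" suffix for
	// modified workspaces), or changeset.Unknown when no vcs.revision was
	// embedded, e.g. builds outside a git checkout or with -buildvcs=false.
	if rev := changeset.Get(); rev != changeset.Unknown {
		fmt.Println("built from revision", rev)
	} else {
		fmt.Println("no VCS revision embedded in this binary")
	}
}
```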
package logging import ( + "context" "encoding/json" "errors" "fmt" @@ -74,15 +75,14 @@ func NewLogger(configJSON string, levelOverride string, opts ...zap.Option) (*za } func enrichLoggerWithCommitID(logger *zap.Logger) *zap.SugaredLogger { - commitID, err := changeset.Get() - if err != nil { - logger.Info("Fetch GitHub commit ID from kodata failed", zap.Error(err)) + revision := changeset.Get() + if revision == changeset.Unknown { + logger.Info("Unable to read vcs.revision from binary") return logger.Sugar() } - // Enrich logs with GitHub commit ID. - return logger.With(zap.String(logkey.GitHubCommitID, commitID)).Sugar() - + // Enrich logs with the components git revision. + return logger.With(zap.String(logkey.Commit, revision)).Sugar() } // NewLoggerFromConfig creates a logger using the provided Config @@ -113,8 +113,8 @@ func newLoggerFromConfig(configJSON, levelOverride string, opts []zap.Option) (* return nil, zap.AtomicLevel{}, err } - logger.Info("Successfully created the logger.") - logger.Info("Logging level set to: " + loggingCfg.Level.String()) + logger.Debug("Successfully created the logger.") + logger.Debug("Logging level set to: " + loggingCfg.Level.String()) return logger, loggingCfg.Level, nil } @@ -136,6 +136,22 @@ type Config struct { LoggingLevel map[string]zapcore.Level } +type lcfg struct{} + +// WithConfig associates a logging configuration with the context. +func WithConfig(ctx context.Context, cfg *Config) context.Context { + return context.WithValue(ctx, lcfg{}, cfg) +} + +// GetConfig gets the logging config from the provided context. +func GetConfig(ctx context.Context) *Config { + untyped := ctx.Value(lcfg{}) + if untyped == nil { + return nil + } + return untyped.(*Config) +} + func defaultConfig() *Config { return &Config{ LoggingLevel: make(map[string]zapcore.Level), diff --git a/vendor/knative.dev/pkg/logging/logkey/constants.go b/vendor/knative.dev/pkg/logging/logkey/constants.go index 9e1c4d81e1..1e9b6214ce 100644 --- a/vendor/knative.dev/pkg/logging/logkey/constants.go +++ b/vendor/knative.dev/pkg/logging/logkey/constants.go @@ -59,7 +59,7 @@ const ( // KubernetesService is the key used to represent a Kubernetes service name in logs KubernetesService = "knative.dev/k8sservice" - // GitHubCommitID is the key used to represent the GitHub Commit ID where the - // Knative component was built from in logs - GitHubCommitID = "commit" + // Commit is the logging key used to represent the VCS revision that the + // Knative component was built from + Commit = "commit" ) diff --git a/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go index ac7d5e152c..a752abd9b2 100644 --- a/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/knative.dev/pkg/metrics/metrics_worker.go b/vendor/knative.dev/pkg/metrics/metrics_worker.go index 0b7753acb2..6f2ccff256 100644 --- a/vendor/knative.dev/pkg/metrics/metrics_worker.go +++ b/vendor/knative.dev/pkg/metrics/metrics_worker.go @@ -73,7 +73,7 @@ func (cmd *updateMetricsConfigWithExporter) handleCommand(w *metricsWorker) { ctx := cmd.ctx logger := logging.FromContext(ctx) if isNewExporterRequired(cmd.newConfig) { - logger.Info("Flushing the existing exporter before setting up the new exporter.") + logger.Debug("Flushing the existing exporter before setting up the new exporter.") flushGivenExporter(curMetricsExporter) e, f, err := newMetricsExporter(cmd.newConfig, logger) if err != nil { @@ -88,7 +88,7 @@ func (cmd *updateMetricsConfigWithExporter) handleCommand(w *metricsWorker) { cmd.done <- err return } - logger.Infof("Successfully updated the metrics exporter; old config: %v; new config %v", existingConfig, cmd.newConfig) + logger.Debugf("Successfully updated the metrics exporter; old config: %v; new config %v", existingConfig, cmd.newConfig) } setCurMetricsConfigUnlocked(cmd.newConfig) cmd.done <- nil diff --git a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go index 7c177b1299..b5b39d9fce 100644 --- a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go @@ -48,7 +48,7 @@ func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (vi logger.Errorw("Failed to create the Prometheus exporter.", zap.Error(err)) return nil, nil, err } - logger.Infof("Created Prometheus exporter with config: %v. Start the server for Prometheus exporter.", config) + logger.Debugf("Created Prometheus exporter with config: %v. Start the server for Prometheus exporter.", config) // Start the server for Prometheus scraping go func() { srv := startNewPromSrv(e, config.prometheusHost, config.prometheusPort) diff --git a/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go index e0108605b3..1d90e3a9dd 100644 --- a/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/pkg/network/transports.go b/vendor/knative.dev/pkg/network/transports.go index d96eda1177..d48cd62151 100644 --- a/vendor/knative.dev/pkg/network/transports.go +++ b/vendor/knative.dev/pkg/network/transports.go @@ -55,6 +55,9 @@ var backOffTemplate = wait.Backoff{ Steps: 15, } +// ErrTimeoutDialing when the timeout is reached after set amount of time. +var ErrTimeoutDialing = errors.New("timed out dialing") + // DialWithBackOff executes `net.Dialer.DialContext()` with exponentially increasing // dial timeouts. In addition it sleeps with random jitter between tries. 
var DialWithBackOff = NewBackoffDialer(backOffTemplate) @@ -110,7 +113,7 @@ func dialBackOffHelper(ctx context.Context, network, address string, bo wait.Bac return c, nil } elapsed := time.Since(start) - return nil, fmt.Errorf("timed out dialing after %.2fs", elapsed.Seconds()) + return nil, fmt.Errorf("%w %s after %.2fs", ErrTimeoutDialing, address, elapsed.Seconds()) } func newHTTPTransport(disableKeepAlives, disableCompression bool, maxIdle, maxIdlePerHost int) http.RoundTripper { diff --git a/vendor/knative.dev/pkg/test/presubmit-tests.sh b/vendor/knative.dev/pkg/test/presubmit-tests.sh index 3348f253d2..e03367a1b8 100644 --- a/vendor/knative.dev/pkg/test/presubmit-tests.sh +++ b/vendor/knative.dev/pkg/test/presubmit-tests.sh @@ -18,10 +18,6 @@ # It is started by prow for each PR. # For convenience, it can also be executed manually. -# Markdown linting failures don't show up properly in Gubernator resulting -# in a net-negative contributor experience. -export DISABLE_MD_LINTING=1 - export GO111MODULE=on source $(dirname $0)/../vendor/knative.dev/hack/presubmit-tests.sh diff --git a/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go index 8e2f56b47c..814f39909d 100644 --- a/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2021 The Knative Authors +Copyright 2022 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go index 4a32d2afa7..101a6ec22a 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go @@ -191,6 +191,27 @@ func validateMinMaxScale(config *autoscalerconfig.Config, m map[string]string) * errs = errs.Also(validateMaxScaleWithinLimit(k, max, config.MaxScaleLimit)) } + // if ActivationScale is also set, validate that min <= nz min <= max + if k, v, ok := ActivationScale.Get(m); ok { + if nzMin, err := strconv.ParseInt(v, 10, 32); err != nil { + errs = errs.Also(apis.ErrInvalidValue(v, k)) + } else if min > int32(nzMin) { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("min-scale=%d is greater than activation-scale=%d", min, nzMin), + Paths: []string{k}, + }) + } else if max < int32(nzMin) && max != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("max-scale=%d is less than activation-scale=%d", max, nzMin), + Paths: []string{k}, + }) + } else if nzMin < 2 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("activation-scale=%d must be greater than 1", nzMin), + Paths: []string{k}, + }) + } + } return errs } diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go index 5592d4d3fe..3d488f1056 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go @@ -216,6 +216,13 @@ const ( // PanicThresholdPercentageMax is the counterpart to the PanicThresholdPercentageMin // but bounding from above. PanicThresholdPercentageMax = 1000.0 + + // ActivationScale is the minimum, non-zero value that a service should scale to. 
+ // For example, if ActivationScale = 2, when a service is scaled from zero it would + // scale up to two replicas in this case. In essence, this allows one to set both a + // min-scale value while also preserving the ability to scale to zero. + // ActivationScale must be >= 2. + ActivationScaleKey = GroupName + "/activation-scale" ) var ( @@ -226,6 +233,7 @@ var ( InitialScaleAnnotationKey, GroupName + "/initialScale", } + MaxScaleAnnotation = kmap.KeyPriority{ MaxScaleAnnotationKey, GroupName + "/maxScale", @@ -237,6 +245,9 @@ var ( MetricAggregationAlgorithmKey, GroupName + "/metricAggregationAlgorithm", } + ActivationScale = kmap.KeyPriority{ + ActivationScaleKey, + } MinScaleAnnotation = kmap.KeyPriority{ MinScaleAnnotationKey, GroupName + "/minScale", diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go index 9107250b38..9cff0ab9f5 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go @@ -102,6 +102,12 @@ func (pa *PodAutoscaler) ScaleBounds(asConfig *autoscalerconfig.Config) (int32, return min, max } +// ActivationScale returns the min-non-zero-replicas annotation value or false +// if not present or invalid. +func (pa *PodAutoscaler) ActivationScale() (int32, bool) { + return pa.annotationInt32(autoscaling.ActivationScale) +} + // Target returns the target annotation value or false if not present, or invalid. func (pa *PodAutoscaler) Target() (float64, bool) { return pa.annotationFloat64(autoscaling.TargetAnnotation) } diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go index 51c71bc23d..0cf64b692a 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go @@ -32,7 +32,7 @@ import ( // PodAutoscaler is a Knative abstraction that encapsulates the interface by which Knative // components instantiate autoscalers. This definition is an abstraction that may be backed // by multiple definitions. For more information, see the Knative Pluggability presentation: -// https://docs.google.com/presentation/d/10KWynvAJYuOEWy69VBa6bHJVCqIsz1TNdEKosNvcpPY/edit +// https://docs.google.com/presentation/d/19vW9HFZ6Puxt31biNZF3uLRejDmu82rxJIk1cWmxF7w/edit type PodAutoscaler struct { metav1.TypeMeta `json:",inline"` // +optional diff --git a/vendor/knative.dev/serving/pkg/apis/config/defaults.go b/vendor/knative.dev/serving/pkg/apis/config/defaults.go index 9a33641b30..3b9c18c9ec 100644 --- a/vendor/knative.dev/serving/pkg/apis/config/defaults.go +++ b/vendor/knative.dev/serving/pkg/apis/config/defaults.go @@ -44,6 +44,14 @@ const ( // DefaultMaxRevisionTimeoutSeconds will be set if MaxRevisionTimeoutSeconds is not specified. DefaultMaxRevisionTimeoutSeconds = 10 * 60 + // DefaultRevisionResponseStartTimeoutSeconds will be set if ResponseStartTimeoutSeconds is not specified. + // For backward compatibility the default is kept similar to DefaultRevisionTimeoutSeconds, + // and should be revised in future releases. + DefaultRevisionResponseStartTimeoutSeconds = 5 * 60 + + // DefaultRevisionIdleTimeoutSeconds will be set if idleTimeoutSeconds is not specified.
+ DefaultRevisionIdleTimeoutSeconds = 0 + // DefaultInitContainerName is the default name we give to the init containers // specified by the user, if `name:` is omitted. DefaultInitContainerName = "init-container" @@ -71,14 +79,16 @@ var ( func defaultDefaultsConfig() *Defaults { return &Defaults{ - RevisionTimeoutSeconds: DefaultRevisionTimeoutSeconds, - MaxRevisionTimeoutSeconds: DefaultMaxRevisionTimeoutSeconds, - InitContainerNameTemplate: DefaultInitContainerNameTemplate, - UserContainerNameTemplate: DefaultUserContainerNameTemplate, - ContainerConcurrency: DefaultContainerConcurrency, - ContainerConcurrencyMaxLimit: DefaultMaxRevisionContainerConcurrency, - AllowContainerConcurrencyZero: DefaultAllowContainerConcurrencyZero, - EnableServiceLinks: ptr.Bool(false), + RevisionTimeoutSeconds: DefaultRevisionTimeoutSeconds, + MaxRevisionTimeoutSeconds: DefaultMaxRevisionTimeoutSeconds, + RevisionRequestStartTimeoutSeconds: DefaultRevisionResponseStartTimeoutSeconds, + RevisionIdleTimeoutSeconds: DefaultRevisionIdleTimeoutSeconds, + InitContainerNameTemplate: DefaultInitContainerNameTemplate, + UserContainerNameTemplate: DefaultUserContainerNameTemplate, + ContainerConcurrency: DefaultContainerConcurrency, + ContainerConcurrencyMaxLimit: DefaultMaxRevisionContainerConcurrency, + AllowContainerConcurrencyZero: DefaultAllowContainerConcurrencyZero, + EnableServiceLinks: ptr.Bool(false), } } @@ -111,6 +121,8 @@ func NewDefaultsConfigFromMap(data map[string]string) (*Defaults, error) { cm.AsInt64("revision-timeout-seconds", &nc.RevisionTimeoutSeconds), cm.AsInt64("max-revision-timeout-seconds", &nc.MaxRevisionTimeoutSeconds), + cm.AsInt64("revision-idle-timeout-seconds", &nc.RevisionIdleTimeoutSeconds), + cm.AsInt64("container-concurrency", &nc.ContainerConcurrency), cm.AsInt64("container-concurrency-max-limit", &nc.ContainerConcurrencyMaxLimit), @@ -124,9 +136,24 @@ func NewDefaultsConfigFromMap(data map[string]string) (*Defaults, error) { return nil, err } + // We default this to what the user has specified + nc.RevisionRequestStartTimeoutSeconds = nc.RevisionTimeoutSeconds + + if err := cm.Parse(data, + cm.AsInt64("revision-response-start-timeout-seconds", &nc.RevisionRequestStartTimeoutSeconds), + ); err != nil { + return nil, err + } + if nc.RevisionTimeoutSeconds > nc.MaxRevisionTimeoutSeconds { return nil, fmt.Errorf("revision-timeout-seconds (%d) cannot be greater than max-revision-timeout-seconds (%d)", nc.RevisionTimeoutSeconds, nc.MaxRevisionTimeoutSeconds) } + if nc.RevisionRequestStartTimeoutSeconds > 0 && nc.RevisionRequestStartTimeoutSeconds > nc.RevisionTimeoutSeconds { + return nil, fmt.Errorf("revision-response-start-timeout-seconds (%d) cannot be greater than revision-timeout-seconds (%d)", nc.RevisionRequestStartTimeoutSeconds, nc.RevisionTimeoutSeconds) + } + if nc.RevisionIdleTimeoutSeconds > 0 && nc.RevisionIdleTimeoutSeconds > nc.RevisionTimeoutSeconds { + return nil, fmt.Errorf("revision-idle-timeout-seconds (%d) cannot be greater than revision-timeout-seconds (%d)", nc.RevisionIdleTimeoutSeconds, nc.RevisionTimeoutSeconds) + } if nc.ContainerConcurrencyMaxLimit < 1 { return nil, apis.ErrOutOfBoundsValue( nc.ContainerConcurrencyMaxLimit, 1, math.MaxInt32, "container-concurrency-max-limit") @@ -157,6 +184,14 @@ type Defaults struct { // RevisionTimeoutSeconds must be less than this value. 
MaxRevisionTimeoutSeconds int64 + // This is the default number of seconds a request will be allowed to + // stay open while waiting to receive any bytes from the user's application. + RevisionRequestStartTimeoutSeconds int64 + + // RevisionIdleTimeoutSeconds is the maximum duration in seconds a request + // will be allowed to stay open while not receiving any bytes from the user's application. + RevisionIdleTimeoutSeconds int64 + InitContainerNameTemplate *ObjectMetaTemplate UserContainerNameTemplate *ObjectMetaTemplate diff --git a/vendor/knative.dev/serving/pkg/apis/config/features.go b/vendor/knative.dev/serving/pkg/apis/config/features.go index f1339bfa00..d8d8b04be7 100644 --- a/vendor/knative.dev/serving/pkg/apis/config/features.go +++ b/vendor/knative.dev/serving/pkg/apis/config/features.go @@ -39,6 +39,15 @@ const ( Allowed Flag = "Allowed" ) +// service annotations under features.knative.dev/* +const ( + // QueueProxyPodInfoFeatureKey gates mounting of podinfo with the value 'enabled' + QueueProxyPodInfoFeatureKey = "features.knative.dev/queueproxy-podinfo" + + // DryRunFeatureKey gates the podspec dryrun feature and runs with the value 'enabled' + DryRunFeatureKey = "features.knative.dev/podspec-dryrun" +) + func defaultFeaturesConfig() *Features { return &Features{ MultiContainer: Enabled, @@ -57,7 +66,10 @@ func defaultFeaturesConfig() *Features { PodSpecVolumesEmptyDir: Disabled, PodSpecPersistentVolumeClaim: Disabled, PodSpecPersistentVolumeWrite: Disabled, + QueueProxyMountPodInfo: Disabled, PodSpecInitContainers: Disabled, + PodSpecDNSPolicy: Disabled, + PodSpecDNSConfig: Disabled, TagHeaderBasedRouting: Disabled, AutoDetectHTTP2: Disabled, } @@ -85,7 +97,10 @@ func NewFeaturesConfigFromMap(data map[string]string) (*Features, error) { asFlag("kubernetes.podspec-init-containers", &nc.PodSpecInitContainers), asFlag("kubernetes.podspec-persistent-volume-claim", &nc.PodSpecPersistentVolumeClaim), asFlag("kubernetes.podspec-persistent-volume-write", &nc.PodSpecPersistentVolumeWrite), + asFlag("kubernetes.podspec-dnspolicy", &nc.PodSpecDNSPolicy), + asFlag("kubernetes.podspec-dnsconfig", &nc.PodSpecDNSConfig), asFlag("tag-header-based-routing", &nc.TagHeaderBasedRouting), + asFlag("queueproxy.mount-podinfo", &nc.QueueProxyMountPodInfo), asFlag("autodetect-http2", &nc.AutoDetectHTTP2)); err != nil { return nil, err } @@ -116,6 +131,9 @@ type Features struct { PodSpecInitContainers Flag PodSpecPersistentVolumeClaim Flag PodSpecPersistentVolumeWrite Flag + QueueProxyMountPodInfo Flag + PodSpecDNSPolicy Flag + PodSpecDNSConfig Flag TagHeaderBasedRouting Flag AutoDetectHTTP2 Flag } diff --git a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go index 5b43269b89..2c5e8fcee2 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go @@ -33,7 +33,6 @@ func VolumeMask(ctx context.Context, in *corev1.Volume) *corev1.Volume { if in == nil { return nil } - cfg := config.FromContextOrDefaults(ctx) out := new(corev1.Volume) @@ -41,14 +40,6 @@ func VolumeMask(ctx context.Context, in *corev1.Volume) *corev1.Volume { out.Name = in.Name out.VolumeSource = in.VolumeSource - if cfg.Features.PodSpecVolumesEmptyDir != config.Disabled { - out.EmptyDir = in.EmptyDir - } - - if cfg.Features.PodSpecPersistentVolumeClaim != config.Disabled { - out.PersistentVolumeClaim = in.PersistentVolumeClaim - } - return out } @@ -227,13 +218,18 @@ func PodSpecMask(ctx
context.Context, in *corev1.PodSpec) *corev1.PodSpec { if cfg.Features.PodSpecInitContainers != config.Disabled { out.InitContainers = in.InitContainers } + if cfg.Features.PodSpecDNSPolicy != config.Disabled { + out.DNSPolicy = in.DNSPolicy + } + if cfg.Features.PodSpecDNSConfig != config.Disabled { + out.DNSConfig = in.DNSConfig + } // Disallowed fields // This list is unnecessary, but added here for clarity out.RestartPolicy = "" out.TerminationGracePeriodSeconds = nil out.ActiveDeadlineSeconds = nil - out.DNSPolicy = "" out.NodeName = "" out.HostNetwork = false out.HostPID = false @@ -242,7 +238,6 @@ func PodSpecMask(ctx context.Context, in *corev1.PodSpec) *corev1.PodSpec { out.Hostname = "" out.Subdomain = "" out.Priority = nil - out.DNSConfig = nil out.ReadinessGates = nil return out diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go index 959d5e97a8..a2a23c9ef7 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go @@ -83,11 +83,23 @@ type RevisionSpec struct { // +optional ContainerConcurrency *int64 `json:"containerConcurrency,omitempty"` - // TimeoutSeconds is the maximum duration in seconds that the request routing - // layer will wait for a request delivered to a container to begin replying - // (send network traffic). If unspecified, a system default will be provided. + // TimeoutSeconds is the maximum duration in seconds that the request instance + // is allowed to respond to a request. If unspecified, a system default will + // be provided. // +optional TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + + // ResponseStartTimeoutSeconds is the maximum duration in seconds that the request + // routing layer will wait for a request delivered to a container to begin + // sending any network traffic. + // +optional + ResponseStartTimeoutSeconds *int64 `json:"responseStartTimeoutSeconds,omitempty"` + + // IdleTimeoutSeconds is the maximum duration in seconds a request will be allowed + // to stay open while not receiving any bytes from the user's application. If + // unspecified, a system default will be provided. + // +optional + IdleTimeoutSeconds *int64 `json:"idleTimeoutSeconds,omitempty"` } const ( diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go index 99ae1ca1d8..c42bb4b927 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go @@ -230,6 +230,16 @@ func (in *RevisionSpec) DeepCopyInto(out *RevisionSpec) { *out = new(int64) **out = **in } + if in.ResponseStartTimeoutSeconds != nil { + in, out := &in.ResponseStartTimeoutSeconds, &out.ResponseStartTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.IdleTimeoutSeconds != nil { + in, out := &in.IdleTimeoutSeconds, &out.IdleTimeoutSeconds + *out = new(int64) + **out = **in + } return } diff --git a/vendor/knative.dev/serving/pkg/networking/constants.go b/vendor/knative.dev/serving/pkg/networking/constants.go index 0fe1e5ea10..a832d46540 100644 --- a/vendor/knative.dev/serving/pkg/networking/constants.go +++ b/vendor/knative.dev/serving/pkg/networking/constants.go @@ -51,6 +51,10 @@ const ( // ServiceTypeKey is the label key attached to a service specifying the type of service. // e.g. Public, Private. 
ServiceTypeKey = networking.GroupName + "/serviceType" + + // ServingCertName is used by the secret name for internal TLS as "namespace-${ServingCertName}". + // Also the secret name has the label with "${ServingCertName}: data-plane" + ServingCertName = "serving-certs" ) // ServiceType is the enumeration type for the Kubernetes services diff --git a/vendor/knative.dev/serving/pkg/networking/util.go b/vendor/knative.dev/serving/pkg/networking/util.go index 2b0e51ca9e..ec9707d02a 100644 --- a/vendor/knative.dev/serving/pkg/networking/util.go +++ b/vendor/knative.dev/serving/pkg/networking/util.go @@ -5,21 +5,21 @@ import ( "fmt" "strings" - networkingpkg "knative.dev/networking/pkg" "knative.dev/networking/pkg/apis/networking" netv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" + netcfg "knative.dev/networking/pkg/config" "knative.dev/pkg/logging" ) // GetHTTPOption get http-protocol from resource annotations if not, get it from configmap config-network -func GetHTTPOption(ctx context.Context, networkConfig *networkingpkg.Config, annotations map[string]string) (netv1alpha1.HTTPOption, error) { +func GetHTTPOption(ctx context.Context, networkConfig *netcfg.Config, annotations map[string]string) (netv1alpha1.HTTPOption, error) { // Get HTTPOption via annotations. if len(annotations) != 0 && networking.GetHTTPProtocol(annotations) != "" { protocol := strings.ToLower(networking.GetHTTPProtocol(annotations)) - switch networkingpkg.HTTPProtocol(protocol) { - case networkingpkg.HTTPEnabled: + switch netcfg.HTTPProtocol(protocol) { + case netcfg.HTTPEnabled: return netv1alpha1.HTTPOptionEnabled, nil - case networkingpkg.HTTPRedirected: + case netcfg.HTTPRedirected: return netv1alpha1.HTTPOptionRedirected, nil default: return "", fmt.Errorf("incorrect http-protocol annotation: " + protocol) @@ -31,12 +31,12 @@ func GetHTTPOption(ctx context.Context, networkConfig *networkingpkg.Config, ann // Get HTTPOption via config-network. switch httpProtocol := networkConfig.HTTPProtocol; httpProtocol { - case networkingpkg.HTTPEnabled: + case netcfg.HTTPEnabled: return netv1alpha1.HTTPOptionEnabled, nil - case networkingpkg.HTTPRedirected: + case netcfg.HTTPRedirected: return netv1alpha1.HTTPOptionRedirected, nil // This will be deprecated soon - case networkingpkg.HTTPDisabled: + case netcfg.HTTPDisabled: logger.Warnf("http-protocol %s in config-network ConfigMap will be deprecated soon", httpProtocol) return "", nil default: diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go b/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go index bfaea11994..80070a10ce 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go +++ b/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/yaml" - netpkg "knative.dev/networking/pkg" + "knative.dev/networking/pkg/apis/networking" "knative.dev/pkg/configmap" "knative.dev/pkg/network" "knative.dev/serving/pkg/apis/serving" @@ -99,7 +99,7 @@ func (c *Domain) LookupDomainForLabels(labels map[string]string) string { specificity := -1 // If we see VisibilityLabelKey sets with VisibilityClusterLocal, that // will take precedence and the route will get a Cluster's Domain Name. - if labels[netpkg.VisibilityLabelKey] == serving.VisibilityClusterLocal { + if labels[networking.VisibilityLabelKey] == serving.VisibilityClusterLocal { return "svc." 
+ network.GetClusterDomainName() } for k, selector := range c.Domains { diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go b/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go index aece92fb51..dfbc19c314 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go +++ b/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go @@ -20,6 +20,7 @@ import ( "context" network "knative.dev/networking/pkg" + netcfg "knative.dev/networking/pkg/config" "knative.dev/pkg/configmap" "knative.dev/pkg/logging" cfgmap "knative.dev/serving/pkg/apis/config" @@ -33,7 +34,7 @@ type cfgKey struct{} type Config struct { Domain *Domain GC *gc.Config - Network *network.Config + Network *netcfg.Config Features *cfgmap.Features } @@ -88,7 +89,7 @@ func NewStore(ctx context.Context, onAfterStore ...func(name string, value inter configmap.Constructors{ DomainConfigName: NewDomainFromConfigMap, gc.ConfigName: gc.NewConfigFromConfigMapFunc(ctx), - network.ConfigName: network.NewConfigFromConfigMap, + netcfg.ConfigMapName: network.NewConfigFromConfigMap, cfgmap.FeaturesConfigName: cfgmap.NewFeaturesConfigFromConfigMap, }, onAfterStore..., @@ -108,7 +109,7 @@ func (s *Store) Load() *Config { config := &Config{ Domain: s.UntypedLoad(DomainConfigName).(*Domain).DeepCopy(), GC: s.UntypedLoad(gc.ConfigName).(*gc.Config).DeepCopy(), - Network: s.UntypedLoad(network.ConfigName).(*network.Config).DeepCopy(), + Network: s.UntypedLoad(netcfg.ConfigMapName).(*netcfg.Config).DeepCopy(), Features: nil, } diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go b/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go index 478df39b3f..0ca76e1931 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go +++ b/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go @@ -26,8 +26,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - network "knative.dev/networking/pkg" + netapi "knative.dev/networking/pkg/apis/networking" netv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" + netcfg "knative.dev/networking/pkg/config" "knative.dev/pkg/apis" pkgnet "knative.dev/pkg/network" "knative.dev/serving/pkg/apis/serving" @@ -72,7 +73,7 @@ func DomainNameFromTemplate(ctx context.Context, r metav1.ObjectMeta, name strin // These are the available properties they can choose from. // We could add more over time - e.g. RevisionName if we thought that // might be of interest to people. - data := network.DomainTemplateValues{ + data := netcfg.DomainTemplateValues{ Name: name, Namespace: r.Namespace, Domain: domain, @@ -86,9 +87,9 @@ func DomainNameFromTemplate(ctx context.Context, r metav1.ObjectMeta, name strin var templ *template.Template // If the route is "cluster local" then don't use the user-defined // domain template, use the default one - if rLabels[network.VisibilityLabelKey] == serving.VisibilityClusterLocal { + if rLabels[netapi.VisibilityLabelKey] == serving.VisibilityClusterLocal { templ = template.Must(template.New("domain-template").Parse( - network.DefaultDomainTemplate)) + netcfg.DefaultDomainTemplate)) } else { templ = networkConfig.GetDomainTemplate() } @@ -114,7 +115,7 @@ func HostnameFromTemplate(ctx context.Context, name, tag string) (string, error) // These are the available properties they can choose from. // We could add more over time - e.g. 
RevisionName if we thought that // might be of interest to people. - data := network.TagTemplateValues{ + data := netcfg.TagTemplateValues{ Name: name, Tag: tag, } diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go b/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go index ef5ecbd197..fc8dbd4e9a 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go +++ b/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go @@ -18,7 +18,7 @@ package labels import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - network "knative.dev/networking/pkg" + network "knative.dev/networking/pkg/apis/networking" "knative.dev/serving/pkg/apis/serving" ) diff --git a/vendor/knative.dev/serving/pkg/testing/v1/configuration.go b/vendor/knative.dev/serving/pkg/testing/v1/configuration.go index c1e355890c..e84e532d9d 100644 --- a/vendor/knative.dev/serving/pkg/testing/v1/configuration.go +++ b/vendor/knative.dev/serving/pkg/testing/v1/configuration.go @@ -124,3 +124,17 @@ func WithConfigRevisionTimeoutSeconds(revisionTimeoutSeconds int64) ConfigOption cfg.Spec.Template.Spec.TimeoutSeconds = ptr.Int64(revisionTimeoutSeconds) } } + +// WithConfigRevisionResponseStartTimeoutSeconds sets revision first byte timeout +func WithConfigRevisionResponseStartTimeoutSeconds(revisionResponseStartTimeoutSeconds int64) ConfigOption { + return func(cfg *v1.Configuration) { + cfg.Spec.Template.Spec.ResponseStartTimeoutSeconds = ptr.Int64(revisionResponseStartTimeoutSeconds) + } +} + +// WithConfigRevisionIdleTimeoutSeconds sets revision idle timeout +func WithConfigRevisionIdleTimeoutSeconds(revisionIdleTimeoutSeconds int64) ConfigOption { + return func(cfg *v1.Configuration) { + cfg.Spec.Template.Spec.IdleTimeoutSeconds = ptr.Int64(revisionIdleTimeoutSeconds) + } +} diff --git a/vendor/knative.dev/serving/pkg/testing/v1/service.go b/vendor/knative.dev/serving/pkg/testing/v1/service.go index 26e86e90fc..8dbec1a4d6 100644 --- a/vendor/knative.dev/serving/pkg/testing/v1/service.go +++ b/vendor/knative.dev/serving/pkg/testing/v1/service.go @@ -182,6 +182,20 @@ func WithRevisionTimeoutSeconds(revisionTimeoutSeconds int64) ServiceOption { } } +// WithRevisionResponseStartTimeoutSeconds sets revision first byte timeout +func WithRevisionResponseStartTimeoutSeconds(revisionResponseStartTimeoutSeconds int64) ServiceOption { + return func(service *v1.Service) { + service.Spec.Template.Spec.ResponseStartTimeoutSeconds = ptr.Int64(revisionResponseStartTimeoutSeconds) + } +} + +// WithRevisionIdleTimeoutSeconds sets revision idle timeout +func WithRevisionIdleTimeoutSeconds(revisionIdleTimeoutSeconds int64) ServiceOption { + return func(service *v1.Service) { + service.Spec.Template.Spec.IdleTimeoutSeconds = ptr.Int64(revisionIdleTimeoutSeconds) + } +} + // WithServiceAccountName sets revision service account name func WithServiceAccountName(serviceAccountName string) ServiceOption { return func(service *v1.Service) { diff --git a/vendor/modules.txt b/vendor/modules.txt index c226ffaa0f..f777dfd2a7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -187,7 +187,7 @@ github.com/cloudevents/sdk-go/sql/v2/gen github.com/cloudevents/sdk-go/sql/v2/parser github.com/cloudevents/sdk-go/sql/v2/runtime github.com/cloudevents/sdk-go/sql/v2/utils -# github.com/cloudevents/sdk-go/v2 v2.8.0 +# github.com/cloudevents/sdk-go/v2 v2.10.1 ## explicit; go 1.14 github.com/cloudevents/sdk-go/v2 github.com/cloudevents/sdk-go/v2/binding @@ -965,10 
+965,10 @@ go.starlark.net/syntax # go.uber.org/atomic v1.9.0 ## explicit; go 1.13 go.uber.org/atomic -# go.uber.org/multierr v1.7.0 +# go.uber.org/multierr v1.8.0 ## explicit; go 1.14 go.uber.org/multierr -# go.uber.org/zap v1.20.0 +# go.uber.org/zap v1.21.0 ## explicit; go 1.13 go.uber.org/zap go.uber.org/zap/buffer @@ -1023,7 +1023,7 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore @@ -1230,7 +1230,7 @@ gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +# gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 # gotest.tools/v3 v3.1.0 @@ -1241,7 +1241,7 @@ gotest.tools/v3/internal/assert gotest.tools/v3/internal/difflib gotest.tools/v3/internal/format gotest.tools/v3/internal/source -# k8s.io/api v0.23.5 => k8s.io/api v0.23.5 +# k8s.io/api v0.23.9 => k8s.io/api v0.23.5 ## explicit; go 1.16 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 @@ -1288,11 +1288,11 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.23.4 +# k8s.io/apiextensions-apiserver v0.23.9 ## explicit; go 1.16 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.23.5 => k8s.io/apimachinery v0.23.5 +# k8s.io/apimachinery v0.23.9 => k8s.io/apimachinery v0.23.5 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1490,10 +1490,12 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/klog/v2 v2.60.1-0.20220317184644-43cc75f9ae89 +# k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer +k8s.io/klog/v2/internal/clock +k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity # k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf @@ -1508,12 +1510,14 @@ k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing k8s.io/utils/integer +k8s.io/utils/internal/third_party/forked/golang/golang-lru k8s.io/utils/internal/third_party/forked/golang/net +k8s.io/utils/lru k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/trace -# knative.dev/client v0.31.1 -## explicit; go 1.17 +# knative.dev/client v0.34.0 +## explicit; go 1.18 knative.dev/client/lib/test knative.dev/client/pkg/apis/client knative.dev/client/pkg/apis/client/v1alpha1 @@ -1528,8 +1532,8 @@ knative.dev/client/pkg/serving/v1 knative.dev/client/pkg/util knative.dev/client/pkg/util/mock knative.dev/client/pkg/wait -# knative.dev/eventing v0.31.3-0.20220802083815-e345f5f3695d -## explicit; go 1.16 +# knative.dev/eventing v0.34.2 +## explicit; go 1.18 knative.dev/eventing/pkg/apis/config knative.dev/eventing/pkg/apis/duck knative.dev/eventing/pkg/apis/duck/v1 @@ -1548,16 +1552,23 @@ knative.dev/eventing/pkg/apis/sources/v1 knative.dev/eventing/pkg/apis/sources/v1beta2 knative.dev/eventing/pkg/client/clientset/versioned/scheme knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1 -# knative.dev/hack v0.0.0-20220629135029-9e09abcd61f0 -## explicit; go 1.14 +# knative.dev/hack 
v0.0.0-20220823140917-8d1e4ccf9dc3 +## explicit; go 1.17 knative.dev/hack -# knative.dev/networking v0.0.0-20220412163509-1145ec58c8be -## explicit; go 1.16 +# knative.dev/networking v0.0.0-20220818010248-e51df7cdf571 +## explicit; go 1.18 knative.dev/networking/pkg knative.dev/networking/pkg/apis/networking knative.dev/networking/pkg/apis/networking/v1alpha1 -# knative.dev/pkg v0.0.0-20220412134708-e325df66cb51 -## explicit; go 1.17 +knative.dev/networking/pkg/config +knative.dev/networking/pkg/http +knative.dev/networking/pkg/http/header +knative.dev/networking/pkg/http/probe +knative.dev/networking/pkg/http/proxy +knative.dev/networking/pkg/http/stats +knative.dev/networking/pkg/k8s +# knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 +## explicit; go 1.18 knative.dev/pkg/apis knative.dev/pkg/apis/duck knative.dev/pkg/apis/duck/ducktypes @@ -1598,8 +1609,8 @@ knative.dev/pkg/tracing/config knative.dev/pkg/tracing/propagation knative.dev/pkg/tracing/propagation/tracecontextb3 knative.dev/pkg/tracker -# knative.dev/serving v0.31.1-0.20220630164831-69a88e92b069 -## explicit; go 1.17 +# knative.dev/serving v0.34.1 +## explicit; go 1.18 knative.dev/serving/pkg/apis/autoscaling knative.dev/serving/pkg/apis/autoscaling/v1alpha1 knative.dev/serving/pkg/apis/config