232 files changed, 40197 insertions, 205 deletions
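Before the per-file changes, a quick orientation: the series below introduces `podman secret`, backed by the secrets manager from containers/common. The sketch that follows is a minimal, hedged example of that manager's API as this diff uses it — `NewManager`, `LookupSecretData`, and the `<graphroot>/secrets` location all appear in the diff itself, while the `Store` call and the file driver's `path` option are assumptions about the same-era containers/common library, not shown here.

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/secrets"
)

func main() {
	// GetSecretsStorageDir in this diff resolves to <graphroot>/secrets;
	// the concrete path here is a stand-in for illustration.
	manager, err := secrets.NewManager("/var/lib/containers/storage/secrets")
	if err != nil {
		panic(err)
	}

	// Assumed API: Store(name, data, driverType, driverOpts). The default
	// "file" driver is unencrypted and (assumption) keeps payloads under
	// the directory named by its "path" option.
	driverOpts := map[string]string{
		"path": "/var/lib/containers/storage/secrets/filedriver",
	}
	id, err := manager.Store("mysecret", []byte("s3cr3t"), "file", driverOpts)
	if err != nil {
		panic(err)
	}
	fmt.Println("created secret", id)

	// LookupSecretData returns metadata plus payload; it is the call
	// extractSecretToCtrStorage (below) makes before copying data into a
	// container's static dir.
	secr, data, err := manager.LookupSecretData("mysecret")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (%s): %d bytes\n", secr.Name, secr.ID, len(data))
}
```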
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go index 09dd74e20..ff15983cd 100644 --- a/cmd/podman/common/completion.go +++ b/cmd/podman/common/completion.go @@ -212,6 +212,28 @@ func getImages(cmd *cobra.Command, toComplete string) ([]string, cobra.ShellComp return suggestions, cobra.ShellCompDirectiveNoFileComp } +func getSecrets(cmd *cobra.Command, toComplete string) ([]string, cobra.ShellCompDirective) { + suggestions := []string{} + + engine, err := setupContainerEngine(cmd) + if err != nil { + cobra.CompErrorln(err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + secrets, err := engine.SecretList(registry.GetContext()) + if err != nil { + cobra.CompErrorln(err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + for _, s := range secrets { + if strings.HasPrefix(s.Spec.Name, toComplete) { + suggestions = append(suggestions, s.Spec.Name) + } + } + return suggestions, cobra.ShellCompDirectiveNoFileComp +} + func getRegistries() ([]string, cobra.ShellCompDirective) { regs, err := registries.GetRegistries() if err != nil { @@ -412,6 +434,21 @@ func AutocompleteVolumes(cmd *cobra.Command, args []string, toComplete string) ( return getVolumes(cmd, toComplete) } +// AutocompleteSecrets - Autocomplete secrets. +func AutocompleteSecrets(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if !validCurrentCmdLine(cmd, args, toComplete) { + return nil, cobra.ShellCompDirectiveNoFileComp + } + return getSecrets(cmd, toComplete) +} + +func AutocompleteSecretCreate(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if len(args) == 1 { + return nil, cobra.ShellCompDirectiveDefault + } + return nil, cobra.ShellCompDirectiveNoFileComp +} + // AutocompleteImages - Autocomplete images. 
func AutocompleteImages(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if !validCurrentCmdLine(cmd, args, toComplete) { @@ -426,6 +463,12 @@ func AutocompleteCreateRun(cmd *cobra.Command, args []string, toComplete string) return nil, cobra.ShellCompDirectiveNoFileComp } if len(args) < 1 { + // check if the rootfs flag is set + // if it is set to true provide directory completion + rootfs, err := cmd.Flags().GetBool("rootfs") + if err == nil && rootfs { + return nil, cobra.ShellCompDirectiveFilterDirs + } return getImages(cmd, toComplete) } // TODO: add path completion for files in the image diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go index 915ff63b6..d8935628e 100644 --- a/cmd/podman/common/create.go +++ b/cmd/podman/common/create.go @@ -603,6 +603,14 @@ func DefineCreateFlags(cmd *cobra.Command, cf *ContainerCLIOpts) { ) _ = cmd.RegisterFlagCompletionFunc(sdnotifyFlagName, AutocompleteSDNotify) + secretFlagName := "secret" + createFlags.StringArrayVar( + &cf.Secrets, + secretFlagName, []string{}, + "Add secret to container", + ) + _ = cmd.RegisterFlagCompletionFunc(secretFlagName, AutocompleteSecrets) + securityOptFlagName := "security-opt" createFlags.StringArrayVar( &cf.SecurityOpt, diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go index d86a6d364..67d40ac43 100644 --- a/cmd/podman/common/create_opts.go +++ b/cmd/podman/common/create_opts.go @@ -93,6 +93,7 @@ type ContainerCLIOpts struct { Replace bool Rm bool RootFS bool + Secrets []string SecurityOpt []string SdNotifyMode string ShmSize string diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go index 4cc53a630..975c76fd9 100644 --- a/cmd/podman/common/specgen.go +++ b/cmd/podman/common/specgen.go @@ -642,6 +642,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string s.StopTimeout = &c.StopTimeout s.Timezone = c.Timezone s.Umask = c.Umask + s.Secrets = c.Secrets return nil } diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go index 1f06dace9..895bdb631 100644 --- a/cmd/podman/images/build.go +++ b/cmd/podman/images/build.go @@ -1,9 +1,11 @@ package images import ( + "io" "os" "path/filepath" "strings" + "time" "github.com/containers/buildah" "github.com/containers/buildah/imagebuildah" @@ -11,6 +13,8 @@ import ( "github.com/containers/buildah/pkg/parse" "github.com/containers/common/pkg/completion" "github.com/containers/common/pkg/config" + encconfig "github.com/containers/ocicrypt/config" + enchelpers "github.com/containers/ocicrypt/helpers" "github.com/containers/podman/v2/cmd/podman/common" "github.com/containers/podman/v2/cmd/podman/registry" "github.com/containers/podman/v2/cmd/podman/utils" @@ -78,7 +82,8 @@ func useLayers() string { func init() { registry.Commands = append(registry.Commands, registry.CliCommand{ - Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: buildCmd, }) buildFlags(buildCmd) @@ -151,8 +156,21 @@ func buildFlags(cmd *cobra.Command) { // Add the completion functions fromAndBudFlagsCompletions := buildahCLI.GetFromAndBudFlagsCompletions() completion.CompleteCommandFlags(cmd, fromAndBudFlagsCompletions) - _ = flags.MarkHidden("signature-policy") flags.SetNormalizeFunc(buildahCLI.AliasFlags) + if registry.IsRemote() { + flag = flags.Lookup("isolation") + buildOpts.Isolation = buildah.OCI + if err := flag.Value.Set(buildah.OCI); err != nil { + 
logrus.Errorf("unable to set --isolation to %v: %v", buildah.OCI, err) + } + flag.DefValue = buildah.OCI + _ = flags.MarkHidden("disable-content-trust") + _ = flags.MarkHidden("cache-from") + _ = flags.MarkHidden("sign-by") + _ = flags.MarkHidden("signature-policy") + _ = flags.MarkHidden("tls-verify") + _ = flags.MarkHidden("compress") + } } // build executes the build command. @@ -246,7 +264,18 @@ func build(cmd *cobra.Command, args []string) error { return err } - _, err = registry.ImageEngine().Build(registry.GetContext(), containerFiles, *apiBuildOpts) + report, err := registry.ImageEngine().Build(registry.GetContext(), containerFiles, *apiBuildOpts) + + if cmd.Flag("iidfile").Changed { + f, err := os.Create(buildOpts.Iidfile) + if err != nil { + return err + } + if _, err := f.WriteString("sha256:" + report.ID); err != nil { + return err + } + } + return err } @@ -308,6 +337,10 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil flags.Layers = false } + var stdin io.Reader + if flags.Stdin { + stdin = os.Stdin + } var stdout, stderr, reporter *os.File stdout = os.Stdout stderr = os.Stderr @@ -402,10 +435,21 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil runtimeFlags = append(runtimeFlags, "--systemd-cgroup") } + imageOS, arch, err := parse.PlatformFromOptions(c) + if err != nil { + return nil, err + } + + decConfig, err := getDecryptConfig(flags.DecryptionKeys) + if err != nil { + return nil, errors.Wrapf(err, "unable to obtain decrypt config") + } + opts := imagebuildah.BuildOptions{ AddCapabilities: flags.CapAdd, AdditionalTags: tags, Annotations: flags.Annotation, + Architecture: arch, Args: args, BlobDirectory: flags.BlobCache, CNIConfigDir: flags.CNIConfigDir, @@ -433,17 +477,25 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil DropCapabilities: flags.CapDrop, Err: stderr, ForceRmIntermediateCtrs: flags.ForceRm, + From: flags.From, IDMappingOptions: idmappingOptions, - IIDFile: flags.Iidfile, + In: stdin, Isolation: isolation, + Jobs: &flags.Jobs, Labels: flags.Label, Layers: flags.Layers, + LogRusage: flags.LogRusage, + Manifest: flags.Manifest, + MaxPullPushRetries: 3, NamespaceOptions: nsValues, NoCache: flags.NoCache, + OS: imageOS, + OciDecryptConfig: decConfig, Out: stdout, Output: output, OutputFormat: format, PullPolicy: pullPolicy, + PullPushRetryDelay: 2 * time.Second, Quiet: flags.Quiet, RemoveIntermediateCtrs: flags.Rm, ReportWriter: reporter, @@ -459,3 +511,18 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil return &entities.BuildOptions{BuildOptions: opts}, nil } + +func getDecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) { + decConfig := &encconfig.DecryptConfig{} + if len(decryptionKeys) > 0 { + // decryption + dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys) + if err != nil { + return nil, errors.Wrapf(err, "invalid decryption keys") + } + cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc}) + decConfig = cc.DecryptConfig + } + + return decConfig, nil +} diff --git a/cmd/podman/main.go b/cmd/podman/main.go index f076d13f3..05b36295b 100644 --- a/cmd/podman/main.go +++ b/cmd/podman/main.go @@ -14,6 +14,7 @@ import ( _ "github.com/containers/podman/v2/cmd/podman/play" _ "github.com/containers/podman/v2/cmd/podman/pods" "github.com/containers/podman/v2/cmd/podman/registry" + _ "github.com/containers/podman/v2/cmd/podman/secrets" _ 
"github.com/containers/podman/v2/cmd/podman/system" _ "github.com/containers/podman/v2/cmd/podman/system/connection" _ "github.com/containers/podman/v2/cmd/podman/volumes" diff --git a/cmd/podman/secrets/create.go b/cmd/podman/secrets/create.go new file mode 100644 index 000000000..e58ab57cd --- /dev/null +++ b/cmd/podman/secrets/create.go @@ -0,0 +1,80 @@ +package secrets + +import ( + "context" + "errors" + "fmt" + "io" + "os" + + "github.com/containers/common/pkg/completion" + "github.com/containers/podman/v2/cmd/podman/common" + "github.com/containers/podman/v2/cmd/podman/registry" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + createCmd = &cobra.Command{ + Use: "create [options] SECRET FILE|-", + Short: "Create a new secret", + Long: "Create a secret. Input can be a path to a file or \"-\" (read from stdin). Default driver is file (unencrypted).", + RunE: create, + Args: cobra.ExactArgs(2), + Example: `podman secret create mysecret /path/to/secret + printf "secretdata" | podman secret create mysecret -`, + ValidArgsFunction: common.AutocompleteSecretCreate, + } +) + +var ( + createOpts = entities.SecretCreateOptions{} +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: createCmd, + Parent: secretCmd, + }) + + flags := createCmd.Flags() + + driverFlagName := "driver" + flags.StringVar(&createOpts.Driver, driverFlagName, "file", "Specify secret driver") + _ = createCmd.RegisterFlagCompletionFunc(driverFlagName, completion.AutocompleteNone) +} + +func create(cmd *cobra.Command, args []string) error { + name := args[0] + + var err error + path := args[1] + + var reader io.Reader + if path == "-" || path == "/dev/stdin" { + stat, err := os.Stdin.Stat() + if err != nil { + return err + } + if (stat.Mode() & os.ModeNamedPipe) == 0 { + return errors.New("if `-` is used, data must be passed into stdin") + + } + reader = os.Stdin + } else { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + reader = file + } + + report, err := registry.ContainerEngine().SecretCreate(context.Background(), name, reader, createOpts) + if err != nil { + return err + } + fmt.Println(report.ID) + return nil +} diff --git a/cmd/podman/secrets/inspect.go b/cmd/podman/secrets/inspect.go new file mode 100644 index 000000000..f38ba7f65 --- /dev/null +++ b/cmd/podman/secrets/inspect.go @@ -0,0 +1,82 @@ +package secrets + +import ( + "context" + "encoding/json" + "fmt" + "html/template" + "os" + "text/tabwriter" + + "github.com/containers/common/pkg/report" + "github.com/containers/podman/v2/cmd/podman/common" + "github.com/containers/podman/v2/cmd/podman/parse" + "github.com/containers/podman/v2/cmd/podman/registry" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect [options] SECRET [SECRET...]", + Short: "Inspect a secret", + Long: "Display detail information on one or more secrets", + RunE: inspect, + Example: "podman secret inspect MYSECRET", + Args: cobra.MinimumNArgs(1), + ValidArgsFunction: common.AutocompleteSecrets, + } +) + +var format string + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: inspectCmd, + Parent: secretCmd, + }) + flags := inspectCmd.Flags() + formatFlagName := 
"format" + flags.StringVar(&format, formatFlagName, "", "Format volume output using Go template") + _ = inspectCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteJSONFormat) +} + +func inspect(cmd *cobra.Command, args []string) error { + inspected, errs, _ := registry.ContainerEngine().SecretInspect(context.Background(), args) + + // always print valid list + if len(inspected) == 0 { + inspected = []*entities.SecretInfoReport{} + } + + if cmd.Flags().Changed("format") { + row := report.NormalizeFormat(format) + formatted := parse.EnforceRange(row) + + tmpl, err := template.New("inspect secret").Parse(formatted) + if err != nil { + return err + } + w := tabwriter.NewWriter(os.Stdout, 12, 2, 2, ' ', 0) + defer w.Flush() + tmpl.Execute(w, inspected) + } else { + buf, err := json.MarshalIndent(inspected, "", " ") + if err != nil { + return err + } + fmt.Println(string(buf)) + } + + if len(errs) > 0 { + if len(errs) > 1 { + for _, err := range errs[1:] { + fmt.Fprintf(os.Stderr, "error inspecting secret: %v\n", err) + } + } + return errors.Errorf("error inspecting secret: %v", errs[0]) + } + return nil +} diff --git a/cmd/podman/secrets/list.go b/cmd/podman/secrets/list.go new file mode 100644 index 000000000..dff4bfdca --- /dev/null +++ b/cmd/podman/secrets/list.go @@ -0,0 +1,99 @@ +package secrets + +import ( + "context" + "html/template" + "os" + "text/tabwriter" + "time" + + "github.com/containers/common/pkg/completion" + "github.com/containers/common/pkg/report" + "github.com/containers/podman/v2/cmd/podman/common" + "github.com/containers/podman/v2/cmd/podman/parse" + "github.com/containers/podman/v2/cmd/podman/registry" + "github.com/containers/podman/v2/cmd/podman/validate" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + lsCmd = &cobra.Command{ + Use: "ls [options]", + Aliases: []string{"list"}, + Short: "List secrets", + RunE: ls, + Example: "podman secret ls", + Args: validate.NoArgs, + ValidArgsFunction: completion.AutocompleteNone, + } + listFlag = listFlagType{} +) + +type listFlagType struct { + format string + noHeading bool +} + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: lsCmd, + Parent: secretCmd, + }) + + flags := lsCmd.Flags() + formatFlagName := "format" + flags.StringVar(&listFlag.format, formatFlagName, "{{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.CreatedAt}}\t{{.UpdatedAt}}\t\n", "Format volume output using Go template") + _ = lsCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteJSONFormat) + +} + +func ls(cmd *cobra.Command, args []string) error { + responses, err := registry.ContainerEngine().SecretList(context.Background()) + if err != nil { + return err + } + listed := make([]*entities.SecretListReport, 0, len(responses)) + for _, response := range responses { + listed = append(listed, &entities.SecretListReport{ + ID: response.ID, + Name: response.Spec.Name, + CreatedAt: units.HumanDuration(time.Since(response.CreatedAt)) + " ago", + UpdatedAt: units.HumanDuration(time.Since(response.UpdatedAt)) + " ago", + Driver: response.Spec.Driver.Name, + }) + + } + return outputTemplate(cmd, listed) +} + +func outputTemplate(cmd *cobra.Command, responses []*entities.SecretListReport) error { + headers := report.Headers(entities.SecretListReport{}, map[string]string{ + "CreatedAt": "CREATED", + "UpdatedAt": "UPDATED", + }) + + row := 
report.NormalizeFormat(listFlag.format) + format := parse.EnforceRange(row) + + tmpl, err := template.New("list secret").Parse(format) + if err != nil { + return err + } + w := tabwriter.NewWriter(os.Stdout, 12, 2, 2, ' ', 0) + defer w.Flush() + + if cmd.Flags().Changed("format") && !parse.HasTable(listFlag.format) { + listFlag.noHeading = true + } + + if !listFlag.noHeading { + if err := tmpl.Execute(w, headers); err != nil { + return errors.Wrapf(err, "failed to write report column headers") + } + } + return tmpl.Execute(w, responses) +} diff --git a/cmd/podman/secrets/rm.go b/cmd/podman/secrets/rm.go new file mode 100644 index 000000000..c72a3c171 --- /dev/null +++ b/cmd/podman/secrets/rm.go @@ -0,0 +1,58 @@ +package secrets + +import ( + "context" + "errors" + "fmt" + + "github.com/containers/podman/v2/cmd/podman/common" + "github.com/containers/podman/v2/cmd/podman/registry" + "github.com/containers/podman/v2/cmd/podman/utils" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + rmCmd = &cobra.Command{ + Use: "rm [options] SECRET [SECRET...]", + Short: "Remove one or more secrets", + RunE: rm, + ValidArgsFunction: common.AutocompleteSecrets, + Example: "podman secret rm mysecret1 mysecret2", + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: rmCmd, + Parent: secretCmd, + }) + flags := rmCmd.Flags() + flags.BoolVarP(&rmOptions.All, "all", "a", false, "Remove all secrets") +} + +var ( + rmOptions = entities.SecretRmOptions{} +) + +func rm(cmd *cobra.Command, args []string) error { + var ( + errs utils.OutputErrors + ) + if (len(args) > 0 && rmOptions.All) || (len(args) < 1 && !rmOptions.All) { + return errors.New("`podman secret rm` requires one argument, or the --all flag") + } + responses, err := registry.ContainerEngine().SecretRm(context.Background(), args, rmOptions) + if err != nil { + return err + } + for _, r := range responses { + if r.Err == nil { + fmt.Println(r.ID) + } else { + errs = append(errs, r.Err) + } + } + return errs.PrintErrors() +} diff --git a/cmd/podman/secrets/secret.go b/cmd/podman/secrets/secret.go new file mode 100644 index 000000000..4e6b6ec7b --- /dev/null +++ b/cmd/podman/secrets/secret.go @@ -0,0 +1,25 @@ +package secrets + +import ( + "github.com/containers/podman/v2/cmd/podman/registry" + "github.com/containers/podman/v2/cmd/podman/validate" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/spf13/cobra" +) + +var ( + // Command: podman _secret_ + secretCmd = &cobra.Command{ + Use: "secret", + Short: "Manage secrets", + Long: "Manage secrets", + RunE: validate.SubCommandExists, + } +) + +func init() { + registry.Commands = append(registry.Commands, registry.CliCommand{ + Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode}, + Command: secretCmd, + }) +} diff --git a/commands-demo.md b/commands-demo.md index e91609b0f..ececf0a22 100644 --- a/commands-demo.md +++ b/commands-demo.md @@ -80,6 +80,11 @@ | [podman-run(1)](https://podman.readthedocs.io/en/latest/markdown/podman-run.1.html) | Run a command in a new container | | [podman-save(1)](https://podman.readthedocs.io/en/latest/markdown/podman-save.1.html) | Save an image to a container archive | | [podman-search(1)](https://podman.readthedocs.io/en/latest/markdown/podman-search.1.html) | Search a registry for an image | +| 
[podman-secret(1)](https://podman.readthedocs.io/en/latest/markdown/podman-secret.1.html) | Manage podman secrets | +| [podman-secret-create(1)](https://podman.readthedocs.io/en/latest/markdown/podman-secret-create.1.html) | Create a new secret | +| [podman-secret-inspect(1)](https://podman.readthedocs.io/en/latest/markdown/podman-secret-inspect.1.html) | Display detailed information on one or more secrets | +| [podman-secret-ls(1)](https://podman.readthedocs.io/en/latest/markdown/podman-secret-ls.1.html) | List all the available secrets | +| [podman-secret-rm(1)](https://podman.readthedocs.io/en/latest/markdown/podman-secret-rm.1.html) | Remove one or more secrets | | [podman-start(1)](https://podman.readthedocs.io/en/latest/markdown/podman-start.1.html) | Start one or more containers | | [podman-stats(1)](https://podman.readthedocs.io/en/latest/markdown/podman-stats.1.html) | Display a live stream of one or more container's resource usage statistics | | [podman-stop(1)](https://podman.readthedocs.io/en/latest/markdown/podman-stop.1.html) | Stops one or more running containers | diff --git a/docs/source/Commands.rst b/docs/source/Commands.rst index 563462377..0bb23f71b 100644 --- a/docs/source/Commands.rst +++ b/docs/source/Commands.rst @@ -89,6 +89,8 @@ Commands :doc:`search <markdown/podman-search.1>` Search registry for image +:doc:`secret <markdown/podman-secret.1>` Manage podman secrets + :doc:`start <markdown/podman-start.1>` Start one or more containers :doc:`stats <markdown/podman-stats.1>` Display a live stream of container resource usage statistics diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md index 1d72db065..7782949a9 100644 --- a/docs/source/markdown/podman-create.1.md +++ b/docs/source/markdown/podman-create.1.md @@ -825,6 +825,16 @@ Specify the policy to select the seccomp profile. If set to *image*, Podman will Note that this feature is experimental and may change in the future. +#### **--secret**=*secret* + +Give the container access to a secret. Can be specified multiple times. + +A secret is a blob of sensitive data which a container needs at runtime but +should not be stored in the image or in source control, such as usernames and passwords, +TLS certificates and keys, SSH keys or other important generic strings or binary content (up to 500 kb in size). + +Secrets are managed using the `podman secret` command. + #### **--security-opt**=*option* Security Options @@ -1277,7 +1287,7 @@ b NOTE: Use the environment variable `TMPDIR` to change the temporary storage location of downloaded container images. Podman defaults to use `/var/tmp`. ## SEE ALSO -**podman**(1), **podman-save**(1), **podman-ps**(1), **podman-attach**(1), **podman-pod-create**(1), **podman-port**(1), **podman-kill**(1), **podman-stop**(1), +**podman**(1), **podman-secret**(1), **podman-save**(1), **podman-ps**(1), **podman-attach**(1), **podman-pod-create**(1), **podman-port**(1), **podman-kill**(1), **podman-stop**(1), **podman-generate-systemd**(1) **podman-rm**(1), **subgid**(5), **subuid**(5), **containers.conf**(5), **systemd.unit**(5), **setsebool**(8), **slirp4netns**(1), **fuse-overlayfs**(1), **proc**(5)**. ## HISTORY diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md index 0838dd546..49b45f4f8 100644 --- a/docs/source/markdown/podman-run.1.md +++ b/docs/source/markdown/podman-run.1.md @@ -877,6 +877,16 @@ Specify the policy to select the seccomp profile. 
If set to *image*, Podman will Note that this feature is experimental and may change in the future. +#### **--secret**=*secret* + +Give the container access to a secret. Can be specified multiple times. + +A secret is a blob of sensitive data which a container needs at runtime but +should not be stored in the image or in source control, such as usernames and passwords, +TLS certificates and keys, SSH keys or other important generic strings or binary content (up to 500 kb in size). + +Secrets are managed using the `podman secret` command. + #### **--security-opt**=*option* Security Options diff --git a/docs/source/markdown/podman-secret-create.1.md b/docs/source/markdown/podman-secret-create.1.md new file mode 100644 index 000000000..af4dc1d97 --- /dev/null +++ b/docs/source/markdown/podman-secret-create.1.md @@ -0,0 +1,43 @@ +% podman-secret-create(1) + +## NAME +podman\-secret\-create - Create a new secret + +## SYNOPSIS +**podman secret create** [*options*] *name* *file|-* + +## DESCRIPTION + +Creates a secret using standard input or from a file for the secret content. + +Create accepts a path to a file, or `-`, which tells podman to read the secret from stdin. + +A secret is a blob of sensitive data which a container needs at runtime but +should not be stored in the image or in source control, such as usernames and passwords, +TLS certificates and keys, SSH keys or other important generic strings or binary content (up to 500 kb in size). + +Secrets will not be committed to an image with `podman commit`, and will not be in the archive created by a `podman export`. + +## OPTIONS + +#### **--driver**=*driver* + +Specify the secret driver (default **file**, which is unencrypted). + +#### **--help** + +Print usage statement. + +## EXAMPLES + +``` +$ podman secret create my_secret ./secret.json +$ podman secret create --driver=file my_secret ./secret.json +$ printf "<secret>" | podman secret create my_secret - +``` + +## SEE ALSO +podman-secret(1) + +## HISTORY +January 2021, Originally compiled by Ashley Cui <acui@redhat.com> diff --git a/docs/source/markdown/podman-secret-inspect.1.md b/docs/source/markdown/podman-secret-inspect.1.md new file mode 100644 index 000000000..383db8375 --- /dev/null +++ b/docs/source/markdown/podman-secret-inspect.1.md @@ -0,0 +1,38 @@ +% podman-secret-inspect(1) + +## NAME +podman\-secret\-inspect - Display detailed information on one or more secrets + +## SYNOPSIS +**podman secret inspect** [*options*] *secret* [...] + +## DESCRIPTION + +Inspects the specified secret. + +By default, this renders all results in a JSON array. If a format is specified, the given template will be executed for each result. +Secrets can be queried individually by providing their full name or a unique partial name. + +## OPTIONS + +#### **--format**=*format* + +Format secret output using Go template. + +#### **--help** + +Print usage statement. 
+ + +## EXAMPLES + +``` +$ podman secret inspect mysecret +$ podman secret inspect --format "{{.Name}} {{.Scope}}" mysecret +``` + +## SEE ALSO +podman-secret(1) + +## HISTORY +January 2021, Originally compiled by Ashley Cui <acui@redhat.com> diff --git a/docs/source/markdown/podman-secret-ls.1.md b/docs/source/markdown/podman-secret-ls.1.md new file mode 100644 index 000000000..688784e05 --- /dev/null +++ b/docs/source/markdown/podman-secret-ls.1.md @@ -0,0 +1,30 @@ +% podman-secret-ls(1) + +## NAME +podman\-secret\-ls - List all available secrets + +## SYNOPSIS +**podman secret ls** [*options*] + +## DESCRIPTION + +Lists all the secrets that exist. The output can be formatted to a Go template using the **--format** option. + +## OPTIONS + +#### **--format**=*format* + +Format secret output using Go template. + +## EXAMPLES + +``` +$ podman secret ls +$ podman secret ls --format "{{.Name}}" +``` + +## SEE ALSO +podman-secret(1) + +## HISTORY +January 2021, Originally compiled by Ashley Cui <acui@redhat.com> diff --git a/docs/source/markdown/podman-secret-rm.1.md b/docs/source/markdown/podman-secret-rm.1.md new file mode 100644 index 000000000..5169626dc --- /dev/null +++ b/docs/source/markdown/podman-secret-rm.1.md @@ -0,0 +1,33 @@ +% podman-secret-rm(1) + +## NAME +podman\-secret\-rm - Remove one or more secrets + +## SYNOPSIS +**podman secret rm** [*options*] *secret* [...] + +## DESCRIPTION + +Removes one or more secrets. + +## OPTIONS + +#### **--all**, **-a** + +Remove all existing secrets. + +#### **--help** + +Print usage statement. + +## EXAMPLES + +``` +$ podman secret rm mysecret1 mysecret2 +``` + +## SEE ALSO +podman-secret(1) + +## HISTORY +January 2021, Originally compiled by Ashley Cui <acui@redhat.com> diff --git a/docs/source/markdown/podman-secret.1.md b/docs/source/markdown/podman-secret.1.md new file mode 100644 index 000000000..125888cff --- /dev/null +++ b/docs/source/markdown/podman-secret.1.md @@ -0,0 +1,25 @@ +% podman-secret(1) + +## NAME +podman\-secret - Manage podman secrets + +## SYNOPSIS +**podman secret** *subcommand* + +## DESCRIPTION +podman secret is a set of subcommands that manage secrets. + +## SUBCOMMANDS + +| Command | Man Page | Description | +| ------- | ------------------------------------------------------ | ------------------------------------------------------ | +| create | [podman-secret-create(1)](podman-secret-create.1.md) | Create a new secret | +| inspect | [podman-secret-inspect(1)](podman-secret-inspect.1.md) | Display detailed information on one or more secrets | +| ls | [podman-secret-ls(1)](podman-secret-ls.1.md) | List all available secrets | +| rm | [podman-secret-rm(1)](podman-secret-rm.1.md) | Remove one or more secrets | + +## SEE ALSO +podman(1) + +## HISTORY +January 2021, Originally compiled by Ashley Cui <acui@redhat.com> diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md index 0bb8e387b..6f9e705c2 100644 --- a/docs/source/markdown/podman.1.md +++ b/docs/source/markdown/podman.1.md @@ -254,6 +254,7 @@ the exit codes follow the `chroot` standard, see below: | [podman-run(1)](podman-run.1.md) | Run a command in a new container. | | [podman-save(1)](podman-save.1.md) | Save image(s) to an archive. | | [podman-search(1)](podman-search.1.md) | Search a registry for an image. | +| [podman-secret(1)](podman-secret.1.md) | Manage podman secrets. | | [podman-start(1)](podman-start.1.md) | Start one or more containers. 
| | [podman-stats(1)](podman-stats.1.md) | Display a live stream of one or more container's resource usage statistics. | | [podman-stop(1)](podman-stop.1.md) | Stop one or more running containers. | diff --git a/docs/source/secret.rst b/docs/source/secret.rst new file mode 100644 index 000000000..3825ad1df --- /dev/null +++ b/docs/source/secret.rst @@ -0,0 +1,9 @@ +Secret +====== +:doc:`create <markdown/podman-secret-create.1>` Create a new secret + +:doc:`inspect <markdown/podman-secret-inspect.1>` Display detailed information on one or more secrets + +:doc:`ls <markdown/podman-secret-ls.1>` List secrets + +:doc:`rm <markdown/podman-secret-rm.1>` Remove one or more secrets @@ -10,10 +10,11 @@ require ( github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/containernetworking/cni v0.8.1 github.com/containernetworking/plugins v0.9.0 - github.com/containers/buildah v1.19.3 + github.com/containers/buildah v1.19.4 github.com/containers/common v0.34.3-0.20210208115708-8668c76dd577 github.com/containers/conmon v2.0.20+incompatible github.com/containers/image/v5 v5.10.1 + github.com/containers/ocicrypt v1.1.0 github.com/containers/psgo v1.5.2 github.com/containers/storage v1.25.0 github.com/coreos/go-systemd/v22 v22.1.0 @@ -65,7 +66,6 @@ require ( golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 google.golang.org/appengine v1.6.6 // indirect - gopkg.in/square/go-jose.v2 v2.5.1 // indirect gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.20.2 @@ -89,7 +89,6 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= @@ -97,8 +96,8 @@ github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0= github.com/containernetworking/plugins v0.9.0 h1:c+1gegKhR7+d0Caum9pEHugZlyhXPOG6v3V6xJgIGCI= github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg= -github.com/containers/buildah v1.19.3 h1:U0E1UKzqW5C11W7giHhLZI06xkZiV40ZKDK/c1jotbE= -github.com/containers/buildah v1.19.3/go.mod h1:uZb6GuE36tmRSOcIXGfiYqdpr+GPXWmlUIJSk5sn19w= +github.com/containers/buildah v1.19.4 h1:TygMnZAt8JCQ0i1APbSHfdn69B2vGvPoJKD+f6D6fuA= +github.com/containers/buildah v1.19.4/go.mod h1:PfK0EiB871UFD1CT8xNsKq60s7xw2pgSOEGICf+x6O8= github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx3z0= github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY= github.com/containers/common v0.34.3-0.20210208115708-8668c76dd577 h1:tUJcLouJ1bC3w9gdqgKqZBsj2uCuM8D8jSR592lxbhE= @@ 
-113,6 +112,8 @@ github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDpl github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c= github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g= +github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2DyGPY= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw= github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU= github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI= @@ -391,6 +392,8 @@ github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZ github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -561,6 +564,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -644,6 +649,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -756,6 +762,7 @@ golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -846,6 +853,8 @@ google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/libpod/container.go b/libpod/container.go index ed7535bc8..e667cd991 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -10,6 +10,7 @@ import ( "github.com/containernetworking/cni/pkg/types" cnitypes "github.com/containernetworking/cni/pkg/types/current" + "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/lock" @@ -1133,6 +1134,11 @@ func (c *Container) Umask() string { return c.config.Umask } +// Secrets returns the secrets in the container +func (c *Container) Secrets() []*secrets.Secret { + return c.config.Secrets +} + // Networks gets all the networks this container is connected to. // Please do NOT use ctr.config.Networks, as this can be changed from those // values at runtime via network connect and disconnect. diff --git a/libpod/container_config.go b/libpod/container_config.go index 93ac8807d..5d7e65f2b 100644 --- a/libpod/container_config.go +++ b/libpod/container_config.go @@ -4,6 +4,7 @@ import ( "net" "time" + "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" "github.com/containers/podman/v2/pkg/namespaces" "github.com/containers/storage" @@ -146,6 +147,10 @@ type ContainerRootFSConfig struct { // working directory if it does not exist. Some OCI runtimes do this by // default, but others do not. 
CreateWorkingDir bool `json:"createWorkingDir,omitempty"` + // Secrets lists secrets to mount into the container + Secrets []*secrets.Secret `json:"secrets,omitempty"` + // SecretsPath is the secrets location in storage + SecretsPath string `json:"secretsPath"` } // ContainerSecurityConfig is an embedded sub-config providing security configuration diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index ac7eae56b..f50c7dbfe 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -340,6 +340,13 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.Timezone = c.config.Timezone + for _, secret := range c.config.Secrets { + newSec := define.InspectSecret{} + newSec.Name = secret.Name + newSec.ID = secret.ID + ctrConfig.Secrets = append(ctrConfig.Secrets, &newSec) + } + // Pad Umask to 4 characters if len(c.config.Umask) < 4 { pad := strings.Repeat("0", 4-len(c.config.Umask)) diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 5a61f7fe6..b280e79d1 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/containers/common/pkg/secrets" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/pkg/cgroups" @@ -29,6 +30,7 @@ import ( securejoin "github.com/cyphar/filepath-securejoin" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -2212,3 +2214,25 @@ func (c *Container) hasNamespace(namespace spec.LinuxNamespaceType) bool { } return false } + +// extractSecretToCtrStorage copies a secret's data from the secrets manager to the container's static dir +func (c *Container) extractSecretToCtrStorage(name string) error { + manager, err := secrets.NewManager(c.runtime.GetSecretsStorageDir()) + if err != nil { + return err + } + secr, data, err := manager.LookupSecretData(name) + if err != nil { + return err + } + secretFile := filepath.Join(c.config.SecretsPath, secr.Name) + + err = ioutil.WriteFile(secretFile, data, 0644) + if err != nil { + return errors.Wrapf(err, "unable to create %s", secretFile) + } + if err := label.Relabel(secretFile, c.config.MountLabel, false); err != nil { + return err + } + return nil +} diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index ba85a1f47..3583f8fdd 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/subscriptions" + "github.com/containers/common/pkg/umask" "github.com/containers/podman/v2/libpod/define" "github.com/containers/podman/v2/libpod/events" "github.com/containers/podman/v2/pkg/annotations" @@ -1643,14 +1644,30 @@ rootless=%d c.state.BindMounts["/run/.containerenv"] = containerenvPath } - // Add Secret Mounts - secretMounts := subscriptions.MountsWithUIDGID(c.config.MountLabel, c.state.RunDir, 
c.runtime.config.Containers.DefaultMountsFile, c.state.Mountpoint, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) + // Add Subscription Mounts + subscriptionMounts := subscriptions.MountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.Mountpoint, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) + for _, mount := range subscriptionMounts { if _, ok := c.state.BindMounts[mount.Destination]; !ok { c.state.BindMounts[mount.Destination] = mount.Source } } + // Secrets are mounted by getting the secret data from the secrets manager, + // copying the data into the container's static dir, + // then mounting the copied dir into /run/secrets. + // The secrets mounting must come after subscription mounts, since subscription mounts + // create the /run/secrets dir in the container where we mount as well. + if len(c.Secrets()) > 0 { + // create /run/secrets if subscriptions did not create it + if err := c.createSecretMountDir(); err != nil { + return errors.Wrapf(err, "error creating secrets mount") + } + for _, secret := range c.Secrets() { + src := filepath.Join(c.config.SecretsPath, secret.Name) + dest := filepath.Join("/run/secrets", secret.Name) + c.state.BindMounts[dest] = src + } + } return nil } @@ -2368,3 +2385,27 @@ func (c *Container) checkFileExistsInRootfs(file string) (bool, error) { } return true, nil } + +// Creates and mounts an empty dir to mount secrets into, if it does not already exist +func (c *Container) createSecretMountDir() error { + src := filepath.Join(c.state.RunDir, "/run/secrets") + _, err := os.Stat(src) + if os.IsNotExist(err) { + oldUmask := umask.Set(0) + defer umask.Set(oldUmask) + + if err := os.MkdirAll(src, 0644); err != nil { + return err + } + if err := label.Relabel(src, c.config.MountLabel, false); err != nil { + return err + } + if err := os.Chown(src, c.RootUID(), c.RootGID()); err != nil { + return err + } + c.state.BindMounts["/run/secrets"] = src + return nil + } + + return err +} diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go index 9a93e2ffd..2cdd53cbc 100644 --- a/libpod/define/container_inspect.go +++ b/libpod/define/container_inspect.go @@ -62,6 +62,8 @@ type InspectContainerConfig struct { SystemdMode bool `json:"SystemdMode,omitempty"` // Umask is the umask inside the container. Umask string `json:"Umask,omitempty"` + // Secrets are the secrets mounted in the container + Secrets []*InspectSecret `json:"Secrets,omitempty"` } // InspectRestartPolicy holds information about the container's restart policy. @@ -705,3 +707,14 @@ type DriverData struct { Name string `json:"Name"` Data map[string]string `json:"Data"` } + +// InspectSecret provides information on a secret mounted inside the container. +type InspectSecret struct { + // Name is the name of the secret + Name string `json:"Name"` + // ID is the ID of the secret + ID string `json:"ID"` +} diff --git a/libpod/kube.go b/libpod/kube.go index f9ead027d..6cb7723c9 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -322,7 +322,8 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNS return kubeContainer, kubeVolumes, nil, err } - if len(c.config.Spec.Linux.Devices) > 0 { + // NOTE: a privileged container mounts all of /dev/*. 
+ if !c.Privileged() && len(c.config.Spec.Linux.Devices) > 0 { // TODO Enable when we can support devices and their names kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices) return kubeContainer, kubeVolumes, nil, errors.Wrapf(define.ErrNotImplemented, "linux devices") @@ -625,13 +626,18 @@ func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) { // generateKubeSecurityContext generates a securityContext based on the existing container func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { - priv := c.Privileged() + privileged := c.Privileged() ro := c.IsReadOnly() allowPrivEscalation := !c.config.Spec.Process.NoNewPrivileges - newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities) - if err != nil { - return nil, err + var capabilities *v1.Capabilities + if !privileged { + // Running privileged adds all caps. + newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities) + if err != nil { + return nil, err + } + capabilities = newCaps } var selinuxOpts v1.SELinuxOptions @@ -651,8 +657,8 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { } sc := v1.SecurityContext{ - Capabilities: newCaps, - Privileged: &priv, + Capabilities: capabilities, + Privileged: &privileged, SELinuxOptions: &selinuxOpts, // RunAsNonRoot is an optional parameter; our first implementations should be root only; however // I'm leaving this as a bread-crumb for later diff --git a/libpod/options.go b/libpod/options.go index 20f62ee37..74ee60fef 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -8,6 +8,7 @@ import ( "syscall" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/podman/v2/libpod/define" @@ -1687,6 +1688,28 @@ func WithUmask(umask string) CtrCreateOption { } } +// WithSecrets adds secrets to the container +func WithSecrets(secretNames []string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + manager, err := secrets.NewManager(ctr.runtime.GetSecretsStorageDir()) + if err != nil { + return err + } + for _, name := range secretNames { + secr, err := manager.Lookup(name) + if err != nil { + return err + } + ctr.config.Secrets = append(ctr.config.Secrets, secr) + } + + return nil + } +} + // Pod Creation Options // WithInfraImage sets the infra image for libpod. 
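Taken together, the libpod changes above implement a three-step flow: `WithSecrets` resolves each name through the secrets manager into `ctr.config.Secrets`; `setupContainer` (next file) copies each payload into `<StaticDir>/secrets` via `extractSecretToCtrStorage`; and the bind-mount logic maps every copied file to `/run/secrets/<name>`. A condensed, hedged sketch of that flow with libpod's `Container` plumbing stripped away — the helper name and paths here are illustrative, not the real libpod API:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containers/common/pkg/secrets"
)

// extractAndMount mirrors what WithSecrets + extractSecretToCtrStorage +
// the /run/secrets bind-mount logic do: resolve names, copy payloads into
// the container's static "secrets" dir, and build the bind-mount map that
// stands in for c.state.BindMounts.
func extractAndMount(storageDir, staticDir string, names []string) (map[string]string, error) {
	manager, err := secrets.NewManager(storageDir)
	if err != nil {
		return nil, err
	}
	secretsPath := filepath.Join(staticDir, "secrets")
	if err := os.MkdirAll(secretsPath, 0o755); err != nil { // the diff itself uses 0644
		return nil, err
	}
	mounts := make(map[string]string)
	for _, name := range names {
		secr, data, err := manager.LookupSecretData(name)
		if err != nil {
			return nil, err
		}
		src := filepath.Join(secretsPath, secr.Name)
		if err := ioutil.WriteFile(src, data, 0o644); err != nil {
			return nil, err
		}
		// Each copied file is bind-mounted at /run/secrets/<name>.
		mounts[filepath.Join("/run/secrets", secr.Name)] = src
	}
	return mounts, nil
}

func main() {
	mounts, err := extractAndMount("/var/lib/containers/storage/secrets", "/tmp/ctr-static", []string{"mysecret"})
	if err != nil {
		panic(err)
	}
	for dest, src := range mounts {
		fmt.Println(src, "->", dest)
	}
}
```

Copying through the static dir keeps the secret out of the image layers (and thus out of `podman commit` and `podman export`, as the man pages above note) while still surviving container restarts.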
diff --git a/libpod/runtime.go b/libpod/runtime.go index 0dc220b52..1ad39fe2f 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -904,3 +904,8 @@ func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) { return plugin.GetVolumePlugin(name, pluginPath) } + +// GetSecretsStorageDir returns the directory that the secrets manager should use +func (r *Runtime) GetSecretsStorageDir() string { + return filepath.Join(r.store.GraphRoot(), "secrets") +} diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index d2bcd8db3..49cf42626 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -422,6 +422,18 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai } }() + ctr.config.SecretsPath = filepath.Join(ctr.config.StaticDir, "secrets") + err = os.MkdirAll(ctr.config.SecretsPath, 0644) + if err != nil { + return nil, err + } + for _, secr := range ctr.config.Secrets { + err = ctr.extractSecretToCtrStorage(secr.Name) + if err != nil { + return nil, err + } + } + if ctr.config.ConmonPidFile == "" { ctr.config.ConmonPidFile = filepath.Join(ctr.state.RunDir, "conmon.pid") } @@ -492,7 +504,6 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai toLock.lock.Lock() defer toLock.lock.Unlock() } - // Add the container to the state // TODO: May be worth looking into recovering from name/ID collisions here if ctr.config.Pod != "" { diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go index 415ff85cd..0f27a090f 100644 --- a/pkg/api/handlers/compat/images_build.go +++ b/pkg/api/handlers/compat/images_build.go @@ -60,29 +60,39 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { }() query := struct { - BuildArgs string `schema:"buildargs"` - CacheFrom string `schema:"cachefrom"` - CpuPeriod uint64 `schema:"cpuperiod"` // nolint - CpuQuota int64 `schema:"cpuquota"` // nolint - CpuSetCpus string `schema:"cpusetcpus"` // nolint - CpuShares uint64 `schema:"cpushares"` // nolint - Dockerfile string `schema:"dockerfile"` - ExtraHosts string `schema:"extrahosts"` - ForceRm bool `schema:"forcerm"` - HTTPProxy bool `schema:"httpproxy"` - Labels string `schema:"labels"` - Layers bool `schema:"layers"` - MemSwap int64 `schema:"memswap"` - Memory int64 `schema:"memory"` - NetworkMode string `schema:"networkmode"` - NoCache bool `schema:"nocache"` - Outputs string `schema:"outputs"` - Platform string `schema:"platform"` - Pull bool `schema:"pull"` - Quiet bool `schema:"q"` - Registry string `schema:"registry"` - Remote string `schema:"remote"` - Rm bool `schema:"rm"` + AddHosts string `schema:"extrahosts"` + AdditionalCapabilities string `schema:"addcaps"` + Annotations string `schema:"annotations"` + BuildArgs string `schema:"buildargs"` + CacheFrom string `schema:"cachefrom"` + ConfigureNetwork int64 `schema:"networkmode"` + CpuPeriod uint64 `schema:"cpuperiod"` // nolint + CpuQuota int64 `schema:"cpuquota"` // nolint + CpuSetCpus string `schema:"cpusetcpus"` // nolint + CpuShares uint64 `schema:"cpushares"` // nolint + Devices string `schema:"devices"` + Dockerfile string `schema:"dockerfile"` + DropCapabilities string `schema:"dropcaps"` + ForceRm bool `schema:"forcerm"` + From string `schema:"from"` + HTTPProxy bool `schema:"httpproxy"` + Isolation int64 `schema:"isolation"` + Jobs uint64 `schema:"jobs"` // nolint + Labels string `schema:"labels"` + Layers bool `schema:"layers"` + LogRusage bool `schema:"rusage"` + Manifest string `schema:"manifest"` + MemSwap int64 
`schema:"memswap"` + Memory int64 `schema:"memory"` + NoCache bool `schema:"nocache"` + OutputFormat string `schema:"outputformat"` + Platform string `schema:"platform"` + Pull bool `schema:"pull"` + Quiet bool `schema:"q"` + Registry string `schema:"registry"` + Rm bool `schema:"rm"` + //FIXME SecurityOpt in remote API is not handled + SecurityOpt string `schema:"securityopt"` ShmSize int `schema:"shmsize"` Squash bool `schema:"squash"` Tag []string `schema:"t"` @@ -101,14 +111,57 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { return } + // convert addcaps format + var addCaps = []string{} + if _, found := r.URL.Query()["addcaps"]; found { + var m = []string{} + if err := json.Unmarshal([]byte(query.AdditionalCapabilities), &m); err != nil { + utils.BadRequest(w, "addcaps", query.AdditionalCapabilities, err) + return + } + addCaps = m + } + addhosts := []string{} + if _, found := r.URL.Query()["extrahosts"]; found { + if err := json.Unmarshal([]byte(query.AddHosts), &addhosts); err != nil { + utils.BadRequest(w, "extrahosts", query.AddHosts, err) + return + } + } + + // convert dropcaps format + var dropCaps = []string{} + if _, found := r.URL.Query()["dropcaps"]; found { + var m = []string{} + if err := json.Unmarshal([]byte(query.DropCapabilities), &m); err != nil { + utils.BadRequest(w, "dropcaps", query.DropCapabilities, err) + return + } + dropCaps = m + } + + // convert devices format + var devices = []string{} + if _, found := r.URL.Query()["devices"]; found { + var m = []string{} + if err := json.Unmarshal([]byte(query.Devices), &m); err != nil { + utils.BadRequest(w, "devices", query.Devices, err) + return + } + devices = m + } + var output string + if len(query.Tag) > 0 { + output = query.Tag[0] + } - - var additionalNames []string + format := buildah.Dockerv2ImageManifest + if utils.IsLibpodRequest(r) { + format = query.OutputFormat + } + var additionalTags []string if len(query.Tag) > 1 { - additionalNames = query.Tag[1:] + additionalTags = query.Tag[1:] } var buildArgs = map[string]string{} @@ -120,17 +173,21 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { } // convert annotations format + var annotations = []string{} + if _, found := r.URL.Query()["annotations"]; found { + if err := json.Unmarshal([]byte(query.Annotations), &annotations); err != nil { + utils.BadRequest(w, "annotations", query.Annotations, err) + return + } + } + + // convert label formats var labels = []string{} if _, found := r.URL.Query()["labels"]; found { - var m = map[string]string{} - if err := json.Unmarshal([]byte(query.Labels), &m); err != nil { + if err := json.Unmarshal([]byte(query.Labels), &labels); err != nil { utils.BadRequest(w, "labels", query.Labels, err) return } - - for k, v := range m { - labels = append(labels, k+"="+v) - } } pullPolicy := buildah.PullIfMissing @@ -160,27 +217,14 @@ func BuildImage(w http.ResponseWriter, r *http.Request) { reporter := channel.NewWriter(make(chan []byte, 1)) defer reporter.Close() + buildOptions := imagebuildah.BuildOptions{ - ContextDirectory: contextDirectory, - PullPolicy: pullPolicy, - Registry: query.Registry, - IgnoreUnrecognizedInstructions: true, - Quiet: query.Quiet, - Layers: query.Layers, - Isolation: buildah.IsolationChroot, - Compression: archive.Gzip, - Args: buildArgs, - Output: output, - AdditionalTags: additionalNames, - Out: stdout, - Err: auxout, - ReportWriter: reporter, - OutputFormat: buildah.Dockerv2ImageManifest, - SystemContext: &types.SystemContext{ - AuthFilePath: authfile, - DockerAuthConfig: 
creds, - }, + AddCapabilities: addCaps, + AdditionalTags: additionalTags, + Annotations: annotations, + Args: buildArgs, CommonBuildOpts: &buildah.CommonBuildOptions{ + AddHost: addhosts, CPUPeriod: query.CpuPeriod, CPUQuota: query.CpuQuota, CPUShares: query.CpuShares, @@ -190,12 +234,37 @@ MemorySwap: query.MemSwap, ShmSize: strconv.Itoa(query.ShmSize), }, - Squash: query.Squash, - Labels: labels, - NoCache: query.NoCache, - RemoveIntermediateCtrs: query.Rm, - ForceRmIntermediateCtrs: query.ForceRm, - Target: query.Target, + Compression: archive.Gzip, + ConfigureNetwork: buildah.NetworkConfigurationPolicy(query.ConfigureNetwork), + ContextDirectory: contextDirectory, + Devices: devices, + DropCapabilities: dropCaps, + Err: auxout, + ForceRmIntermediateCtrs: query.ForceRm, + From: query.From, + IgnoreUnrecognizedInstructions: true, + // FIXME, This is very broken. Buildah will only work with chroot + // Isolation: buildah.Isolation(query.Isolation), + Isolation: buildah.IsolationChroot, + + Labels: labels, + Layers: query.Layers, + Manifest: query.Manifest, + NoCache: query.NoCache, + Out: stdout, + Output: output, + OutputFormat: format, + PullPolicy: pullPolicy, + Quiet: query.Quiet, + Registry: query.Registry, + RemoveIntermediateCtrs: query.Rm, + ReportWriter: reporter, + Squash: query.Squash, + SystemContext: &types.SystemContext{ + AuthFilePath: authfile, + DockerAuthConfig: creds, + }, + Target: query.Target, } runtime := r.Context().Value("runtime").(*libpod.Runtime) diff --git a/pkg/api/handlers/compat/secrets.go b/pkg/api/handlers/compat/secrets.go new file mode 100644 index 000000000..ea2dfc707 --- /dev/null +++ b/pkg/api/handlers/compat/secrets.go @@ -0,0 +1,121 @@ +package compat + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + + "github.com/containers/podman/v2/libpod" + "github.com/containers/podman/v2/pkg/api/handlers/utils" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/containers/podman/v2/pkg/domain/infra/abi" + "github.com/gorilla/schema" + "github.com/pkg/errors" +) + +func ListSecrets(w http.ResponseWriter, r *http.Request) { + var ( + runtime = r.Context().Value("runtime").(*libpod.Runtime) + decoder = r.Context().Value("decoder").(*schema.Decoder) + ) + query := struct { + Filters map[string][]string `schema:"filters"` + }{} + + if err := decoder.Decode(&query, r.URL.Query()); err != nil { + utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest, + errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String())) + return + } + if len(query.Filters) > 0 { + utils.Error(w, "filters not supported", http.StatusBadRequest, errors.New("bad parameter")) + return + } + ic := abi.ContainerEngine{Libpod: runtime} + reports, err := ic.SecretList(r.Context()) + if err != nil { + utils.InternalServerError(w, err) + return + } + utils.WriteResponse(w, http.StatusOK, reports) +} + +func InspectSecret(w http.ResponseWriter, r *http.Request) { + var ( + runtime = r.Context().Value("runtime").(*libpod.Runtime) + ) + name := utils.GetName(r) + names := []string{name} + ic := abi.ContainerEngine{Libpod: runtime} + reports, errs, err := ic.SecretInspect(r.Context(), names) + if err != nil { + utils.InternalServerError(w, err) + return + } + if len(errs) > 0 { + utils.SecretNotFound(w, name, errs[0]) + return + } + utils.WriteResponse(w, http.StatusOK, reports[0]) + +} + +func RemoveSecret(w http.ResponseWriter, r *http.Request) { + var ( + runtime = 
+
+func RemoveSecret(w http.ResponseWriter, r *http.Request) {
+	var (
+		runtime = r.Context().Value("runtime").(*libpod.Runtime)
+	)
+
+	opts := entities.SecretRmOptions{}
+	name := utils.GetName(r)
+	ic := abi.ContainerEngine{Libpod: runtime}
+	reports, err := ic.SecretRm(r.Context(), []string{name}, opts)
+	if err != nil {
+		utils.InternalServerError(w, err)
+		return
+	}
+	if reports[0].Err != nil {
+		utils.SecretNotFound(w, name, reports[0].Err)
+		return
+	}
+	utils.WriteResponse(w, http.StatusNoContent, nil)
+}
+
+func CreateSecret(w http.ResponseWriter, r *http.Request) {
+	var (
+		runtime = r.Context().Value("runtime").(*libpod.Runtime)
+	)
+	opts := entities.SecretCreateOptions{}
+	createParams := struct {
+		*entities.SecretCreateRequest
+		Labels map[string]string `schema:"labels"`
+	}{}
+
+	if err := json.NewDecoder(r.Body).Decode(&createParams); err != nil {
+		utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+		return
+	}
+	if len(createParams.Labels) > 0 {
+		utils.Error(w, "labels not supported", http.StatusBadRequest, errors.New("bad parameter"))
+		return
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(createParams.Data)
+	if err != nil {
+		utils.Error(w, "secret data must be base64 encoded", http.StatusBadRequest, errors.Wrap(err, "DecodeString()"))
+		return
+	}
+	reader := bytes.NewReader(decoded)
+	opts.Driver = createParams.Driver.Name
+
+	ic := abi.ContainerEngine{Libpod: runtime}
+	report, err := ic.SecretCreate(r.Context(), createParams.Name, reader, opts)
+	if err != nil {
+		if errors.Cause(err).Error() == "secret name in use" {
+			utils.Error(w, "name conflicts with an existing object", http.StatusConflict, err)
+			return
+		}
+		utils.InternalServerError(w, err)
+		return
+	}
+	utils.WriteResponse(w, http.StatusOK, report)
+}
+
+func UpdateSecret(w http.ResponseWriter, r *http.Request) {
+	utils.Error(w, fmt.Sprintf("unsupported endpoint: %v", r.Method), http.StatusNotImplemented, errors.New("update is not supported"))
+}
diff --git a/pkg/api/handlers/libpod/secrets.go b/pkg/api/handlers/libpod/secrets.go
new file mode 100644
index 000000000..447a5d021
--- /dev/null
+++ b/pkg/api/handlers/libpod/secrets.go
@@ -0,0 +1,40 @@
+package libpod
+
+import (
+	"net/http"
+
+	"github.com/containers/podman/v2/libpod"
+	"github.com/containers/podman/v2/pkg/api/handlers/utils"
+	"github.com/containers/podman/v2/pkg/domain/entities"
+	"github.com/containers/podman/v2/pkg/domain/infra/abi"
+	"github.com/gorilla/schema"
+	"github.com/pkg/errors"
+)
+
+func CreateSecret(w http.ResponseWriter, r *http.Request) {
+	var (
+		runtime = r.Context().Value("runtime").(*libpod.Runtime)
+		decoder = r.Context().Value("decoder").(*schema.Decoder)
+	)
+	query := struct {
+		Name   string `schema:"name"`
+		Driver string `schema:"driver"`
+	}{
+		// override any golang type defaults
+	}
+	opts := entities.SecretCreateOptions{}
+	if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+		utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
+			errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
+		return
+	}
+	opts.Driver = query.Driver
+
+	ic := abi.ContainerEngine{Libpod: runtime}
+	report, err := ic.SecretCreate(r.Context(), query.Name, r.Body, opts)
+	if err != nil {
+		utils.InternalServerError(w, err)
+		return
+	}
+	utils.WriteResponse(w, http.StatusOK, report)
+}
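The handlers above are wired to HTTP routes in register_secrets.go below, and the pkg/bindings/secrets package added later in this change wraps them for Go clients. A hedged usage sketch, assuming a reachable Podman API service; the socket URI is illustrative (rootless setups use a per-user path):

package main

import (
	"context"
	"log"
	"strings"

	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/secrets"
)

func main() {
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}
	// create, inspect, and remove a secret through the new bindings
	opts := new(secrets.CreateOptions).WithName("mysecret").WithDriver("file")
	report, err := secrets.Create(conn, strings.NewReader("s3cr3t"), opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created secret", report.ID)

	if _, err := secrets.Inspect(conn, "mysecret", nil); err != nil {
		log.Fatal(err)
	}
	if err := secrets.Remove(conn, "mysecret"); err != nil {
		log.Fatal(err)
	}
}
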
diff --git a/pkg/api/handlers/utils/errors.go b/pkg/api/handlers/utils/errors.go
index e2c287c45..c8785fb89
--- a/pkg/api/handlers/utils/errors.go
+++ b/pkg/api/handlers/utils/errors.go
@@ -80,6 +80,14 @@ func SessionNotFound(w http.ResponseWriter, name string, err error) {
 	Error(w, msg, http.StatusNotFound, err)
 }

+func SecretNotFound(w http.ResponseWriter, nameOrID string, err error) {
+	if errors.Cause(err).Error() != "no such secret" {
+		InternalServerError(w, err)
+		return
+	}
+	msg := fmt.Sprintf("No such secret: %s", nameOrID)
+	Error(w, msg, http.StatusNotFound, err)
+}
+
 func ContainerNotRunning(w http.ResponseWriter, containerID string, err error) {
 	msg := fmt.Sprintf("Container %s is not running", containerID)
 	Error(w, msg, http.StatusConflict, err)
diff --git a/pkg/api/server/register_secrets.go b/pkg/api/server/register_secrets.go
new file mode 100644
index 000000000..95abf83e8
--- /dev/null
+++ b/pkg/api/server/register_secrets.go
@@ -0,0 +1,194 @@
+package server
+
+import (
+	"net/http"
+
+	"github.com/containers/podman/v2/pkg/api/handlers/compat"
+	"github.com/containers/podman/v2/pkg/api/handlers/libpod"
+	"github.com/gorilla/mux"
+)
+
+func (s *APIServer) registerSecretHandlers(r *mux.Router) error {
+	// swagger:operation POST /libpod/secrets/create libpod libpodCreateSecret
+	// ---
+	// tags:
+	//  - secrets
+	// summary: Create a secret
+	// parameters:
+	//  - in: query
+	//    name: name
+	//    type: string
+	//    description: User-defined name of the secret.
+	//    required: true
+	//  - in: query
+	//    name: driver
+	//    type: string
+	//    description: Secret driver
+	//    default: "file"
+	//  - in: body
+	//    name: request
+	//    description: Secret
+	//    schema:
+	//      type: string
+	// produces:
+	// - application/json
+	// responses:
+	//   '201':
+	//     $ref: "#/responses/SecretCreateResponse"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/libpod/secrets/create"), s.APIHandler(libpod.CreateSecret)).Methods(http.MethodPost)
+	// swagger:operation GET /libpod/secrets/json libpod libpodListSecret
+	// ---
+	// tags:
+	//  - secrets
+	// summary: List secrets
+	// description: Returns a list of secrets
+	// produces:
+	// - application/json
+	// responses:
+	//   '200':
+	//     "$ref": "#/responses/SecretListResponse"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/libpod/secrets/json"), s.APIHandler(compat.ListSecrets)).Methods(http.MethodGet)
+	// swagger:operation GET /libpod/secrets/{name}/json libpod libpodInspectSecret
+	// ---
+	// tags:
+	//  - secrets
+	// summary: Inspect secret
+	// parameters:
+	//  - in: path
+	//    name: name
+	//    type: string
+	//    required: true
+	//    description: the name or ID of the secret
+	// produces:
+	// - application/json
+	// responses:
+	//   '200':
+	//     "$ref": "#/responses/SecretInspectResponse"
+	//   '404':
+	//     "$ref": "#/responses/NoSuchSecret"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/libpod/secrets/{name}/json"), s.APIHandler(compat.InspectSecret)).Methods(http.MethodGet)
+	// swagger:operation DELETE /libpod/secrets/{name} libpod libpodRemoveSecret
+	// ---
+	// tags:
+	//  - secrets
+	// summary: Remove secret
+	// parameters:
+	//  - in: path
+	//    name: name
+	//    type: string
+	//    required: true
+	//    description: the name or ID of the secret
+	//  - in: query
+	//    name: all
+	//    type: boolean
+	//    description: Remove all secrets
+	//    default: false
+	// produces:
+	// - application/json
+	// responses:
+	//   '204':
+	//     description: no error
+	//   '404':
+	//     "$ref": "#/responses/NoSuchSecret"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/libpod/secrets/{name}"), s.APIHandler(compat.RemoveSecret)).Methods(http.MethodDelete)
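A raw request against the libpod create route registered just above: the secret payload travels as the request body, while name and driver are query parameters, so no JSON envelope or base64 step is needed. The address and version segment are illustrative:

package main

import (
	"log"
	"net/http"
	"strings"
)

func main() {
	// raw secret bytes in the body; compare with the compat endpoint,
	// which wraps them in a base64-encoded JSON field
	resp, err := http.Post(
		"http://localhost:8080/v3.0.0/libpod/secrets/create?name=mysecret&driver=file",
		"application/octet-stream", strings.NewReader("s3cr3t"))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
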
+
+	/*
+	 * Docker compatibility endpoints
+	 */
+
+	// swagger:operation GET /secrets compat ListSecret
+	// ---
+	// tags:
+	//  - secrets (compat)
+	// summary: List secrets
+	// description: Returns a list of secrets
+	// produces:
+	// - application/json
+	// responses:
+	//   '200':
+	//     "$ref": "#/responses/SecretListResponse"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/secrets"), s.APIHandler(compat.ListSecrets)).Methods(http.MethodGet)
+	r.Handle("/secrets", s.APIHandler(compat.ListSecrets)).Methods(http.MethodGet)
+	// swagger:operation POST /secrets/create compat CreateSecret
+	// ---
+	// tags:
+	//  - secrets (compat)
+	// summary: Create a secret
+	// parameters:
+	//  - in: body
+	//    name: create
+	//    description: |
+	//      attributes for creating a secret
+	//    schema:
+	//      $ref: "#/definitions/SecretCreate"
+	// produces:
+	// - application/json
+	// responses:
+	//   '201':
+	//     $ref: "#/responses/SecretCreateResponse"
+	//   '409':
+	//     "$ref": "#/responses/SecretInUse"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/secrets/create"), s.APIHandler(compat.CreateSecret)).Methods(http.MethodPost)
+	r.Handle("/secrets/create", s.APIHandler(compat.CreateSecret)).Methods(http.MethodPost)
+	// swagger:operation GET /secrets/{name} compat InspectSecret
+	// ---
+	// tags:
+	//  - secrets (compat)
+	// summary: Inspect secret
+	// parameters:
+	//  - in: path
+	//    name: name
+	//    type: string
+	//    required: true
+	//    description: the name or ID of the secret
+	// produces:
+	// - application/json
+	// responses:
+	//   '200':
+	//     "$ref": "#/responses/SecretInspectResponse"
+	//   '404':
+	//     "$ref": "#/responses/NoSuchSecret"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/secrets/{name}"), s.APIHandler(compat.InspectSecret)).Methods(http.MethodGet)
+	r.Handle("/secrets/{name}", s.APIHandler(compat.InspectSecret)).Methods(http.MethodGet)
+	// swagger:operation DELETE /secrets/{name} compat RemoveSecret
+	// ---
+	// tags:
+	//  - secrets (compat)
+	// summary: Remove secret
+	// parameters:
+	//  - in: path
+	//    name: name
+	//    type: string
+	//    required: true
+	//    description: the name or ID of the secret
+	// produces:
+	// - application/json
+	// responses:
+	//   '204':
+	//     description: no error
+	//   '404':
+	//     "$ref": "#/responses/NoSuchSecret"
+	//   '500':
+	//     "$ref": "#/responses/InternalError"
+	r.Handle(VersionedPath("/secrets/{name}"), s.APIHandler(compat.RemoveSecret)).Methods(http.MethodDelete)
+	r.Handle("/secrets/{name}", s.APIHandler(compat.RemoveSecret)).Methods(http.MethodDelete)
+
+	r.Handle(VersionedPath("/secrets/{name}/update"), s.APIHandler(compat.UpdateSecret)).Methods(http.MethodPost)
+	r.Handle("/secrets/{name}/update", s.APIHandler(compat.UpdateSecret)).Methods(http.MethodPost)
+	return nil
+}
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index d612041f6..6926eda62
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -124,6 +124,7 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
 		server.registerPlayHandlers,
 		server.registerPluginsHandlers,
 		server.registerPodsHandlers,
+		server.registerSecretHandlers,
 		server.RegisterSwaggerHandlers,
 		server.registerSwarmHandlers,
 		server.registerSystemHandlers,
diff --git a/pkg/api/tags.yaml b/pkg/api/tags.yaml
index 0cfb3f440..bb56098eb
--- a/pkg/api/tags.yaml
+++ b/pkg/api/tags.yaml
@@ -13,6 +13,8 @@ tags:
   - name: pods
     description: Actions related to pods
   - name: volumes
     description: Actions related to volumes
+  - name: secrets
+    description: Actions related to secrets
   - name: system
     description: Actions
related to Podman engine - name: containers (compat) @@ -25,5 +27,7 @@ tags: description: Actions related to compatibility networks - name: volumes (compat) description: Actions related to volumes for the compatibility endpoints + - name: secrets (compat) + description: Actions related to secrets for the compatibility endpoints - name: system (compat) description: Actions related to Podman and compatibility engines diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go index 02765816f..8ea09b881 100644 --- a/pkg/bindings/images/build.go +++ b/pkg/bindings/images/build.go @@ -31,36 +31,31 @@ import ( func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions) (*entities.BuildReport, error) { params := url.Values{} - if t := options.Output; len(t) > 0 { - params.Set("t", t) + if caps := options.AddCapabilities; len(caps) > 0 { + c, err := jsoniter.MarshalToString(caps) + if err != nil { + return nil, err + } + params.Add("addcaps", c) } + + if annotations := options.Annotations; len(annotations) > 0 { + l, err := jsoniter.MarshalToString(annotations) + if err != nil { + return nil, err + } + params.Set("annotations", l) + } + params.Add("t", options.Output) for _, tag := range options.AdditionalTags { params.Add("t", tag) } - if options.Quiet { - params.Set("q", "1") - } - if options.NoCache { - params.Set("nocache", "1") - } - if options.Layers { - params.Set("layers", "1") - } - // TODO cachefrom - if options.PullPolicy == buildah.PullAlways { - params.Set("pull", "1") - } - if options.RemoveIntermediateCtrs { - params.Set("rm", "1") - } - if options.ForceRmIntermediateCtrs { - params.Set("forcerm", "1") - } - if mem := options.CommonBuildOpts.Memory; mem > 0 { - params.Set("memory", strconv.Itoa(int(mem))) - } - if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { - params.Set("memswap", strconv.Itoa(int(memSwap))) + if buildArgs := options.Args; len(buildArgs) > 0 { + bArgs, err := jsoniter.MarshalToString(buildArgs) + if err != nil { + return nil, err + } + params.Set("buildargs", bArgs) } if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 { params.Set("cpushares", strconv.Itoa(int(cpuShares))) @@ -74,22 +69,38 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 { params.Set("cpuquota", strconv.Itoa(int(cpuQuota))) } - if buildArgs := options.Args; len(buildArgs) > 0 { - bArgs, err := jsoniter.MarshalToString(buildArgs) + params.Set("networkmode", strconv.Itoa(int(options.ConfigureNetwork))) + params.Set("outputformat", options.OutputFormat) + + if devices := options.Devices; len(devices) > 0 { + d, err := jsoniter.MarshalToString(devices) if err != nil { return nil, err } - params.Set("buildargs", bArgs) + params.Add("devices", d) } - if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { - shmBytes, err := units.RAMInBytes(shmSize) + + if caps := options.DropCapabilities; len(caps) > 0 { + c, err := jsoniter.MarshalToString(caps) if err != nil { return nil, err } - params.Set("shmsize", strconv.Itoa(int(shmBytes))) + params.Add("dropcaps", c) } - if options.Squash { - params.Set("squash", "1") + + if options.ForceRmIntermediateCtrs { + params.Set("forcerm", "1") + } + if len(options.From) > 0 { + params.Set("from", options.From) + } + + params.Set("isolation", strconv.Itoa(int(options.Isolation))) + if options.CommonBuildOpts.HTTPProxy { + params.Set("httpproxy", "1") + } + if options.Jobs != nil { + 
params.Set("jobs", strconv.FormatUint(uint64(*options.Jobs), 10)) } if labels := options.Labels; len(labels) > 0 { l, err := jsoniter.MarshalToString(labels) @@ -98,10 +109,66 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO } params.Set("labels", l) } - if options.CommonBuildOpts.HTTPProxy { - params.Set("httpproxy", "1") + if options.Layers { + params.Set("layers", "1") + } + if options.LogRusage { + params.Set("rusage", "1") + } + if len(options.Manifest) > 0 { + params.Set("manifest", options.Manifest) + } + if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { + params.Set("memswap", strconv.Itoa(int(memSwap))) + } + if mem := options.CommonBuildOpts.Memory; mem > 0 { + params.Set("memory", strconv.Itoa(int(mem))) + } + if options.NoCache { + params.Set("nocache", "1") + } + if t := options.Output; len(t) > 0 { + params.Set("output", t) + } + var platform string + if len(options.OS) > 0 { + platform = options.OS + } + if len(options.Architecture) > 0 { + if len(platform) == 0 { + platform = "linux" + } + platform += "/" + options.Architecture + } + if len(platform) > 0 { + params.Set("platform", platform) + } + if options.PullPolicy == buildah.PullAlways { + params.Set("pull", "1") + } + if options.Quiet { + params.Set("q", "1") + } + if options.RemoveIntermediateCtrs { + params.Set("rm", "1") + } + if hosts := options.CommonBuildOpts.AddHost; len(hosts) > 0 { + h, err := jsoniter.MarshalToString(hosts) + if err != nil { + return nil, err + } + params.Set("extrahosts", h) + } + if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { + shmBytes, err := units.RAMInBytes(shmSize) + if err != nil { + return nil, err + } + params.Set("shmsize", strconv.Itoa(int(shmBytes))) + } + if options.Squash { + params.Set("squash", "1") } - var ( headers map[string]string err error @@ -124,19 +191,6 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO stdout = options.Out } - // TODO network? - - var platform string - if OS := options.OS; len(OS) > 0 { - platform += OS - } - if arch := options.Architecture; len(arch) > 0 { - platform += "/" + arch - } - if len(platform) > 0 { - params.Set("platform", platform) - } - entries := make([]string, len(containerFiles)) copy(entries, containerFiles) entries = append(entries, options.ContextDirectory) diff --git a/pkg/bindings/secrets/secrets.go b/pkg/bindings/secrets/secrets.go new file mode 100644 index 000000000..3fd70dcad --- /dev/null +++ b/pkg/bindings/secrets/secrets.go @@ -0,0 +1,78 @@ +package secrets + +import ( + "context" + "io" + "net/http" + + "github.com/containers/podman/v2/pkg/bindings" + "github.com/containers/podman/v2/pkg/domain/entities" +) + +// List returns information about existing secrets in the form of a slice. +func List(ctx context.Context, options *ListOptions) ([]*entities.SecretInfoReport, error) { + var ( + secrs []*entities.SecretInfoReport + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(nil, http.MethodGet, "/secrets/json", nil, nil) + if err != nil { + return secrs, err + } + return secrs, response.Process(&secrs) +} + +// Inspect returns low-level information about a secret. 
+func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entities.SecretInfoReport, error) { + var ( + inspect *entities.SecretInfoReport + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(nil, http.MethodGet, "/secrets/%s/json", nil, nil, nameOrID) + if err != nil { + return inspect, err + } + return inspect, response.Process(&inspect) +} + +// Remove removes a secret from storage +func Remove(ctx context.Context, nameOrID string) error { + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + + response, err := conn.DoRequest(nil, http.MethodDelete, "/secrets/%s", nil, nil, nameOrID) + if err != nil { + return err + } + return response.Process(nil) +} + +// Create creates a secret given some data +func Create(ctx context.Context, reader io.Reader, options *CreateOptions) (*entities.SecretCreateReport, error) { + var ( + create *entities.SecretCreateReport + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + + params, err := options.ToParams() + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(reader, http.MethodPost, "/secrets/create", params, nil) + if err != nil { + return nil, err + } + return create, response.Process(&create) +} diff --git a/pkg/bindings/secrets/types.go b/pkg/bindings/secrets/types.go new file mode 100644 index 000000000..a98e894dc --- /dev/null +++ b/pkg/bindings/secrets/types.go @@ -0,0 +1,23 @@ +package secrets + +//go:generate go run ../generator/generator.go ListOptions +// ListOptions are optional options for inspecting secrets +type ListOptions struct { +} + +//go:generate go run ../generator/generator.go InspectOptions +// InspectOptions are optional options for inspecting secrets +type InspectOptions struct { +} + +//go:generate go run ../generator/generator.go RemoveOptions +// RemoveOptions are optional options for removing secrets +type RemoveOptions struct { +} + +//go:generate go run ../generator/generator.go CreateOptions +// CreateOptions are optional options for Creating secrets +type CreateOptions struct { + Driver *string + Name *string +} diff --git a/pkg/bindings/secrets/types_create_options.go b/pkg/bindings/secrets/types_create_options.go new file mode 100644 index 000000000..84cf38fa3 --- /dev/null +++ b/pkg/bindings/secrets/types_create_options.go @@ -0,0 +1,107 @@ +package secrets + +import ( + "net/url" + "reflect" + "strings" + + "github.com/containers/podman/v2/pkg/bindings/util" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" +) + +/* +This file is generated automatically by go generate. Do not edit. 
+*/ + +// Changed +func (o *CreateOptions) Changed(fieldName string) bool { + r := reflect.ValueOf(o) + value := reflect.Indirect(r).FieldByName(fieldName) + return !value.IsNil() +} + +// ToParams +func (o *CreateOptions) ToParams() (url.Values, error) { + params := url.Values{} + if o == nil { + return params, nil + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + s := reflect.ValueOf(o) + if reflect.Ptr == s.Kind() { + s = s.Elem() + } + sType := s.Type() + for i := 0; i < s.NumField(); i++ { + fieldName := sType.Field(i).Name + if !o.Changed(fieldName) { + continue + } + fieldName = strings.ToLower(fieldName) + f := s.Field(i) + if reflect.Ptr == f.Kind() { + f = f.Elem() + } + switch { + case util.IsSimpleType(f): + params.Set(fieldName, util.SimpleTypeToParam(f)) + case f.Kind() == reflect.Slice: + for i := 0; i < f.Len(); i++ { + elem := f.Index(i) + if util.IsSimpleType(elem) { + params.Add(fieldName, util.SimpleTypeToParam(elem)) + } else { + return nil, errors.New("slices must contain only simple types") + } + } + case f.Kind() == reflect.Map: + lowerCaseKeys := make(map[string][]string) + iter := f.MapRange() + for iter.Next() { + lowerCaseKeys[iter.Key().Interface().(string)] = iter.Value().Interface().([]string) + + } + s, err := json.MarshalToString(lowerCaseKeys) + if err != nil { + return nil, err + } + + params.Set(fieldName, s) + } + + } + return params, nil +} + +// WithDriver +func (o *CreateOptions) WithDriver(value string) *CreateOptions { + v := &value + o.Driver = v + return o +} + +// GetDriver +func (o *CreateOptions) GetDriver() string { + var driver string + if o.Driver == nil { + return driver + } + return *o.Driver +} + +// WithName +func (o *CreateOptions) WithName(value string) *CreateOptions { + v := &value + o.Name = v + return o +} + +// GetName +func (o *CreateOptions) GetName() string { + var name string + if o.Name == nil { + return name + } + return *o.Name +} diff --git a/pkg/bindings/secrets/types_inspect_options.go b/pkg/bindings/secrets/types_inspect_options.go new file mode 100644 index 000000000..cd36b0531 --- /dev/null +++ b/pkg/bindings/secrets/types_inspect_options.go @@ -0,0 +1,75 @@ +package secrets + +import ( + "net/url" + "reflect" + "strings" + + "github.com/containers/podman/v2/pkg/bindings/util" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" +) + +/* +This file is generated automatically by go generate. Do not edit. 
+*/ + +// Changed +func (o *InspectOptions) Changed(fieldName string) bool { + r := reflect.ValueOf(o) + value := reflect.Indirect(r).FieldByName(fieldName) + return !value.IsNil() +} + +// ToParams +func (o *InspectOptions) ToParams() (url.Values, error) { + params := url.Values{} + if o == nil { + return params, nil + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + s := reflect.ValueOf(o) + if reflect.Ptr == s.Kind() { + s = s.Elem() + } + sType := s.Type() + for i := 0; i < s.NumField(); i++ { + fieldName := sType.Field(i).Name + if !o.Changed(fieldName) { + continue + } + fieldName = strings.ToLower(fieldName) + f := s.Field(i) + if reflect.Ptr == f.Kind() { + f = f.Elem() + } + switch { + case util.IsSimpleType(f): + params.Set(fieldName, util.SimpleTypeToParam(f)) + case f.Kind() == reflect.Slice: + for i := 0; i < f.Len(); i++ { + elem := f.Index(i) + if util.IsSimpleType(elem) { + params.Add(fieldName, util.SimpleTypeToParam(elem)) + } else { + return nil, errors.New("slices must contain only simple types") + } + } + case f.Kind() == reflect.Map: + lowerCaseKeys := make(map[string][]string) + iter := f.MapRange() + for iter.Next() { + lowerCaseKeys[iter.Key().Interface().(string)] = iter.Value().Interface().([]string) + + } + s, err := json.MarshalToString(lowerCaseKeys) + if err != nil { + return nil, err + } + + params.Set(fieldName, s) + } + + } + return params, nil +} diff --git a/pkg/bindings/secrets/types_list_options.go b/pkg/bindings/secrets/types_list_options.go new file mode 100644 index 000000000..d313d8f73 --- /dev/null +++ b/pkg/bindings/secrets/types_list_options.go @@ -0,0 +1,75 @@ +package secrets + +import ( + "net/url" + "reflect" + "strings" + + "github.com/containers/podman/v2/pkg/bindings/util" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" +) + +/* +This file is generated automatically by go generate. Do not edit. 
+*/ + +// Changed +func (o *ListOptions) Changed(fieldName string) bool { + r := reflect.ValueOf(o) + value := reflect.Indirect(r).FieldByName(fieldName) + return !value.IsNil() +} + +// ToParams +func (o *ListOptions) ToParams() (url.Values, error) { + params := url.Values{} + if o == nil { + return params, nil + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + s := reflect.ValueOf(o) + if reflect.Ptr == s.Kind() { + s = s.Elem() + } + sType := s.Type() + for i := 0; i < s.NumField(); i++ { + fieldName := sType.Field(i).Name + if !o.Changed(fieldName) { + continue + } + fieldName = strings.ToLower(fieldName) + f := s.Field(i) + if reflect.Ptr == f.Kind() { + f = f.Elem() + } + switch { + case util.IsSimpleType(f): + params.Set(fieldName, util.SimpleTypeToParam(f)) + case f.Kind() == reflect.Slice: + for i := 0; i < f.Len(); i++ { + elem := f.Index(i) + if util.IsSimpleType(elem) { + params.Add(fieldName, util.SimpleTypeToParam(elem)) + } else { + return nil, errors.New("slices must contain only simple types") + } + } + case f.Kind() == reflect.Map: + lowerCaseKeys := make(map[string][]string) + iter := f.MapRange() + for iter.Next() { + lowerCaseKeys[iter.Key().Interface().(string)] = iter.Value().Interface().([]string) + + } + s, err := json.MarshalToString(lowerCaseKeys) + if err != nil { + return nil, err + } + + params.Set(fieldName, s) + } + + } + return params, nil +} diff --git a/pkg/bindings/secrets/types_remove_options.go b/pkg/bindings/secrets/types_remove_options.go new file mode 100644 index 000000000..ca970e30e --- /dev/null +++ b/pkg/bindings/secrets/types_remove_options.go @@ -0,0 +1,75 @@ +package secrets + +import ( + "net/url" + "reflect" + "strings" + + "github.com/containers/podman/v2/pkg/bindings/util" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" +) + +/* +This file is generated automatically by go generate. Do not edit. 
+*/ + +// Changed +func (o *RemoveOptions) Changed(fieldName string) bool { + r := reflect.ValueOf(o) + value := reflect.Indirect(r).FieldByName(fieldName) + return !value.IsNil() +} + +// ToParams +func (o *RemoveOptions) ToParams() (url.Values, error) { + params := url.Values{} + if o == nil { + return params, nil + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + s := reflect.ValueOf(o) + if reflect.Ptr == s.Kind() { + s = s.Elem() + } + sType := s.Type() + for i := 0; i < s.NumField(); i++ { + fieldName := sType.Field(i).Name + if !o.Changed(fieldName) { + continue + } + fieldName = strings.ToLower(fieldName) + f := s.Field(i) + if reflect.Ptr == f.Kind() { + f = f.Elem() + } + switch { + case util.IsSimpleType(f): + params.Set(fieldName, util.SimpleTypeToParam(f)) + case f.Kind() == reflect.Slice: + for i := 0; i < f.Len(); i++ { + elem := f.Index(i) + if util.IsSimpleType(elem) { + params.Add(fieldName, util.SimpleTypeToParam(elem)) + } else { + return nil, errors.New("slices must contain only simple types") + } + } + case f.Kind() == reflect.Map: + lowerCaseKeys := make(map[string][]string) + iter := f.MapRange() + for iter.Next() { + lowerCaseKeys[iter.Key().Interface().(string)] = iter.Value().Interface().([]string) + + } + s, err := json.MarshalToString(lowerCaseKeys) + if err != nil { + return nil, err + } + + params.Set(fieldName, s) + } + + } + return params, nil +} diff --git a/pkg/bindings/test/secrets_test.go b/pkg/bindings/test/secrets_test.go new file mode 100644 index 000000000..17c043e4b --- /dev/null +++ b/pkg/bindings/test/secrets_test.go @@ -0,0 +1,133 @@ +package test_bindings + +import ( + "context" + "net/http" + "strings" + "time" + + "github.com/containers/podman/v2/pkg/bindings" + "github.com/containers/podman/v2/pkg/bindings/secrets" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +var _ = Describe("Podman secrets", func() { + var ( + bt *bindingTest + s *gexec.Session + connText context.Context + err error + ) + + BeforeEach(func() { + bt = newBindingTest() + bt.RestoreImagesFromCache() + s = bt.startAPIService() + time.Sleep(1 * time.Second) + connText, err = bindings.NewConnection(context.Background(), bt.sock) + Expect(err).To(BeNil()) + }) + + AfterEach(func() { + + s.Kill() + bt.cleanup() + }) + + It("create secret", func() { + r := strings.NewReader("mysecret") + name := "mysecret" + opts := &secrets.CreateOptions{ + Name: &name, + } + _, err := secrets.Create(connText, r, opts) + Expect(err).To(BeNil()) + + // should not be allowed to create duplicate secret name + _, err = secrets.Create(connText, r, opts) + Expect(err).To(Not(BeNil())) + }) + + It("inspect secret", func() { + r := strings.NewReader("mysecret") + name := "mysecret" + opts := &secrets.CreateOptions{ + Name: &name, + } + _, err := secrets.Create(connText, r, opts) + Expect(err).To(BeNil()) + + data, err := secrets.Inspect(connText, name, nil) + Expect(err).To(BeNil()) + Expect(data.Spec.Name).To(Equal(name)) + + // inspecting non-existent secret should fail + data, err = secrets.Inspect(connText, "notasecret", nil) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusNotFound)) + }) + + It("list secret", func() { + r := strings.NewReader("mysecret") + name := "mysecret" + opts := &secrets.CreateOptions{ + Name: &name, + } + _, err := secrets.Create(connText, r, opts) + Expect(err).To(BeNil()) + + data, err := secrets.List(connText, nil) + Expect(err).To(BeNil()) + Expect(data[0].Spec.Name).To(Equal(name)) + }) + + It("list multiple secret", func() { + r := strings.NewReader("mysecret") + name := "mysecret" + opts := &secrets.CreateOptions{ + Name: &name, + } + _, err := secrets.Create(connText, r, opts) + Expect(err).To(BeNil()) + + r2 := strings.NewReader("mysecret2") + name2 := "mysecret2" + opts2 := &secrets.CreateOptions{ + Name: &name2, + } + _, err = secrets.Create(connText, r2, opts2) + Expect(err).To(BeNil()) + + data, err := secrets.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(data)).To(Equal(2)) + }) + + It("list no secrets", func() { + data, err := secrets.List(connText, nil) + Expect(err).To(BeNil()) + Expect(len(data)).To(Equal(0)) + }) + + It("remove secret", func() { + r := strings.NewReader("mysecret") + name := "mysecret" + opts := &secrets.CreateOptions{ + Name: &name, + } + _, err := secrets.Create(connText, r, opts) + Expect(err).To(BeNil()) + + err = secrets.Remove(connText, name) + Expect(err).To(BeNil()) + + // removing non-existent secret should fail + err = secrets.Remove(connText, "nosecret") + Expect(err).To(Not(BeNil())) + code, _ := bindings.CheckResponseCode(err) + Expect(code).To(BeNumerically("==", http.StatusNotFound)) + }) + +}) diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go index 2c97d7baf..d43b422a3 100644 --- a/pkg/domain/entities/engine_container.go +++ b/pkg/domain/entities/engine_container.go @@ -82,6 +82,10 @@ type ContainerEngine interface { PodTop(ctx context.Context, options PodTopOptions) (*StringSliceReport, error) PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error) SetupRootless(ctx context.Context, cmd *cobra.Command) error + SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) 
(*SecretCreateReport, error) + SecretInspect(ctx context.Context, nameOrIDs []string) ([]*SecretInfoReport, []error, error) + SecretList(ctx context.Context) ([]*SecretInfoReport, error) + SecretRm(ctx context.Context, nameOrID []string, opts SecretRmOptions) ([]*SecretRmReport, error) Shutdown(ctx context.Context) SystemDf(ctx context.Context, options SystemDfOptions) (*SystemDfReport, error) Unshare(ctx context.Context, args []string) error diff --git a/pkg/domain/entities/secrets.go b/pkg/domain/entities/secrets.go new file mode 100644 index 000000000..3cad4c099 --- /dev/null +++ b/pkg/domain/entities/secrets.go @@ -0,0 +1,104 @@ +package entities + +import ( + "time" + + "github.com/containers/podman/v2/pkg/errorhandling" +) + +type SecretCreateReport struct { + ID string +} + +type SecretCreateOptions struct { + Driver string +} + +type SecretListRequest struct { + Filters map[string]string +} + +type SecretListReport struct { + ID string + Name string + Driver string + CreatedAt string + UpdatedAt string +} + +type SecretRmOptions struct { + All bool +} + +type SecretRmReport struct { + ID string + Err error +} + +type SecretInfoReport struct { + ID string + CreatedAt time.Time + UpdatedAt time.Time + Spec SecretSpec +} + +type SecretSpec struct { + Name string + Driver SecretDriverSpec +} + +type SecretDriverSpec struct { + Name string + Options map[string]string +} + +// swagger:model SecretCreate +type SecretCreateRequest struct { + // User-defined name of the secret. + Name string + // Base64-url-safe-encoded (RFC 4648) data to store as secret. + Data string + // Driver represents a driver (default "file") + Driver SecretDriverSpec +} + +// Secret create response +// swagger:response SecretCreateResponse +type SwagSecretCreateResponse struct { + // in:body + Body struct { + SecretCreateReport + } +} + +// Secret list response +// swagger:response SecretListResponse +type SwagSecretListResponse struct { + // in:body + Body []*SecretInfoReport +} + +// Secret inspect response +// swagger:response SecretInspectResponse +type SwagSecretInspectResponse struct { + // in:body + Body SecretInfoReport +} + +// No such secret +// swagger:response NoSuchSecret +type SwagErrNoSuchSecret struct { + // in:body + Body struct { + errorhandling.ErrorModel + } +} + +// Secret in use +// swagger:response SecretInUse +type SwagErrSecretInUse struct { + // in:body + Body struct { + errorhandling.ErrorModel + } +} diff --git a/pkg/domain/infra/abi/secrets.go b/pkg/domain/infra/abi/secrets.go new file mode 100644 index 000000000..b1fe60e01 --- /dev/null +++ b/pkg/domain/infra/abi/secrets.go @@ -0,0 +1,138 @@ +package abi + +import ( + "context" + "io" + "io/ioutil" + "path/filepath" + + "github.com/containers/common/pkg/secrets" + "github.com/containers/podman/v2/pkg/domain/entities" + "github.com/pkg/errors" +) + +func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) { + data, _ := ioutil.ReadAll(reader) + secretsPath := ic.Libpod.GetSecretsStorageDir() + manager, err := secrets.NewManager(secretsPath) + if err != nil { + return nil, err + } + driverOptions := make(map[string]string) + + if options.Driver == "" { + options.Driver = "file" + } + if options.Driver == "file" { + driverOptions["path"] = filepath.Join(secretsPath, "filedriver") + } + secretID, err := manager.Store(name, data, options.Driver, driverOptions) + if err != nil { + return nil, err + } + return 
&entities.SecretCreateReport{ + ID: secretID, + }, nil +} + +func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string) ([]*entities.SecretInfoReport, []error, error) { + secretsPath := ic.Libpod.GetSecretsStorageDir() + manager, err := secrets.NewManager(secretsPath) + if err != nil { + return nil, nil, err + } + errs := make([]error, 0, len(nameOrIDs)) + reports := make([]*entities.SecretInfoReport, 0, len(nameOrIDs)) + for _, nameOrID := range nameOrIDs { + secret, err := manager.Lookup(nameOrID) + if err != nil { + if errors.Cause(err).Error() == "no such secret" { + errs = append(errs, err) + continue + } else { + return nil, nil, errors.Wrapf(err, "error inspecting secret %s", nameOrID) + } + } + report := &entities.SecretInfoReport{ + ID: secret.ID, + CreatedAt: secret.CreatedAt, + UpdatedAt: secret.CreatedAt, + Spec: entities.SecretSpec{ + Name: secret.Name, + Driver: entities.SecretDriverSpec{ + Name: secret.Driver, + }, + }, + } + reports = append(reports, report) + + } + + return reports, errs, nil +} + +func (ic *ContainerEngine) SecretList(ctx context.Context) ([]*entities.SecretInfoReport, error) { + secretsPath := ic.Libpod.GetSecretsStorageDir() + manager, err := secrets.NewManager(secretsPath) + if err != nil { + return nil, err + } + secretList, err := manager.List() + if err != nil { + return nil, err + } + report := make([]*entities.SecretInfoReport, 0, len(secretList)) + for _, secret := range secretList { + reportItem := entities.SecretInfoReport{ + ID: secret.ID, + CreatedAt: secret.CreatedAt, + UpdatedAt: secret.CreatedAt, + Spec: entities.SecretSpec{ + Name: secret.Name, + Driver: entities.SecretDriverSpec{ + Name: secret.Driver, + Options: secret.DriverOptions, + }, + }, + } + report = append(report, &reportItem) + } + return report, nil +} + +func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, options entities.SecretRmOptions) ([]*entities.SecretRmReport, error) { + var ( + err error + toRemove []string + reports = []*entities.SecretRmReport{} + ) + secretsPath := ic.Libpod.GetSecretsStorageDir() + manager, err := secrets.NewManager(secretsPath) + if err != nil { + return nil, err + } + toRemove = nameOrIDs + if options.All { + allSecrs, err := manager.List() + if err != nil { + return nil, err + } + for _, secr := range allSecrs { + toRemove = append(toRemove, secr.ID) + } + } + for _, nameOrID := range toRemove { + deletedID, err := manager.Delete(nameOrID) + if err == nil || errors.Cause(err).Error() == "no such secret" { + reports = append(reports, &entities.SecretRmReport{ + Err: err, + ID: deletedID, + }) + continue + } else { + return nil, err + } + } + + return reports, nil +} diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index f10c8c175..daad911cd 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -350,17 +350,6 @@ func (ir *ImageEngine) Build(_ context.Context, containerFiles []string, opts en if err != nil { return nil, err } - // For remote clients, if the option for writing to a file was - // selected, we need to write to the *client's* filesystem. 
-	if len(opts.IIDFile) > 0 {
-		f, err := os.Create(opts.IIDFile)
-		if err != nil {
-			return nil, err
-		}
-		if _, err := f.WriteString(report.ID); err != nil {
-			return nil, err
-		}
-	}
 	return report, nil
 }
diff --git a/pkg/domain/infra/tunnel/secrets.go b/pkg/domain/infra/tunnel/secrets.go
new file mode 100644
index 000000000..f7c0f7d13
--- /dev/null
+++ b/pkg/domain/infra/tunnel/secrets.go
@@ -0,0 +1,82 @@
+package tunnel
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/podman/v2/pkg/bindings/secrets"
+	"github.com/containers/podman/v2/pkg/domain/entities"
+	"github.com/containers/podman/v2/pkg/errorhandling"
+	"github.com/pkg/errors"
+)
+
+func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) {
+	opts := new(secrets.CreateOptions).WithDriver(options.Driver).WithName(name)
+	return secrets.Create(ic.ClientCtx, reader, opts)
+}
+
+func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string) ([]*entities.SecretInfoReport, []error, error) {
+	allInspect := make([]*entities.SecretInfoReport, 0, len(nameOrIDs))
+	errs := make([]error, 0, len(nameOrIDs))
+	for _, name := range nameOrIDs {
+		inspected, err := secrets.Inspect(ic.ClientCtx, name, nil)
+		if err != nil {
+			errModel, ok := err.(errorhandling.ErrorModel)
+			if !ok {
+				return nil, nil, err
+			}
+			if errModel.ResponseCode == 404 {
+				errs = append(errs, errors.Errorf("no such secret %q", name))
+				continue
+			}
+			return nil, nil, err
+		}
+		allInspect = append(allInspect, inspected)
+	}
+	return allInspect, errs, nil
+}
+
+func (ic *ContainerEngine) SecretList(ctx context.Context) ([]*entities.SecretInfoReport, error) {
+	return secrets.List(ic.ClientCtx, nil)
+}
+
+func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, options entities.SecretRmOptions) ([]*entities.SecretRmReport, error) {
+	allRm := make([]*entities.SecretRmReport, 0, len(nameOrIDs))
+	if options.All {
+		allSecrets, err := secrets.List(ic.ClientCtx, nil)
+		if err != nil {
+			return nil, err
+		}
+		for _, secret := range allSecrets {
+			allRm = append(allRm, &entities.SecretRmReport{
+				Err: secrets.Remove(ic.ClientCtx, secret.ID),
+				ID:  secret.ID,
+			})
+		}
+		return allRm, nil
+	}
+	for _, name := range nameOrIDs {
+		secret, err := secrets.Inspect(ic.ClientCtx, name, nil)
+		if err != nil {
+			errModel, ok := err.(errorhandling.ErrorModel)
+			if !ok {
+				return nil, err
+			}
+			if errModel.ResponseCode == 404 {
+				allRm = append(allRm, &entities.SecretRmReport{
+					Err: errors.Errorf("no secret with name or id %q: no such secret", name),
+					ID:  "",
+				})
+				continue
+			}
+			return nil, err
+		}
+		allRm = append(allRm, &entities.SecretRmReport{
+			Err: secrets.Remove(ic.ClientCtx, name),
+			ID:  secret.ID,
+		})
+	}
+	return allRm, nil
+}
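createContainerOptions below copies SpecGenerator.Secrets into a libpod.WithSecrets option, which is how secret names attached to a container spec reach libpod. A minimal sketch of populating the new field when building a spec in Go; NewSpecGenerator is assumed from the specgen package, and the image name is illustrative:

package main

import (
	"fmt"

	"github.com/containers/podman/v2/pkg/specgen"
)

func main() {
	// Secrets lives on the embedded ContainerStorageConfig (see the
	// specgen.go hunk below); names refer to already-created secrets
	s := specgen.NewSpecGenerator("docker.io/library/alpine", false)
	s.Secrets = []string{"mysecret"}
	fmt.Println(s.Secrets)
}
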
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index 1bc050b00..74291325c
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -359,6 +359,10 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
 		options = append(options, libpod.WithHealthCheck(s.ContainerHealthCheckConfig.HealthConfig))
 		logrus.Debugf("New container has a health check")
 	}
+
+	if len(s.Secrets) != 0 {
+		options = append(options, libpod.WithSecrets(s.Secrets))
+	}
 	return options, nil
 }
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index a6cc0a730..732579bf0
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -237,6 +237,9 @@ type ContainerStorageConfig struct {
 	// If not set, the default of rslave will be used.
 	// Optional.
 	RootfsPropagation string `json:"rootfs_propagation,omitempty"`
+	// Secrets are the secrets that will be added to the container
+	// Optional.
+	Secrets []string `json:"secrets,omitempty"`
 }

 // ContainerSecurityConfig is a container's security features, including
diff --git a/test/apiv2/50-secrets.at b/test/apiv2/50-secrets.at
new file mode 100644
index 000000000..1ef43381a
--- /dev/null
+++ b/test/apiv2/50-secrets.at
@@ -0,0 +1,36 @@
+# -*- sh -*-
+#
+# secret-related tests
+#
+
+# secret create
+t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0"' 200 \
+    .ID~.*
+
+# secret create unsupported labels
+t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0","Labels":{"fail":"fail"}' 400
+
+# secret create name already in use
+t POST secrets/create '"Name":"mysecret","Data":"c2VjcmV0"' 409
+
+# secret inspect
+t GET secrets/mysecret 200 \
+    .Spec.Name=mysecret
+
+# secret inspect non-existent secret
+t GET secrets/bogus 404
+
+# secret list
+t GET secrets 200 \
+    length=1
+
+# secret list unsupported filters
+t GET secrets?filters=%7B%22name%22%3A%5B%22foo1%22%5D%7D 400
+
+# secret rm
+t DELETE secrets/mysecret 204
+# secret rm non-existent secret
+t DELETE secrets/bogus 404
+
+# secret update not implemented
+t POST secrets/mysecret/update "" 501
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
index 71b4c0089..9bab4c926
--- a/test/e2e/build_test.go
+++ b/test/e2e/build_test.go
@@ -194,7 +194,7 @@ var _ = Describe("Podman build", func() {
 		inspect := podmanTest.Podman([]string{"inspect", string(id)})
 		inspect.WaitWithDefaultTimeout()
 		data := inspect.InspectImageJSON()
-		Expect(data[0].ID).To(Equal(string(id)))
+		Expect("sha256:" + data[0].ID).To(Equal(string(id)))
 	})

 	It("podman Test PATH in built image", func() {
@@ -458,4 +458,55 @@ RUN [[ -L /test/dummy-symlink ]] && echo SYMLNKOK || echo SYMLNKERR`
 		Expect(ok).To(BeTrue())
 	})

+	It("podman build --from, --add-host, --cap-drop, --cap-add", func() {
+		targetPath, err := CreateTempDirInTempDir()
+		Expect(err).To(BeNil())
+
+		containerFile := filepath.Join(targetPath, "Containerfile")
+		content := `FROM scratch
+RUN cat /etc/hosts
+RUN grep CapEff /proc/self/status`
+
+		Expect(ioutil.WriteFile(containerFile, []byte(content), 0755)).To(BeNil())
+
+		defer func() {
+			Expect(os.RemoveAll(containerFile)).To(BeNil())
+		}()
+
+		// When
+		session := podmanTest.Podman([]string{
+			"build", "--cap-drop=all", "--cap-add=net_bind_service", "--add-host", "testhost:1.2.3.4", "--from", "alpine", targetPath,
+		})
+		session.WaitWithDefaultTimeout()
+
+		// Then
+		Expect(session.ExitCode()).To(Equal(0))
+		Expect(strings.Fields(session.OutputToString())).
+			To(ContainElement("alpine"))
+		Expect(strings.Fields(session.OutputToString())).
+			To(ContainElement("testhost"))
+		Expect(strings.Fields(session.OutputToString())).
+ To(ContainElement("0000000000000400")) + }) + + It("podman build --arch", func() { + targetPath, err := CreateTempDirInTempDir() + Expect(err).To(BeNil()) + + containerFile := filepath.Join(targetPath, "Containerfile") + Expect(ioutil.WriteFile(containerFile, []byte("FROM alpine"), 0755)).To(BeNil()) + + defer func() { + Expect(os.RemoveAll(containerFile)).To(BeNil()) + }() + + // When + session := podmanTest.Podman([]string{ + "build", "--arch", "arm64", targetPath, + }) + session.WaitWithDefaultTimeout() + + // Then + Expect(session.ExitCode()).To(Equal(0)) + }) }) diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go index 3c7bbca66..8760978fd 100644 --- a/test/e2e/commit_test.go +++ b/test/e2e/commit_test.go @@ -279,4 +279,29 @@ var _ = Describe("Podman commit", func() { data := check.InspectImageJSON() Expect(data[0].ID).To(Equal(string(id))) }) + + It("podman commit should not commit secret", func() { + secretsString := "somesecretdata" + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "secr", ALPINE, "cat", "/run/secrets/mysecret"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(Equal(secretsString)) + + session = podmanTest.Podman([]string{"commit", "secr", "foobar.com/test1-image:latest"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"run", "foobar.com/test1-image:latest", "cat", "/run/secrets/mysecret"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Not(Equal(0))) + + }) }) diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go index 54d801e12..53810d882 100644 --- a/test/e2e/common_test.go +++ b/test/e2e/common_test.go @@ -491,6 +491,21 @@ func (p *PodmanTestIntegration) CleanupVolume() { p.Cleanup() } +// CleanupSecret cleans up the temporary store +func (p *PodmanTestIntegration) CleanupSecrets() { + // Remove all containers + session := p.Podman([]string{"secret", "rm", "-a"}) + session.Wait(90) + + // Stop remove service on secret cleanup + p.StopRemoteService() + + // Nuke tempdir + if err := os.RemoveAll(p.TempDir); err != nil { + fmt.Printf("%q\n", err) + } +} + // InspectContainerToJSON takes the session output of an inspect // container and returns json func (s *PodmanSessionIntegration) InspectContainerToJSON() []define.InspectContainerData { diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go index bcfab0f68..cd949c666 100644 --- a/test/e2e/generate_kube_test.go +++ b/test/e2e/generate_kube_test.go @@ -699,4 +699,39 @@ ENTRYPOINT /bin/sleep` Expect(containers[0].Command).To(Equal([]string{"/bin/sh", "-c", "/bin/sleep"})) Expect(containers[0].Args).To(Equal([]string{"10s"})) }) + + It("podman generate kube - --privileged container", func() { + session := podmanTest.Podman([]string{"create", "--pod", "new:testpod", "--privileged", ALPINE, "ls"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + kube := podmanTest.Podman([]string{"generate", "kube", "testpod"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + // Now make sure that the capabilities aren't set. 
+ pod := new(v1.Pod) + err := yaml.Unmarshal(kube.Out.Contents(), pod) + Expect(err).To(BeNil()) + + containers := pod.Spec.Containers + Expect(len(containers)).To(Equal(1)) + Expect(containers[0].SecurityContext.Capabilities).To(BeNil()) + + // Now make sure we can also `play` it. + kubeFile := filepath.Join(podmanTest.TempDir, "kube.yaml") + + kube = podmanTest.Podman([]string{"generate", "kube", "testpod", "-f", kubeFile}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + // Remove the pod so play can recreate it. + kube = podmanTest.Podman([]string{"pod", "rm", "-f", "testpod"}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + + kube = podmanTest.Podman([]string{"play", "kube", kubeFile}) + kube.WaitWithDefaultTimeout() + Expect(kube.ExitCode()).To(Equal(0)) + }) }) diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go index caeaf190e..76d362288 100644 --- a/test/e2e/run_test.go +++ b/test/e2e/run_test.go @@ -668,8 +668,8 @@ USER bin` Expect(session.ExitCode()).To(Equal(0)) }) - It("podman run with secrets", func() { - SkipIfRemote("--default-mounts-file option is not supported in podman-remote") + It("podman run with subscription secrets", func() { + SkipIfRemote("--default-mount-file option is not supported in podman-remote") containersDir := filepath.Join(podmanTest.TempDir, "containers") err := os.MkdirAll(containersDir, 0755) Expect(err).To(BeNil()) @@ -1448,4 +1448,26 @@ WORKDIR /madethis` Expect(session.ExitCode()).To(Equal(0)) Expect(session.OutputToString()).To(ContainSubstring(hostnameEnv)) }) + + It("podman run --secret", func() { + secretsString := "somesecretdata" + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte(secretsString), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "mysecret", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"run", "--secret", "mysecret", "--name", "secr", ALPINE, "cat", "/run/secrets/mysecret"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(Equal(secretsString)) + + session = podmanTest.Podman([]string{"inspect", "secr", "--format", " {{(index .Config.Secrets 0).Name}}"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.OutputToString()).To(ContainSubstring("mysecret")) + + }) }) diff --git a/test/e2e/secret_test.go b/test/e2e/secret_test.go new file mode 100644 index 000000000..6dad605c5 --- /dev/null +++ b/test/e2e/secret_test.go @@ -0,0 +1,202 @@ +package integration + +import ( + "io/ioutil" + "os" + "path/filepath" + + . "github.com/containers/podman/v2/test/utils" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Podman secret", func() { + var ( + tempdir string + err error + podmanTest *PodmanTestIntegration + ) + + BeforeEach(func() { + tempdir, err = CreateTempDirInTempDir() + if err != nil { + os.Exit(1) + } + podmanTest = PodmanTestCreate(tempdir) + podmanTest.Setup() + podmanTest.SeedImages() + }) + + AfterEach(func() { + podmanTest.CleanupSecrets() + f := CurrentGinkgoTestDescription() + processTestResult(f) + + }) + + It("podman secret create", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + secrID := session.OutputToString() + Expect(session.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"secret", "inspect", "--format", "{{.ID}}", secrID}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Equal(0)) + Expect(inspect.OutputToString()).To(Equal(secrID)) + }) + + It("podman secret create bad name should fail", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "?!", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Not(Equal(0))) + }) + + It("podman secret inspect", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + secrID := session.OutputToString() + Expect(session.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"secret", "inspect", secrID}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Equal(0)) + Expect(inspect.IsJSONOutputValid()).To(BeTrue()) + }) + + It("podman secret inspect with --format", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + secrID := session.OutputToString() + Expect(session.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"secret", "inspect", "--format", "{{.ID}}", secrID}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Equal(0)) + Expect(inspect.OutputToString()).To(Equal(secrID)) + }) + + It("podman secret inspect multiple secrets", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + secrID := session.OutputToString() + Expect(session.ExitCode()).To(Equal(0)) + + session2 := podmanTest.Podman([]string{"secret", "create", "b", secretFilePath}) + session2.WaitWithDefaultTimeout() + secrID2 := session2.OutputToString() + Expect(session2.ExitCode()).To(Equal(0)) + + inspect := podmanTest.Podman([]string{"secret", "inspect", secrID, secrID2}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Equal(0)) + Expect(inspect.IsJSONOutputValid()).To(BeTrue()) + 
}) + + It("podman secret inspect bogus", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + inspect := podmanTest.Podman([]string{"secret", "inspect", "bogus"}) + inspect.WaitWithDefaultTimeout() + Expect(inspect.ExitCode()).To(Not(Equal(0))) + + }) + + It("podman secret ls", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + list := podmanTest.Podman([]string{"secret", "ls"}) + list.WaitWithDefaultTimeout() + Expect(list.ExitCode()).To(Equal(0)) + Expect(len(list.OutputToStringArray())).To(Equal(2)) + + }) + + It("podman secret ls with Go template", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + list := podmanTest.Podman([]string{"secret", "ls", "--format", "table {{.Name}}"}) + list.WaitWithDefaultTimeout() + + Expect(list.ExitCode()).To(Equal(0)) + Expect(len(list.OutputToStringArray())).To(Equal(2), list.OutputToString()) + }) + + It("podman secret rm", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + secrID := session.OutputToString() + Expect(session.ExitCode()).To(Equal(0)) + + removed := podmanTest.Podman([]string{"secret", "rm", "a"}) + removed.WaitWithDefaultTimeout() + Expect(removed.ExitCode()).To(Equal(0)) + Expect(removed.OutputToString()).To(Equal(secrID)) + + session = podmanTest.Podman([]string{"secret", "ls"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(len(session.OutputToStringArray())).To(Equal(1)) + }) + + It("podman secret rm --all", func() { + secretFilePath := filepath.Join(podmanTest.TempDir, "secret") + err := ioutil.WriteFile(secretFilePath, []byte("mysecret"), 0755) + Expect(err).To(BeNil()) + + session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + session = podmanTest.Podman([]string{"secret", "create", "b", secretFilePath}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + + removed := podmanTest.Podman([]string{"secret", "rm", "-a"}) + removed.WaitWithDefaultTimeout() + Expect(removed.ExitCode()).To(Equal(0)) + + session = podmanTest.Podman([]string{"secret", "ls"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(len(session.OutputToStringArray())).To(Equal(1)) + }) + +}) diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 0ad3069ce..ccf46b324 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,14 @@ # Changelog +## v1.19.4 (2021-02-06) + run: fix check for host pid namespace + bump 
containernetworking/cni library to v0.8.1 - fix for CVE-2021-20206 + Finish plumbing for buildah bud --manifest + buildah manifest add localimage should work + Fix build arg check + --iidfile: print hash prefix + ## v1.19.3 (2021-01-28) [ci:docs] Fix man page for buildah push Vendor in containers/image v5.10.1 diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 4fbc475c2..7065e00e4 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -28,7 +28,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.19.3" + Version = "1.19.4" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. // This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index db2faf71a..4a0f81b04 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,11 @@ +- Changelog for v1.19.4 (2021-02-06) + * run: fix check for host pid namespace + * bump containernetworking/cni library to v0.8.1 - fix for CVE-2021-20206 + * Finish plumbing for buildah bud --manifest + * buildah manifest add localimage should work + * Fix build arg check + * --iidfile: print hash prefix + - Changelog for v1.19.3 (2021-01-28) * [ci:docs] Fix man page for buildah push * Vendor in containers/image v5.10.1 diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index 9c6831601..f588c8043 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -224,7 +224,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse return false, nil } -func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpec string) error { +func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpec string) (string, error) { var create bool systemContext := &types.SystemContext{} var list manifests.List @@ -235,13 +235,13 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe } else { _, list, err = manifests.LoadFromImage(b.store, listImage.ID) if err != nil { - return err + return "", err } } names, err := util.ExpandNames([]string{manifestName}, "", systemContext, b.store) if err != nil { - return errors.Wrapf(err, "error encountered while expanding image name %q", manifestName) + return "", errors.Wrapf(err, "error encountered while expanding image name %q", manifestName) } ref, err := alltransports.ParseImageName(imageSpec) @@ -249,13 +249,13 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe if ref, err = alltransports.ParseImageName(util.DefaultTransport + imageSpec); err != nil { // check if the local image exists if ref, _, err = util.FindImage(b.store, "", systemContext, imageSpec); err != nil { - return err + return "", err } } } if _, err = list.Add(ctx, systemContext, ref, true); err != nil { - return err + return "", err } var imageID string if create { @@ -263,10 +263,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe } else { imageID, err = list.SaveToImage(b.store, listImage.ID, nil, "") } - if err == nil { - 
fmt.Printf("%s\n", imageID) - } - return err + return imageID, err } // Commit writes the contents of the container, along with its updated @@ -469,7 +466,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options dest = dest2 } if options.IIDFile != "" { - if err = ioutil.WriteFile(options.IIDFile, []byte(img.ID), 0644); err != nil { + if err = ioutil.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil { return imgID, nil, "", err } } @@ -489,9 +486,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } if options.Manifest != "" { - if err := b.addManifest(ctx, options.Manifest, imgID); err != nil { + manifestID, err := b.addManifest(ctx, options.Manifest, imgID) + if err != nil { return imgID, nil, "", err } + logrus.Debugf("added imgID %s to manifestID %s", imgID, manifestID) + } return imgID, ref, manifestDigest, nil } diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index cccf42895..17469ad12 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -4,7 +4,7 @@ go 1.12 require ( github.com/containerd/containerd v1.4.1 // indirect - github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 + github.com/containernetworking/cni v0.8.1 github.com/containers/common v0.33.1 github.com/containers/image/v5 v5.10.1 github.com/containers/ocicrypt v1.0.3 diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index bf796c496..cab904fcf 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -76,8 +76,8 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc= -github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx3z0= github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY= github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q= diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index a72e24eea..74ed9a42b 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -115,6 +115,7 @@ type Executor struct { imageInfoLock sync.Mutex imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs fromOverride string + manifest string } type imageTypeAndHistoryAndDiffIDs struct { @@ -231,6 +232,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod logRusage: options.LogRusage, imageInfoCache: 
make(map[string]imageTypeAndHistoryAndDiffIDs),
 		fromOverride:   options.From,
+		manifest:       options.Manifest,
 	}
 	if exec.err == nil {
 		exec.err = os.Stderr
@@ -679,7 +681,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 	}
 	logrus.Debugf("printing final image id %q", imageID)
 	if b.iidfile != "" {
-		if err = ioutil.WriteFile(b.iidfile, []byte(imageID), 0644); err != nil {
+		if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
 			return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile)
 		}
 	} else {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 9c15785bc..13631108e 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -838,7 +838,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	// we need to call ib.Run() to correctly put the args together before
 	// determining if a cached layer with the same build args already exists
 	// and that is done in the if block below.
-	if checkForLayers && s.builder.Args == nil {
+	if checkForLayers && len(s.builder.Args) == 0 {
 		cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
 		if err != nil {
 			return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
@@ -1276,6 +1276,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 		MaxRetries:       s.executor.maxPullPushRetries,
 		RetryDelay:       s.executor.retryPullPushDelay,
 		HistoryTimestamp: s.executor.timestamp,
+		Manifest:         s.executor.manifest,
 	}
 	imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
 	if err != nil {
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 66c856884..8c7c1bbc0 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -2210,7 +2210,7 @@ func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions)
 	case IsolationOCI:
 		pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
 		userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
-		if (pidns == nil || pidns.Host) && (userns != nil && !userns.Host) {
+		if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) {
 			return errors.Errorf("not allowed to mix host PID namespace with container user namespace")
 		}
 	}
diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go
new file mode 100644
index 000000000..37edc16be
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go
@@ -0,0 +1,158 @@
+package filedriver
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/containers/storage/pkg/lockfile"
+	"github.com/pkg/errors"
+)
+
+// secretsDataFile is the file where secrets data/payload will be stored
+var secretsDataFile = "secretsdata.json"
+
+// errNoSecretData indicates that there is no data associated with an id
+var errNoSecretData = errors.New("no secret data with ID")
+
+// errSecretIDExists indicates that there is already secret data associated with an id
+var errSecretIDExists = errors.New("secret data with ID already exists")
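+
+// For orientation, the data file is just a JSON map from secret ID to
+// payload bytes; Go marshals []byte as base64, so a store holding one
+// secret looks like this (ID and payload invented for illustration):
+//
+//	{
+//	    "0123456789abcdef123456789": "bXlzZWNyZXQ="
+//	}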
+
+// Driver is the filedriver object
+type Driver struct {
+	// secretsDataFilePath is the path to the secretsfile
+	secretsDataFilePath string
+	// lockfile is the filedriver lockfile
+	lockfile lockfile.Locker
+}
+
+// NewDriver creates a new file driver.
+// rootPath is the directory where the secrets data file resides.
+func NewDriver(rootPath string) (*Driver, error) {
+	fileDriver := new(Driver)
+	fileDriver.secretsDataFilePath = filepath.Join(rootPath, secretsDataFile)
+	// the lockfile functions require that the rootPath dir is executable
+	if err := os.MkdirAll(rootPath, 0700); err != nil {
+		return nil, err
+	}
+
+	lock, err := lockfile.GetLockfile(filepath.Join(rootPath, "secretsdata.lock"))
+	if err != nil {
+		return nil, err
+	}
+	fileDriver.lockfile = lock
+
+	return fileDriver, nil
+}
+
+// List returns all secret IDs
+func (d *Driver) List() ([]string, error) {
+	d.lockfile.Lock()
+	defer d.lockfile.Unlock()
+	secretData, err := d.getAllData()
+	if err != nil {
+		return nil, err
+	}
+	var allID []string
+	for k := range secretData {
+		allID = append(allID, k)
+	}
+	sort.Strings(allID)
+	return allID, err
+}
+
+// Lookup returns the bytes associated with a secret ID
+func (d *Driver) Lookup(id string) ([]byte, error) {
+	d.lockfile.Lock()
+	defer d.lockfile.Unlock()
+
+	secretData, err := d.getAllData()
+	if err != nil {
+		return nil, err
+	}
+	if data, ok := secretData[id]; ok {
+		return data, nil
+	}
+	return nil, errors.Wrapf(errNoSecretData, "%s", id)
+}
+
+// Store stores the bytes associated with an ID. An error is returned if the ID already exists
+func (d *Driver) Store(id string, data []byte) error {
+	d.lockfile.Lock()
+	defer d.lockfile.Unlock()
+
+	secretData, err := d.getAllData()
+	if err != nil {
+		return err
+	}
+	if _, ok := secretData[id]; ok {
+		return errors.Wrapf(errSecretIDExists, "%s", id)
+	}
+	secretData[id] = data
+	marshalled, err := json.MarshalIndent(secretData, "", " ")
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0600)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Delete deletes the secret associated with the specified ID. An error is returned if no matching secret is found.
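+// For example (illustrative), calling Delete on an ID that was never stored
+// returns an error wrapping errNoSecretData.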
+func (d *Driver) Delete(id string) error { + d.lockfile.Lock() + defer d.lockfile.Unlock() + secretData, err := d.getAllData() + if err != nil { + return err + } + if _, ok := secretData[id]; ok { + delete(secretData, id) + } else { + return errors.Wrap(errNoSecretData, id) + } + marshalled, err := json.MarshalIndent(secretData, "", " ") + if err != nil { + return err + } + err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0600) + if err != nil { + return err + } + return nil +} + +// getAllData reads the data file and returns all data +func (d *Driver) getAllData() (map[string][]byte, error) { + // check if the db file exists + _, err := os.Stat(d.secretsDataFilePath) + if err != nil { + if os.IsNotExist(err) { + // the file will be created later on a store() + return make(map[string][]byte), nil + } else { + return nil, err + } + } + + file, err := os.Open(d.secretsDataFilePath) + if err != nil { + return nil, err + } + defer file.Close() + + byteValue, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + secretData := new(map[string][]byte) + err = json.Unmarshal([]byte(byteValue), secretData) + if err != nil { + return nil, err + } + return *secretData, nil +} diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go new file mode 100644 index 000000000..5e0fb3e9d --- /dev/null +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -0,0 +1,282 @@ +package secrets + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/containers/common/pkg/secrets/filedriver" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" +) + +// maxSecretSize is the max size for secret data - 512kB +const maxSecretSize = 512000 + +// secretIDLength is the character length of a secret ID - 25 +const secretIDLength = 25 + +// errInvalidPath indicates that the secrets path is invalid +var errInvalidPath = errors.New("invalid secrets path") + +// errNoSuchSecret indicates that the secret does not exist +var errNoSuchSecret = errors.New("no such secret") + +// errSecretNameInUse indicates that the secret name is already in use +var errSecretNameInUse = errors.New("secret name in use") + +// errInvalidSecretName indicates that the secret name is invalid +var errInvalidSecretName = errors.New("invalid secret name") + +// errInvalidDriver indicates that the driver type is invalid +var errInvalidDriver = errors.New("invalid driver") + +// errInvalidDriverOpt indicates that a driver option is invalid +var errInvalidDriverOpt = errors.New("invalid driver option") + +// errAmbiguous indicates that a secret is ambiguous +var errAmbiguous = errors.New("secret is ambiguous") + +// errDataSize indicates that the secret data is too large or too small +var errDataSize = errors.New("secret data must be larger than 0 and less than 512000 bytes") + +// secretsFile is the name of the file that the secrets database will be stored in +var secretsFile = "secrets.json" + +// secretNameRegexp matches valid secret names +// Allowed: 64 [a-zA-Z0-9-_.] 
characters, and the start and end character must be [a-zA-Z0-9]
+var secretNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)
+
+// SecretsManager holds information on handling secrets
+type SecretsManager struct {
+	// secretsPath is the path to the db file where secrets are stored
+	secretsDBPath string
+	// lockfile is the locker for the secrets file
+	lockfile lockfile.Locker
+	// db is an in-memory cache of the database of secrets
+	db *db
+}
+
+// Secret defines a secret
+type Secret struct {
+	// Name is the name of the secret
+	Name string `json:"name"`
+	// ID is the unique secret ID
+	ID string `json:"id"`
+	// Metadata stores other metadata on the secret
+	Metadata map[string]string `json:"metadata,omitempty"`
+	// CreatedAt is when the secret was created
+	CreatedAt time.Time `json:"createdAt"`
+	// Driver is the driver used to store secret data
+	Driver string `json:"driver"`
+	// DriverOptions is other metadata needed to use the driver
+	DriverOptions map[string]string `json:"driverOptions"`
+}
+
+// SecretsDriver interfaces with the secrets data store.
+// The driver stores the actual bytes of secret data, as opposed to
+// the secret metadata.
+// Currently only the unencrypted filedriver is implemented.
+type SecretsDriver interface {
+	// List lists all secret ids in the secrets data store
+	List() ([]string, error)
+	// Lookup gets the secret's data bytes
+	Lookup(id string) ([]byte, error)
+	// Store stores the secret's data bytes
+	Store(id string, data []byte) error
+	// Delete deletes a secret's data from the driver
+	Delete(id string) error
+}
+
+// NewManager creates a new secrets manager
+// rootPath is the directory where the secrets data file resides
+func NewManager(rootPath string) (*SecretsManager, error) {
+	manager := new(SecretsManager)
+
+	if !filepath.IsAbs(rootPath) {
+		return nil, errors.Wrapf(errInvalidPath, "path must be absolute: %s", rootPath)
+	}
+	// the lockfile functions require that the rootPath dir is executable
+	if err := os.MkdirAll(rootPath, 0700); err != nil {
+		return nil, err
+	}
+
+	lock, err := lockfile.GetLockfile(filepath.Join(rootPath, "secrets.lock"))
+	if err != nil {
+		return nil, err
+	}
+	manager.lockfile = lock
+	manager.secretsDBPath = filepath.Join(rootPath, secretsFile)
+	manager.db = new(db)
+	manager.db.Secrets = make(map[string]Secret)
+	manager.db.NameToID = make(map[string]string)
+	manager.db.IDToName = make(map[string]string)
+	return manager, nil
+}
+
+// Store takes a name, creates a secret and stores the secret metadata and the secret payload.
+// It returns a generated ID that is associated with the secret.
+// The max size for secret data is 512kB.
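+// A minimal usage sketch (root path and driver path below are invented for
+// illustration; "file" is currently the only implemented driver):
+//
+//	manager, err := NewManager("/run/secrets")   // hypothetical root
+//	if err != nil { /* handle */ }
+//	id, err := manager.Store("mysecret", []byte("hello"), "file",
+//		map[string]string{"path": "/run/secrets/filedriver"})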
+func (s *SecretsManager) Store(name string, data []byte, driverType string, driverOpts map[string]string) (string, error) {
+	err := validateSecretName(name)
+	if err != nil {
+		return "", err
+	}
+
+	if !(len(data) > 0 && len(data) < maxSecretSize) {
+		return "", errDataSize
+	}
+
+	s.lockfile.Lock()
+	defer s.lockfile.Unlock()
+
+	exist, err := s.exactSecretExists(name)
+	if err != nil {
+		return "", err
+	}
+	if exist {
+		return "", errors.Wrapf(errSecretNameInUse, "%s", name)
+	}
+
+	secr := new(Secret)
+	secr.Name = name
+
+	for {
+		newID := stringid.GenerateNonCryptoID()
+		// GenerateNonCryptoID() gives 64 characters, so we truncate to correct length
+		newID = newID[0:secretIDLength]
+		_, err := s.lookupSecret(newID)
+		if err != nil {
+			if errors.Cause(err) == errNoSuchSecret {
+				secr.ID = newID
+				break
+			} else {
+				return "", err
+			}
+		}
+	}
+
+	secr.Driver = driverType
+	secr.Metadata = make(map[string]string)
+	secr.CreatedAt = time.Now()
+	secr.DriverOptions = driverOpts
+
+	driver, err := getDriver(driverType, driverOpts)
+	if err != nil {
+		return "", err
+	}
+	err = driver.Store(secr.ID, data)
+	if err != nil {
+		return "", errors.Wrapf(err, "error creating secret %s", name)
+	}
+
+	err = s.store(secr)
+	if err != nil {
+		return "", errors.Wrapf(err, "error creating secret %s", name)
+	}
+
+	return secr.ID, nil
+}
+
+// Delete removes all secret metadata and secret data associated with the specified secret.
+// Delete takes a name, ID, or partial ID.
+func (s *SecretsManager) Delete(nameOrID string) (string, error) {
+	err := validateSecretName(nameOrID)
+	if err != nil {
+		return "", err
+	}
+
+	s.lockfile.Lock()
+	defer s.lockfile.Unlock()
+
+	secret, err := s.lookupSecret(nameOrID)
+	if err != nil {
+		return "", err
+	}
+	secretID := secret.ID
+
+	driver, err := getDriver(secret.Driver, secret.DriverOptions)
+	if err != nil {
+		return "", err
+	}
+
+	err = driver.Delete(secretID)
+	if err != nil {
+		return "", errors.Wrapf(err, "error deleting secret %s", nameOrID)
+	}
+
+	err = s.delete(secretID)
+	if err != nil {
+		return "", errors.Wrapf(err, "error deleting secret %s", nameOrID)
+	}
+	return secretID, nil
+}
+
+// Lookup gives a secret's metadata given its name, ID, or partial ID.
+func (s *SecretsManager) Lookup(nameOrID string) (*Secret, error) {
+	s.lockfile.Lock()
+	defer s.lockfile.Unlock()
+
+	return s.lookupSecret(nameOrID)
+}
+
+// List lists all secrets.
+func (s *SecretsManager) List() ([]Secret, error) {
+	s.lockfile.Lock()
+	defer s.lockfile.Unlock()
+
+	secrets, err := s.lookupAll()
+	if err != nil {
+		return nil, err
+	}
+	var ls []Secret
+	for _, v := range secrets {
+		ls = append(ls, v)
+	}
+	return ls, nil
+}
+
+// LookupSecretData returns secret metadata as well as secret data in bytes.
+// The secret data can be looked up using its name, ID, or partial ID.
+func (s *SecretsManager) LookupSecretData(nameOrID string) (*Secret, []byte, error) {
+	s.lockfile.Lock()
+	defer s.lockfile.Unlock()
+
+	secret, err := s.lookupSecret(nameOrID)
+	if err != nil {
+		return nil, nil, err
+	}
+	driver, err := getDriver(secret.Driver, secret.DriverOptions)
+	if err != nil {
+		return nil, nil, err
+	}
+	data, err := driver.Lookup(secret.ID)
+	if err != nil {
+		return nil, nil, err
+	}
+	return secret, data, nil
+}
+
+// validateSecretName checks if the secret name is valid.
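+// For example (illustrative): "my_secret.1" is accepted, while "?!" (illegal
+// characters), "secret-" (trailing dash) and names longer than 64 characters
+// are rejected.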
+func validateSecretName(name string) error {
+	if !secretNameRegexp.MatchString(name) || len(name) > 64 || strings.HasSuffix(name, "-") || strings.HasSuffix(name, ".") {
+		return errors.Wrapf(errInvalidSecretName, "only 64 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]: %s", name)
+	}
+	return nil
+}
+
+// getDriver creates a new driver.
+func getDriver(name string, opts map[string]string) (SecretsDriver, error) {
+	if name == "file" {
+		if path, ok := opts["path"]; ok {
+			return filedriver.NewDriver(path)
+		} else {
+			return nil, errors.Wrap(errInvalidDriverOpt, "need path for filedriver")
+		}
+	}
+	return nil, errInvalidDriver
+}
diff --git a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
new file mode 100644
index 000000000..22db97c12
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
@@ -0,0 +1,211 @@
+package secrets
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+type db struct {
+	// Secrets maps a secret id to secret metadata
+	Secrets map[string]Secret `json:"secrets"`
+	// NameToID maps a secret name to a secret id
+	NameToID map[string]string `json:"nameToID"`
+	// IDToName maps a secret id to a secret name
+	IDToName map[string]string `json:"idToName"`
+	// lastModified is the time when the database was last modified on the file system
+	lastModified time.Time
+}
+
+// loadDB loads database data into the in-memory cache if it has been modified
+func (s *SecretsManager) loadDB() error {
+	// check if the db file exists
+	fileInfo, err := os.Stat(s.secretsDBPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// If the file doesn't exist, then there's no reason to update the db cache,
+			// the db cache will show no entries anyway.
+			// The file will be created later on a store()
+			return nil
+		}
+		return err
+	}
+
+	// We check if the file has been modified after the last time it was loaded into the cache.
+	// If the file has been modified, then we know that our cache is not up-to-date, so we load
+	// the db into the cache.
+	if s.db.lastModified.Equal(fileInfo.ModTime()) {
+		return nil
+	}
+
+	file, err := os.Open(s.secretsDBPath)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	byteValue, err := ioutil.ReadAll(file)
+	if err != nil {
+		return err
+	}
+	unmarshalled := new(db)
+	if err := json.Unmarshal(byteValue, unmarshalled); err != nil {
+		return err
+	}
+	s.db = unmarshalled
+	s.db.lastModified = fileInfo.ModTime()
+
+	return nil
+}
+
+// getNameAndID takes a secret's name, ID, or partial ID, and returns both its name and full ID.
+func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err error) {
+	name, id, err = s.getExactNameAndID(nameOrID)
+	if err == nil {
+		return name, id, nil
+	} else if errors.Cause(err) != errNoSuchSecret {
+		return "", "", err
+	}
+
+	// An ID prefix may have been given, so iterate through all IDs.
+	// An ID (or partial ID) has a max length of 25, so we return if it's greater than that.
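+	// For example (illustrative), a stored ID "0123456789abcdef123456789"
+	// is resolved from the prefix "0123" as long as no other stored ID
+	// shares that prefix; otherwise errAmbiguous is returned.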
+	if len(nameOrID) > secretIDLength {
+		return "", "", errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+	}
+	exists := false
+	var foundID, foundName string
+	for id, name := range s.db.IDToName {
+		if strings.HasPrefix(id, nameOrID) {
+			if exists {
+				return "", "", errors.Wrapf(errAmbiguous, "more than one secret with prefix %s", nameOrID)
+			}
+			exists = true
+			foundID = id
+			foundName = name
+		}
+	}
+
+	if exists {
+		return foundName, foundID, nil
+	}
+	return "", "", errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+}
+
+// getExactNameAndID takes a secret's name or ID and returns both its name and full ID.
+func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, err error) {
+	err = s.loadDB()
+	if err != nil {
+		return "", "", err
+	}
+	if name, ok := s.db.IDToName[nameOrID]; ok {
+		id := nameOrID
+		return name, id, nil
+	}
+
+	if id, ok := s.db.NameToID[nameOrID]; ok {
+		name := nameOrID
+		return name, id, nil
+	}
+
+	return "", "", errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+}
+
+// exactSecretExists checks if the secret exists, given a name or ID
+// Does not match partial name or IDs
+func (s *SecretsManager) exactSecretExists(nameOrID string) (bool, error) {
+	_, _, err := s.getExactNameAndID(nameOrID)
+	if err != nil {
+		if errors.Cause(err) == errNoSuchSecret {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// lookupAll gets all secrets stored.
+func (s *SecretsManager) lookupAll() (map[string]Secret, error) {
+	err := s.loadDB()
+	if err != nil {
+		return nil, err
+	}
+	return s.db.Secrets, nil
+}
+
+// lookupSecret returns a secret with the given name, ID, or partial ID.
+func (s *SecretsManager) lookupSecret(nameOrID string) (*Secret, error) {
+	err := s.loadDB()
+	if err != nil {
+		return nil, err
+	}
+	_, id, err := s.getNameAndID(nameOrID)
+	if err != nil {
+		return nil, err
+	}
+	allSecrets, err := s.lookupAll()
+	if err != nil {
+		return nil, err
+	}
+	if secret, ok := allSecrets[id]; ok {
+		return &secret, nil
+	}
+
+	return nil, errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+}
+
+// store creates a new secret in the secrets database.
+// It deals with only storing metadata, not data payload.
+func (s *SecretsManager) store(entry *Secret) error {
+	err := s.loadDB()
+	if err != nil {
+		return err
+	}
+
+	s.db.Secrets[entry.ID] = *entry
+	s.db.NameToID[entry.Name] = entry.ID
+	s.db.IDToName[entry.ID] = entry.Name
+
+	marshalled, err := json.MarshalIndent(s.db, "", " ")
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0600)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// delete deletes a secret from the secrets database, given a name, ID, or partial ID.
+// It deals with only deleting metadata, not data payload.
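+// SecretsManager.Delete removes the payload through the secret's driver
+// before calling this, so this step only drops the entry from the in-memory
+// db and rewrites its on-disk JSON copy.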
+func (s *SecretsManager) delete(nameOrID string) error { + name, id, err := s.getNameAndID(nameOrID) + if err != nil { + return err + } + err = s.loadDB() + if err != nil { + return err + } + delete(s.db.Secrets, id) + delete(s.db.NameToID, name) + delete(s.db.IDToName, id) + marshalled, err := json.MarshalIndent(s.db, "", " ") + if err != nil { + return err + } + err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0600) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml index a5fc8651c..7031d938a 100644 --- a/vendor/github.com/containers/ocicrypt/.travis.yml +++ b/vendor/github.com/containers/ocicrypt/.travis.yml @@ -1,4 +1,4 @@ -dist: xenial +dist: bionic language: go os: @@ -11,10 +11,16 @@ matrix: include: - os: linux +addons: + apt: + packages: + - gnutls-bin + - softhsm2 + go_import_path: github.com/containers/ocicrypt install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.19.1 + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 script: - make diff --git a/vendor/github.com/containers/ocicrypt/Makefile b/vendor/github.com/containers/ocicrypt/Makefile index 49fa80d74..dc9d98537 100644 --- a/vendor/github.com/containers/ocicrypt/Makefile +++ b/vendor/github.com/containers/ocicrypt/Makefile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: check build decoder +.PHONY: check build decoder generate-protobuf all: build @@ -28,4 +28,7 @@ vendor: go mod tidy test: - go test ./... + go test ./... -test.v + +generate-protobuf: + protoc -I utils/keyprovider/ utils/keyprovider/keyprovider.proto --go_out=plugins=grpc:utils/keyprovider diff --git a/vendor/github.com/containers/ocicrypt/README.md b/vendor/github.com/containers/ocicrypt/README.md index 9f64bddcc..84cab7a40 100644 --- a/vendor/github.com/containers/ocicrypt/README.md +++ b/vendor/github.com/containers/ocicrypt/README.md @@ -11,7 +11,7 @@ Consumers of OCIcrypt: ## Usage -There are various levels of usage for this library. The main consumers of these would be runtime/buil tools, and a more specific use would be in the ability to extend cryptographic function. +There are various levels of usage for this library. The main consumers of these would be runtime/build tools, and a more specific use would be in the ability to extend cryptographic function. ### Runtime/Build tool usage @@ -23,12 +23,12 @@ func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, des func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) ``` -The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it is the resposibility of the caller to still ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.). +The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. 
We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it is the responsibility of the caller to still ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.).
 
 ### Crypto Agility and Extensibility
 
-The implementation for both symmetric and assymetric encryption used in this library are behind 2 main interfaces, which users can extend if need be. These are in the following packages:
+The implementation for both symmetric and asymmetric encryption used in this library are behind 2 main interfaces, which users can extend if need be. These are in the following packages:
 - github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
 - github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping
 
@@ -37,3 +37,8 @@ We note that adding interfaces here is risky outside the OCI spec is not recomme
 ## Security Issues
 
 We consider security issues related to this library critical. Please report and security related issues by emailing maintainers in the [MAINTAINERS](MAINTAINERS) file.
+
+
+## Ocicrypt Pkcs11 Support
+
+Ocicrypt Pkcs11 support is currently experimental. For more details, please refer to [this document](docs/pkcs11.md).
diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go
index 44adcdb35..a789d052d 100644
--- a/vendor/github.com/containers/ocicrypt/config/constructors.go
+++ b/vendor/github.com/containers/ocicrypt/config/constructors.go
@@ -17,7 +17,11 @@
 package config
 
 import (
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"strings"
+
 	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
 )
 
 // EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
@@ -70,6 +74,88 @@ func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig
 	}, nil
 }
 
+// EncryptWithPkcs11 returns a CryptoConfig to encrypt with configured pkcs11 parameters
+func EncryptWithPkcs11(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Pubkeys, pkcs11Yamls [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+	ep := map[string][][]byte{}
+
+	if len(pkcs11Yamls) > 0 {
+		if pkcs11Config == nil {
+			return CryptoConfig{}, errors.New("pkcs11Config must not be nil")
+		}
+		p11confYaml, err := yaml.Marshal(pkcs11Config)
+		if err != nil {
+			return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml")
+		}
+
+		dc = DecryptConfig{
+			Parameters: map[string][][]byte{
+				"pkcs11-config": {p11confYaml},
+			},
+		}
+		ep["pkcs11-yamls"] = pkcs11Yamls
+	}
+	if len(pkcs11Pubkeys) > 0 {
+		ep["pkcs11-pubkeys"] = pkcs11Pubkeys
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+		DecryptConfig: &dc,
+	}, nil
+}
+
+// EncryptWithKeyProvider returns a CryptoConfig to encrypt with configured keyprovider parameters
+func EncryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) {
+	dc := DecryptConfig{}
+	ep := make(map[string][][]byte)
+	for _, keyProvider := range keyProviders {
+		keyProvidersStr := string(keyProvider)
+		idx := strings.Index(keyProvidersStr, ":")
+		if idx > 0 {
+			ep[keyProvidersStr[:idx]] = append(ep[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:]))
+		} else {
+			ep[keyProvidersStr] = append(ep[keyProvidersStr], []byte("Enabled"))
+		}
+	}
+
+	return CryptoConfig{
+		EncryptConfig: &EncryptConfig{
+			Parameters:    ep,
+			DecryptConfig: dc,
+		},
+
DecryptConfig: &dc, + }, nil +} + +// DecryptWithKeyProvider returns a CryptoConfig to decrypt with configured keyprovider parameters +func DecryptWithKeyProvider(keyProviders [][]byte) (CryptoConfig, error) { + dp := make(map[string][][]byte) + ep := map[string][][]byte{} + for _, keyProvider := range keyProviders { + keyProvidersStr := string(keyProvider) + idx := strings.Index(keyProvidersStr, ":") + if idx > 0 { + dp[keyProvidersStr[:idx]] = append(dp[keyProvidersStr[:idx]], []byte(keyProvidersStr[idx+1:])) + } else { + dp[keyProvidersStr] = append(dp[keyProvidersStr], []byte("Enabled")) + } + } + dc := DecryptConfig{ + Parameters: dp, + } + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} + // DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) { if len(privKeys) != len(privKeysPasswords) { @@ -132,3 +218,28 @@ func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig DecryptConfig: &dc, }, nil } + +// DecryptWithPkcs11Yaml returns a CryptoConfig to decrypt with pkcs11 YAML formatted key files +func DecryptWithPkcs11Yaml(pkcs11Config *pkcs11.Pkcs11Config, pkcs11Yamls [][]byte) (CryptoConfig, error) { + p11confYaml, err := yaml.Marshal(pkcs11Config) + if err != nil { + return CryptoConfig{}, errors.Wrapf(err, "Could not marshal Pkcs11Config to Yaml") + } + + dc := DecryptConfig{ + Parameters: map[string][][]byte{ + "pkcs11-yamls": pkcs11Yamls, + "pkcs11-config": {p11confYaml}, + }, + } + + ep := map[string][][]byte{} + + return CryptoConfig{ + EncryptConfig: &EncryptConfig{ + Parameters: ep, + DecryptConfig: dc, + }, + DecryptConfig: &dc, + }, nil +} diff --git a/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go new file mode 100644 index 000000000..b454b3716 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/config/keyprovider-config/config.go @@ -0,0 +1,81 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"github.com/pkg/errors"
+	"io/ioutil"
+	"os"
+)
+
+// Command describes the structure of command, it consists of path and args, where path defines the location of
+// the binary executable and args are passed on to the binary executable
+type Command struct {
+	Path string   `json:"path,omitempty"`
+	Args []string `json:"args,omitempty"`
+}
+
+// KeyProviderAttrs describes the structure of key provider, it defines the way of invocation to key provider
+type KeyProviderAttrs struct {
+	Command *Command `json:"cmd,omitempty"`
+	Grpc    string   `json:"grpc,omitempty"`
+}
+
+// OcicryptConfig represents the format of an ocicrypt_provider.conf config file
+type OcicryptConfig struct {
+	KeyProviderConfig map[string]KeyProviderAttrs `json:"key-providers"`
+}
+
+const ENVVARNAME = "OCICRYPT_KEYPROVIDER_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = json.Unmarshal(data, ic)
+	return ic, err
+}
+
+// GetConfiguration tries to read the configuration file at the following locations
+// ${OCICRYPT_KEYPROVIDER_CONFIG} == "/etc/ocicrypt_keyprovider.yaml"
+// If no configuration file could be found or read a null pointer is returned
+func GetConfiguration() (*OcicryptConfig, error) {
+	var ic *OcicryptConfig
+	var err error
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		ic, err = parseConfigFile(filename)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error while parsing keyprovider config file")
+		}
+	} else {
+		return nil, nil
+	}
+	return ic, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
new file mode 100644
index 000000000..76be34138
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go
@@ -0,0 +1,124 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
+)
+
+// OcicryptConfig represents the format of an imgcrypt.conf config file
+type OcicryptConfig struct {
+	Pkcs11Config pkcs11.Pkcs11Config `yaml:"pkcs11"`
+}
+
+const CONFIGFILE = "ocicrypt.conf"
+const ENVVARNAME = "OCICRYPT_CONFIG"
+
+// parseConfigFile parses a configuration file; it is not an error if the configuration file does
+// not exist, so no error is returned.
+// A config file may look like this:
+// module-directories:
+//  - /usr/lib64/pkcs11/
+//  - /usr/lib/pkcs11/
+// allowed-module-paths:
+//  - /usr/lib64/pkcs11/
+//  - /usr/lib/pkcs11/
+func parseConfigFile(filename string) (*OcicryptConfig, error) {
+	// a non-existent config file is not an error
+	_, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	ic := &OcicryptConfig{}
+	err = yaml.Unmarshal(data, ic)
+	return ic, err
+}
+
+// getConfiguration tries to read the configuration file at the following locations
+// 1) ${OCICRYPT_CONFIG} == "internal": use internal default allow-all policy
+// 2) ${OCICRYPT_CONFIG}
+// 3) ${XDG_CONFIG_HOME}/ocicrypt-pkcs11.conf
+// 4) ${HOME}/.config/ocicrypt-pkcs11.conf
+// 5) /etc/ocicrypt-pkcs11.conf
+// If no configuration file could be found or read a null pointer is returned
+func getConfiguration() (*OcicryptConfig, error) {
+	filename := os.Getenv(ENVVARNAME)
+	if len(filename) > 0 {
+		if filename == "internal" {
+			return getDefaultCryptoConfigOpts()
+		}
+		ic, err := parseConfigFile(filename)
+		if err != nil || ic != nil {
+			return ic, err
+		}
+	}
+	envvar := os.Getenv("XDG_CONFIG_HOME")
+	if len(envvar) > 0 {
+		ic, err := parseConfigFile(path.Join(envvar, CONFIGFILE))
+		if err != nil || ic != nil {
+			return ic, err
+		}
+	}
+	envvar = os.Getenv("HOME")
+	if len(envvar) > 0 {
+		ic, err := parseConfigFile(path.Join(envvar, ".config", CONFIGFILE))
+		if err != nil || ic != nil {
+			return ic, err
+		}
+	}
+	return parseConfigFile(path.Join("etc", CONFIGFILE))
+}
+
+// getDefaultCryptoConfigOpts returns default crypto config opts needed for pkcs11 module access
+func getDefaultCryptoConfigOpts() (*OcicryptConfig, error) {
+	mdyaml := pkcs11.GetDefaultModuleDirectoriesYaml("")
+	config := fmt.Sprintf("module-directories:\n"+
+		"%s"+
+		"allowed-module-paths:\n"+
+		"%s", mdyaml, mdyaml)
+	p11conf, err := pkcs11.ParsePkcs11ConfigFile([]byte(config))
+	return &OcicryptConfig{
+		Pkcs11Config: *p11conf,
+	}, err
+}
+
+// GetUserPkcs11Config gets the user's Pkcs11Config either from a configuration file or, if none is
+// found, the default ones are returned
+func GetUserPkcs11Config() (*pkcs11.Pkcs11Config, error) {
+	fmt.Print("Note: pkcs11 support is currently experimental\n")
+	ic, err := getConfiguration()
+	if err != nil {
+		return &pkcs11.Pkcs11Config{}, err
+	}
+	if ic == nil {
+		return &pkcs11.Pkcs11Config{}, errors.New("No ocicrypt config file was found")
+	}
+	return &ic.Pkcs11Config, nil
+}
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
new file mode 100644
index 000000000..7fcd2e3af
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go
@@ -0,0 +1,134 @@
+/*
+   Copyright The ocicrypt Authors.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package pkcs11
+
+import (
+	"fmt"
+	"github.com/pkg/errors"
+	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
+	"gopkg.in/yaml.v2"
+)
+
+// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.
+// It also carries pkcs11 module related environment variables that are transferred to the
+// Pkcs11URI object and activated when the pkcs11 module is used.
+type Pkcs11KeyFile struct {
+	Pkcs11 struct {
+		Uri string `yaml:"uri"`
+	} `yaml:"pkcs11"`
+	Module struct {
+		Env map[string]string `yaml:"env,omitempty"`
+	} `yaml:"module"`
+}
+
+// Pkcs11KeyFileObject is a representation of the Pkcs11KeyFile with the pkcs11 URI as an object
+type Pkcs11KeyFileObject struct {
+	Uri *pkcs11uri.Pkcs11URI
+}
+
+// ParsePkcs11Uri parses a pkcs11 URI
+func ParsePkcs11Uri(uri string) (*pkcs11uri.Pkcs11URI, error) {
+	p11uri := pkcs11uri.New()
+	err := p11uri.Parse(uri)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Could not parse Pkcs11URI from file")
+	}
+	return p11uri, err
+}
+
+// ParsePkcs11KeyFile parses a pkcs11 key file holding a pkcs11 URI describing a private key.
+// The file has the following yaml format:
+// pkcs11:
+//  - uri : <pkcs11 uri>
+// An error is returned if the pkcs11 URI is malformed
+func ParsePkcs11KeyFile(yamlstr []byte) (*Pkcs11KeyFileObject, error) {
+	p11keyfile := Pkcs11KeyFile{}
+
+	err := yaml.Unmarshal(yamlstr, &p11keyfile)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Could not unmarshal pkcs11 keyfile")
+	}
+
+	p11uri, err := ParsePkcs11Uri(p11keyfile.Pkcs11.Uri)
+	if err != nil {
+		return nil, err
+	}
+	p11uri.SetEnvMap(p11keyfile.Module.Env)
+
+	return &Pkcs11KeyFileObject{Uri: p11uri}, err
+}
+
+// IsPkcs11PrivateKey checks whether the given YAML represents a Pkcs11 private key
+func IsPkcs11PrivateKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// IsPkcs11PublicKey checks whether the given YAML represents a Pkcs11 public key
+func IsPkcs11PublicKey(yamlstr []byte) bool {
+	_, err := ParsePkcs11KeyFile(yamlstr)
+	return err == nil
+}
+
+// Pkcs11Config describes the layout of a pkcs11 config file
+// The file has the following yaml format:
+// module-directories:
+// - /usr/lib64/pkcs11/
+// allowed-module-paths:
+// - /usr/lib64/pkcs11/libsofthsm2.so
+type Pkcs11Config struct {
+	ModuleDirectories  []string `yaml:"module-directories"`
+	AllowedModulePaths []string `yaml:"allowed-module-paths"`
+}
+
+// GetDefaultModuleDirectories returns module directories covering
+// a variety of Linux distros
+func GetDefaultModuleDirectories() []string {
+	dirs := []string{
+		"/usr/lib64/pkcs11/", // Fedora,RHEL,openSUSE
+		"/usr/lib/pkcs11/",   // Fedora,ArchLinux
+		"/usr/local/lib/pkcs11/",
+		"/usr/lib/softhsm/", // Debian,Ubuntu
+	}
+
+	// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/
+	hosttype, ostype, q := getHostAndOsType()
+	if len(hosttype) > 0 {
+		dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)
+		dirs = append(dirs, dir)
+	}
+	return dirs
+}
+
+// GetDefaultModuleDirectoriesYaml returns the default module directories formatted for YAML
+func GetDefaultModuleDirectoriesYaml(indent string) string {
+	res := ""
+
+	for _, dir := range GetDefaultModuleDirectories() {
+		res += indent + "- " + dir + "\n"
+	}
+	return res
+}
+
+// ParsePkcs11ConfigFile parses a pkcs11 config file that influences the module search behavior
+// as well as the set of modules that users are allowed to use
+func ParsePkcs11ConfigFile(yamlstr []byte) (*Pkcs11Config, error) {
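+	// yamlstr holds the raw config file contents; an empty or partial
+	// document simply leaves the corresponding Pkcs11Config fields at their
+	// zero values (assumption based on yaml.Unmarshal semantics).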
p11conf := Pkcs11Config{} + + err := yaml.Unmarshal([]byte(yamlstr), &p11conf) + if err != nil { + return &p11conf, errors.Wrapf(err, "Could not parse Pkcs11Config") + } + return &p11conf, nil +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go new file mode 100644 index 000000000..448e88c7c --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -0,0 +1,487 @@ +// +build cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "hash" + "net/url" + "os" + "strconv" + "strings" + + "github.com/miekg/pkcs11" + "github.com/pkg/errors" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" +) + +var ( + // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed + OAEPLabel = []byte("") + // OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed + OAEPDefaultHash = "sha1" + + // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM + OAEPSha1Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA_1, + MGF: pkcs11.CKG_MGF1_SHA1, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } + // OAEPSha256Params describes the OAEP parameters with sha256 hash algorithm + OAEPSha256Params = &pkcs11.OAEPParams{ + HashAlg: pkcs11.CKM_SHA256, + MGF: pkcs11.CKG_MGF1_SHA256, + SourceType: pkcs11.CKZ_DATA_SPECIFIED, + SourceData: OAEPLabel, + } +) + +// rsaPublicEncryptOAEP encrypts the given plaintext with the given *rsa.PublicKey; the +// environment variable OCICRYPT_OAEP_HASHALG can be set to 'sha1' to force usage of sha1 for OAEP (SoftHSM). 
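+// For instance (illustrative), a client driving a SoftHSM token would export
+// OCICRYPT_OAEP_HASHALG=sha1 in its environment before encrypting.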
+// This function is needed by clients who are using a public key file for pkcs11 encryption
+func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, string, error) {
+	var (
+		hashfunc hash.Hash
+		hashalg  string
+	)
+
+	oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
+	// The default is 'sha1'
+	switch strings.ToLower(oaephash) {
+	case "sha1", "":
+		hashfunc = sha1.New()
+		hashalg = "sha1"
+	case "sha256":
+		hashfunc = sha256.New()
+		hashalg = "sha256"
+	default:
+		return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash)
+	}
+	ciphertext, err := rsa.EncryptOAEP(hashfunc, rand.Reader, pubKey, plaintext, OAEPLabel)
+	if err != nil {
+		return nil, "", errors.Wrapf(err, "rsa.EncryptOAEP failed")
+	}
+
+	return ciphertext, hashalg, nil
+}
+
+// pkcs11UriGetLoginParameters gets the parameters necessary for login from the Pkcs11URI
+// PIN and module are mandatory; slot-id is optional and if not found -1 will be returned
+// For a privateKeyOperation a PIN is required and if none is given, this function will return an error
+func pkcs11UriGetLoginParameters(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (string, string, int64, error) {
+	var (
+		pin string
+		err error
+	)
+	if privateKeyOperation {
+		if !p11uri.HasPIN() {
+			return "", "", 0, errors.New("Missing PIN for private key operation")
+		}
+	}
+	// some devices require a PIN to find a *public* key object, others don't
+	pin, _ = p11uri.GetPIN()
+
+	module, err := p11uri.GetModule()
+	if err != nil {
+		return "", "", 0, errors.Wrap(err, "No module available in pkcs11 URI")
+	}
+
+	slotid := int64(-1)
+
+	slot, ok := p11uri.GetPathAttribute("slot-id", false)
+	if ok {
+		slotid, err = strconv.ParseInt(slot, 10, 64)
+		if err != nil {
+			return "", "", 0, errors.Wrap(err, "slot-id is not a valid number")
+		}
+		if slotid < 0 {
+			return "", "", 0, fmt.Errorf("slot-id is a negative number")
+		}
+		if uint64(slotid) > 0xffffffff {
+			return "", "", 0, fmt.Errorf("slot-id is larger than 32 bit")
+		}
+	}
+
+	return pin, module, slotid, nil
+}
+
+// pkcs11UriGetKeyIdAndLabel gets the key id and label by retrieving the values of the 'id' and 'object' attributes
+func pkcs11UriGetKeyIdAndLabel(p11uri *pkcs11uri.Pkcs11URI) (string, string, error) {
+	keyid, ok2 := p11uri.GetPathAttribute("id", false)
+	label, ok1 := p11uri.GetPathAttribute("object", false)
+	if !ok1 && !ok2 {
+		return "", "", errors.New("Neither 'id' nor 'object' attributes were found in pkcs11 URI")
+	}
+	return keyid, label, nil
+}
+
+// pkcs11OpenSession opens a session with a pkcs11 device at the given slot and logs in with the given PIN
+func pkcs11OpenSession(p11ctx *pkcs11.Ctx, slotid uint, pin string) (session pkcs11.SessionHandle, err error) {
+	session, err = p11ctx.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+	if err != nil {
+		return 0, errors.Wrapf(err, "OpenSession to slot %d failed", slotid)
+	}
+	if len(pin) > 0 {
+		err = p11ctx.Login(session, pkcs11.CKU_USER, pin)
+		if err != nil {
+			_ = p11ctx.CloseSession(session)
+			return 0, errors.Wrap(err, "Could not login to device")
+		}
+	}
+	return session, nil
+}
+
+// pkcs11UriLogin uses the given pkcs11 URI to select the pkcs11 module (shared library) and to get
+// the PIN to use for login; if the URI contains a slot-id, the given slot-id will be used, otherwise
+// one slot after the other will be attempted and the first one where login succeeds will be used
+func pkcs11UriLogin(p11uri *pkcs11uri.Pkcs11URI, privateKeyOperation bool) (ctx *pkcs11.Ctx, session
pkcs11.SessionHandle, err error) { + pin, module, slotid, err := pkcs11UriGetLoginParameters(p11uri, privateKeyOperation) + if err != nil { + return nil, 0, err + } + + p11ctx := pkcs11.New(module) + if p11ctx == nil { + return nil, 0, errors.New("Please check module path, input is: " + module) + } + + err = p11ctx.Initialize() + if err != nil { + p11Err := err.(pkcs11.Error) + if p11Err != pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED { + return nil, 0, errors.Wrap(err, "Initialize failed") + } + } + + if slotid >= 0 { + session, err := pkcs11OpenSession(p11ctx, uint(slotid), pin) + return p11ctx, session, err + } else { + slots, err := p11ctx.GetSlotList(true) + if err != nil { + return nil, 0, errors.Wrap(err, "GetSlotList failed") + } + + tokenlabel, ok := p11uri.GetPathAttribute("token", false) + if !ok { + return nil, 0, errors.New("Missing 'token' attribute since 'slot-id' was not given") + } + + for _, slot := range slots { + ti, err := p11ctx.GetTokenInfo(slot) + if err != nil || ti.Label != tokenlabel { + continue + } + + session, err = pkcs11OpenSession(p11ctx, slot, pin) + if err == nil { + return p11ctx, session, err + } + } + if len(pin) > 0 { + return nil, 0, errors.New("Could not create session to any slot and/or log in") + } + return nil, 0, errors.New("Could not create session to any slot") + } +} + +func pkcs11Logout(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { + _ = ctx.Logout(session) + _ = ctx.CloseSession(session) + _ = ctx.Finalize() + ctx.Destroy() +} + +// findObject finds an object of the given class with the given keyid and/or label +func findObject(p11ctx *pkcs11.Ctx, session pkcs11.SessionHandle, class uint, keyid, label string) (pkcs11.ObjectHandle, error) { + msg := "" + + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + } + if len(label) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) + msg = fmt.Sprintf("label '%s'", label) + } + if len(keyid) > 0 { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, keyid)) + if len(msg) > 0 { + msg += " and " + } + msg += url.PathEscape(keyid) + } + + if err := p11ctx.FindObjectsInit(session, template); err != nil { + return 0, errors.Wrap(err, "FindObjectsInit failed") + } + + obj, _, err := p11ctx.FindObjects(session, 100) + if err != nil { + return 0, errors.Wrap(err, "FindObjects failed") + } + + if err := p11ctx.FindObjectsFinal(session); err != nil { + return 0, errors.Wrap(err, "FindObjectsFinal failed") + } + if len(obj) > 1 { + return 0, errors.Errorf("There are too many (=%d) keys with %s", len(obj), msg) + } else if len(obj) == 1 { + return obj[0], nil + } + + return 0, errors.Errorf("Could not find any object with %s", msg) +} + +// publicEncryptOAEP uses a public key described by a pkcs11 URI to OAEP encrypt the given plaintext +func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, string, error) { + oldenv, err := setEnvVars(pubKey.Uri.GetEnvMap()) + if err != nil { + return nil, "", err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(pubKey.Uri, false) + if err != nil { + return nil, "", err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(pubKey.Uri) + if err != nil { + return nil, "", err + } + + p11PubKey, err := findObject(p11ctx, session, pkcs11.CKO_PUBLIC_KEY, keyid, label) + if err != nil { + return nil, "", err + } + + var hashalg string + + var oaep *pkcs11.OAEPParams + oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG") + // the 
default is sha1 + switch strings.ToLower(oaephash) { + case "sha1", "": + oaep = OAEPSha1Params + hashalg = "sha1" + case "sha256": + oaep = OAEPSha256Params + hashalg = "sha256" + default: + return nil, "", errors.Errorf("Unsupported OAEP hash '%s'", oaephash) + } + + err = p11ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PubKey) + if err != nil { + return nil, "", errors.Wrap(err, "EncryptInit error") + } + + ciphertext, err := p11ctx.Encrypt(session, plaintext) + if err != nil { + return nil, "", errors.Wrap(err, "Encrypt failed") + } + return ciphertext, hashalg, nil +} + +// privateDecryptOAEP uses a pkcs11 URI describing a private key to OAEP decrypt a ciphertext +func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hashalg string) ([]byte, error) { + oldenv, err := setEnvVars(privKeyObj.Uri.GetEnvMap()) + if err != nil { + return nil, err + } + defer restoreEnv(oldenv) + + p11ctx, session, err := pkcs11UriLogin(privKeyObj.Uri, true) + if err != nil { + return nil, err + } + defer pkcs11Logout(p11ctx, session) + + keyid, label, err := pkcs11UriGetKeyIdAndLabel(privKeyObj.Uri) + if err != nil { + return nil, err + } + + p11PrivKey, err := findObject(p11ctx, session, pkcs11.CKO_PRIVATE_KEY, keyid, label) + if err != nil { + return nil, err + } + + var oaep *pkcs11.OAEPParams + + // the default is sha1 + switch hashalg { + case "sha1", "": + oaep = OAEPSha1Params + case "sha256": + oaep = OAEPSha256Params + default: + return nil, errors.Errorf("Unsupported hash algorithm '%s' for decryption", hashalg) + } + + err = p11ctx.DecryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}, p11PrivKey) + if err != nil { + return nil, errors.Wrapf(err, "DecryptInit failed") + } + plaintext, err := p11ctx.Decrypt(session, ciphertext) + if err != nil { + return nil, errors.Wrapf(err, "Decrypt failed") + } + return plaintext, err +} + +// +// The following part deals with the JSON formatted message for multiple pkcs11 recipients +// + +// Pkcs11Blob holds the encrypted blobs for all recipients; this is what we will put into the image's annotations +type Pkcs11Blob struct { + Version uint `json:"version"` + Recipients []Pkcs11Recipient `json:"recipients"` +} + +// Pkcs11Recipient holds the b64-encoded and encrypted blob for a particular recipient +type Pkcs11Recipient struct { + Version uint `json:"version"` + Blob string `json:"blob"` + Hash string `json:"hash,omitempty"` +} + +// EncryptMultiple encrypts for one or multiple pkcs11 devices; the public keys passed to this function +// may either be *rsa.PublicKey or *pkcs11uri.Pkcs11URI; the returned byte array is a JSON string of the +// following format: +// { +// recipients: [ // recipient list +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP other than 'sha256'> +// } , +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP other than 'sha256'> +// } , +// [...] 
+// ] +// } +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + var ( + ciphertext []byte + err error + pkcs11blob Pkcs11Blob = Pkcs11Blob{Version: 0} + hashalg string + ) + + for _, pubKey := range pubKeys { + switch pkey := pubKey.(type) { + case *rsa.PublicKey: + ciphertext, hashalg, err = rsaPublicEncryptOAEP(pkey, data) + case *Pkcs11KeyFileObject: + ciphertext, hashalg, err = publicEncryptOAEP(pkey, data) + default: + err = errors.Errorf("Unsupported key object type for pkcs11 public key") + } + if err != nil { + return nil, err + } + + if hashalg == OAEPDefaultHash { + hashalg = "" + } + recipient := Pkcs11Recipient{ + Version: 0, + Blob: base64.StdEncoding.EncodeToString(ciphertext), + Hash: hashalg, + } + + pkcs11blob.Recipients = append(pkcs11blob.Recipients, recipient) + } + return json.Marshal(&pkcs11blob) +} + +// Decrypt tries to decrypt one of the recipients' blobs using a pkcs11 private key. +// The input pkcs11blobstr is a string with the following format: +// { +// recipients: [ // recipient list +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP other than 'sha256'> +// } , +// { +// "version": 0, +// "blob": <base64 encoded RSA OAEP encrypted blob>, +// "hash": <hash used for OAEP other than 'sha256'> +// } , +// [...] +// ] +// } +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + pkcs11blob := Pkcs11Blob{} + err := json.Unmarshal(pkcs11blobstr, &pkcs11blob) + if err != nil { + return nil, errors.Wrapf(err, "Could not parse Pkcs11Blob") + } + switch pkcs11blob.Version { + case 0: + // latest supported version + default: + return nil, errors.Errorf("Found Pkcs11Blob with version %d but maximum supported version is 0.", pkcs11blob.Version) + } + // since we do trial and error, collect all encountered errors + errs := "" + + for _, recipient := range pkcs11blob.Recipients { + switch recipient.Version { + case 0: + // last supported version + default: + return nil, errors.Errorf("Found Pkcs11Recipient with version %d but maximum supported version is 0.", recipient.Version) + } + + ciphertext, err := base64.StdEncoding.DecodeString(recipient.Blob) + if err != nil || len(ciphertext) == 0 { + // This should never happen... we skip over decoding issues + errs += fmt.Sprintf("Base64 decoding failed: %s\n", err) + continue + } + // try all keys until one works + for _, privKeyObj := range privKeyObjs { + plaintext, err := privateDecryptOAEP(privKeyObj, ciphertext, recipient.Hash) + if err == nil { + return plaintext, nil + } + if uri, err2 := privKeyObj.Uri.Format(); err2 == nil { + errs += fmt.Sprintf("%s : %s\n", uri, err) + } else { + errs += fmt.Sprintf("%s\n", err) + } + } + } + + return nil, errors.Errorf("Could not find a pkcs11 key for decryption:\n%s", errs) +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go new file mode 100644 index 000000000..6edf75269 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers_nocgo.go @@ -0,0 +1,31 @@ +// +build !cgo + +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
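Aside: the JSON envelope that EncryptMultiple produces and Decrypt parses can be exercised stand-alone. Below is a minimal sketch, with local types mirroring Pkcs11Blob/Pkcs11Recipient above; the placeholder byte string stands in for a real RSA-OAEP ciphertext.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Local mirrors of the Pkcs11Blob/Pkcs11Recipient wire format above.
type pkcs11Blob struct {
	Version    uint              `json:"version"`
	Recipients []pkcs11Recipient `json:"recipients"`
}

type pkcs11Recipient struct {
	Version uint   `json:"version"`
	Blob    string `json:"blob"`
	Hash    string `json:"hash,omitempty"`
}

func main() {
	// Placeholder bytes stand in for a real RSA-OAEP ciphertext.
	ct := []byte("not-a-real-ciphertext")

	env := pkcs11Blob{
		Version: 0,
		Recipients: []pkcs11Recipient{
			{Version: 0, Blob: base64.StdEncoding.EncodeToString(ct), Hash: "sha256"},
		},
	}
	raw, err := json.Marshal(&env)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	// A decryptor walks the recipient list and base64-decodes each blob
	// before trying its private keys, as Decrypt above does.
	var parsed pkcs11Blob
	if err := json.Unmarshal(raw, &parsed); err != nil {
		panic(err)
	}
	for _, r := range parsed.Recipients {
		pt, _ := base64.StdEncoding.DecodeString(r.Blob)
		fmt.Printf("recipient v%d hash=%q ciphertext=%d bytes\n", r.Version, r.Hash, len(pt))
	}
}
```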
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "github.com/pkg/errors" +) + +func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { + return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") +} + +func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { + return nil, errors.Errorf("ocicrypt pkcs11 not supported on this build") +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go new file mode 100644 index 000000000..306e372d5 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -0,0 +1,114 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11 + +import ( + "os" + "runtime" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + envLock sync.Mutex +) + +// setEnvVars sets the environment variables given in the map and locks the environment from +// modification with the same function; if successful, you *must* call restoreEnv with the return +// value from this function +func setEnvVars(env map[string]string) ([]string, error) { + envLock.Lock() + + if len(env) == 0 { + return nil, nil + } + + oldenv := os.Environ() + + for k, v := range env { + err := os.Setenv(k, v) + if err != nil { + restoreEnv(oldenv) + return nil, errors.Wrapf(err, "Could not set environment variable '%s' to '%s'", k, v) + } + } + + return oldenv, nil +} + +func arrayToMap(elements []string) map[string]string { + o := make(map[string]string) + + for _, element := range elements { + p := strings.SplitN(element, "=", 2) + if len(p) == 2 { + o[p[0]] = p[1] + } + } + + return o +} + +// restoreEnv restores the environment to be exactly as given in the array of strings +// and unlocks the lock +func restoreEnv(envs []string) { + if envs != nil && len(envs) >= 0 { + target := arrayToMap(envs) + curr := arrayToMap(os.Environ()) + + for nc, vc := range curr { + vt, ok := target[nc] + if !ok { + os.Unsetenv(nc) + } else if vc == vt { + delete(target, nc) + } + } + + for nt, vt := range target { + os.Setenv(nt, vt) + } + } + + envLock.Unlock() +} + +func getHostAndOsType() (string, string, string) { + ht := "" + ot := "" + st := "" + switch runtime.GOOS { + case "linux": + ot = "linux" + st = "gnu" + switch runtime.GOARCH { + case "arm": + ht = "arm" + case "arm64": + ht = "aarch64" + case "amd64": + ht = "x86_64" + case "ppc64le": + ht = "powerpc64le" + case "s390x": + ht = "s390x" + } + } + return ht, ot, st +} diff --git a/vendor/github.com/containers/ocicrypt/encryption.go 
b/vendor/github.com/containers/ocicrypt/encryption.go index 3153b63d7..f5142cc8d 100644 --- a/vendor/github.com/containers/ocicrypt/encryption.go +++ b/vendor/github.com/containers/ocicrypt/encryption.go @@ -19,6 +19,9 @@ package ocicrypt import ( "encoding/base64" "encoding/json" + "fmt" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap/keyprovider" "io" "strings" @@ -27,8 +30,10 @@ import ( "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/keywrap/jwe" "github.com/containers/ocicrypt/keywrap/pgp" + "github.com/containers/ocicrypt/keywrap/pkcs11" "github.com/containers/ocicrypt/keywrap/pkcs7" "github.com/opencontainers/go-digest" + log "github.com/sirupsen/logrus" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -43,6 +48,15 @@ func init() { RegisterKeyWrapper("pgp", pgp.NewKeyWrapper()) RegisterKeyWrapper("jwe", jwe.NewKeyWrapper()) RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper()) + RegisterKeyWrapper("pkcs11", pkcs11.NewKeyWrapper()) + ic, err := keyproviderconfig.GetConfiguration() + if err != nil { + log.Error(err) + } else if ic != nil { + for provider, attrs := range ic.KeyProviderConfig { + RegisterKeyWrapper("provider."+provider, keyprovider.NewKeyWrapper(provider, attrs)) + } + } } var keyWrappers map[string]keywrap.KeyWrapper @@ -128,6 +142,7 @@ func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, des } newAnnotations := make(map[string]string) + keysWrapped := false for annotationsID, scheme := range keyWrapperAnnotations { b64Annotations := desc.Annotations[annotationsID] keywrapper := GetKeyWrapper(scheme) @@ -136,10 +151,14 @@ func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, des return nil, err } if b64Annotations != "" { + keysWrapped = true newAnnotations[annotationsID] = b64Annotations } } + if !keysWrapped { + return nil, errors.New("no wrapped keys produced by encryption") + } newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData) if len(newAnnotations) == 0 { @@ -191,6 +210,7 @@ func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocisp func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) { privKeyGiven := false + errs := "" for annotationsID, scheme := range keyWrapperAnnotations { b64Annotation := desc.Annotations[annotationsID] if b64Annotation != "" { @@ -203,10 +223,10 @@ func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) if len(keywrapper.GetPrivateKeys(dc.Parameters)) > 0 { privKeyGiven = true } - optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation) if err != nil { // try next keywrap.KeyWrapper + errs += fmt.Sprintf("%s\n", err) continue } if optsData == nil { @@ -219,7 +239,7 @@ func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) if !privKeyGiven { return nil, errors.New("missing private key needed for decryption") } - return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption") + return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption:\n%s", errs) } func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) { @@ -237,6 +257,7 @@ func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64An if b64Annotations == "" { return nil, nil } + errs := "" for _, 
b64Annotation := range strings.Split(b64Annotations, ",") { annotation, err := base64.StdEncoding.DecodeString(b64Annotation) if err != nil { @@ -244,11 +265,12 @@ func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64An } optsData, err := keywrapper.UnwrapKey(dc, annotation) if err != nil { + errs += fmt.Sprintf("- %s\n", err) continue } return optsData, nil } - return nil, errors.New("no suitable key found for decrypting layer key") + return nil, errors.Errorf("no suitable key found for decrypting layer key:\n%s", errs) } // commonEncryptLayer is a function to encrypt the plain layer using a new random diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod index 5e6bc2a75..06a77af95 100644 --- a/vendor/github.com/containers/ocicrypt/go.mod +++ b/vendor/github.com/containers/ocicrypt/go.mod @@ -3,13 +3,19 @@ module github.com/containers/ocicrypt go 1.12 require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/opencontainers/go-digest v1.0.0-rc1 + github.com/golang/protobuf v1.4.3 + github.com/google/go-cmp v0.5.2 // indirect + github.com/miekg/pkcs11 v1.0.3 + github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.1 - github.com/pkg/errors v0.8.1 - github.com/stretchr/testify v1.3.0 // indirect + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 + github.com/stretchr/testify v1.3.0 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 - golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 - golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect - gopkg.in/square/go-jose.v2 v2.3.1 + golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de + golang.org/x/sys v0.0.0-20200817155316-9781c653f443 // indirect + google.golang.org/grpc v1.33.2 + gopkg.in/square/go-jose.v2 v2.5.1 + gopkg.in/yaml.v2 v2.3.0 ) diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum index 6b4e83d75..b014b76ee 100644 --- a/vendor/github.com/containers/ocicrypt/go.sum +++ b/vendor/github.com/containers/ocicrypt/go.sum @@ -1,31 +1,116 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= +github.com/stefanberger/go-pkcs11uri 
v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443 h1:X18bCaipMcoJGm27Nv7zr4XYPKGUy92GtqboKC2Hxaw= 
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 
h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containers/ocicrypt/gpg.go b/vendor/github.com/containers/ocicrypt/gpg.go index c89f3b0ea..c4d31e52d 100644 --- a/vendor/github.com/containers/ocicrypt/gpg.go +++ b/vendor/github.com/containers/ocicrypt/gpg.go @@ -180,13 +180,13 @@ func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, return keydata, err == nil, err } -// GetSecretKeyDetails retrives the secret key details of key with keyid. +// GetSecretKeyDetails retrieves the secret key details of key with keyid. // returns a byte array of the details and a bool if the key exists func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { return gc.getKeyDetails("-K", keyid) } -// GetKeyDetails retrives the public key details of key with keyid. +// GetKeyDetails retrieves the public key details of key with keyid. // returns a byte array of the details and a bool if the key exists func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { return gc.getKeyDetails("-k", keyid) @@ -240,13 +240,13 @@ func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, return keydata, err == nil, err } -// GetSecretKeyDetails retrives the secret key details of key with keyid. +// GetSecretKeyDetails retrieves the secret key details of key with keyid. // returns a byte array of the details and a bool if the key exists func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) { return gc.getKeyDetails("-K", keyid) } -// GetKeyDetails retrives the public key details of key with keyid. +// GetKeyDetails retrieves the public key details of key with keyid. // returns a byte array of the details and a bool if the key exists func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) { return gc.getKeyDetails("-k", keyid) diff --git a/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go new file mode 100644 index 000000000..198c554aa --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go @@ -0,0 +1,377 @@ +package helpers + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/containers/ocicrypt" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/config/pkcs11config" + "github.com/containers/ocicrypt/crypto/pkcs11" + encutils "github.com/containers/ocicrypt/utils" + + "github.com/pkg/errors" +) + +// processRecipientKeys sorts the array of recipients by type. 
Recipients may be +// x509 certificates, public keys, or PGP public keys identified by email address or name +func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { + var ( + gpgRecipients [][]byte + pubkeys [][]byte + x509s [][]byte + pkcs11Pubkeys [][]byte + pkcs11Yamls [][]byte + keyProviders [][]byte + ) + + for _, recipient := range recipients { + + idx := strings.Index(recipient, ":") + if idx < 0 { + return nil, nil, nil, nil, nil, nil, errors.New("Invalid recipient format") + } + + protocol := recipient[:idx] + value := recipient[idx+1:] + + switch protocol { + case "pgp": + gpgRecipients = append(gpgRecipients, []byte(value)) + + case "jwe": + tmp, err := ioutil.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file") + } + if !encutils.IsPublicKey(tmp) { + return nil, nil, nil, nil, nil, nil, errors.New("File provided is not a public key") + } + pubkeys = append(pubkeys, tmp) + + case "pkcs7": + tmp, err := ioutil.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file") + } + if !encutils.IsCertificate(tmp) { + return nil, nil, nil, nil, nil, nil, errors.New("File provided is not an x509 cert") + } + x509s = append(x509s, tmp) + + case "pkcs11": + tmp, err := ioutil.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file") + } + if encutils.IsPkcs11PublicKey(tmp) { + pkcs11Yamls = append(pkcs11Yamls, tmp) + } else if encutils.IsPublicKey(tmp) { + pkcs11Pubkeys = append(pkcs11Pubkeys, tmp) + } else { + return nil, nil, nil, nil, nil, nil, errors.New("Provided file is not a public key") + } + + case "provider": + keyProviders = append(keyProviders, []byte(value)) + + default: + return nil, nil, nil, nil, nil, nil, errors.New("Provided protocol not recognized") + } + } + return gpgRecipients, pubkeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProviders, nil +} + +// processx509Certs processes x509 certificate files +func processx509Certs(keys []string) ([][]byte, error) { + var x509s [][]byte + for _, key := range keys { + tmp, err := ioutil.ReadFile(strings.Split(key, ":")[0]) + if err != nil { + return nil, errors.Wrap(err, "Unable to read file") + } + if !encutils.IsCertificate(tmp) { + continue + } + x509s = append(x509s, tmp) + } + return x509s, nil +} + +// processPwdString processes a password that may be in any of the following formats: +// - file=<passwordfile> +// - pass=<password> +// - fd=<filedescriptor> +// - <password> +func processPwdString(pwdString string) ([]byte, error) { + if strings.HasPrefix(pwdString, "file=") { + return ioutil.ReadFile(pwdString[5:]) + } else if strings.HasPrefix(pwdString, "pass=") { + return []byte(pwdString[5:]), nil + } else if strings.HasPrefix(pwdString, "fd=") { + fdStr := pwdString[3:] + fd, err := strconv.Atoi(fdStr) + if err != nil { + return nil, errors.Wrapf(err, "could not parse file descriptor %s", fdStr) + } + f := os.NewFile(uintptr(fd), "pwdfile") + if f == nil { + return nil, fmt.Errorf("%s is not a valid file descriptor", fdStr) + } + defer f.Close() + pwd := make([]byte, 64) + n, err := f.Read(pwd) + if err != nil { + return nil, errors.Wrapf(err, "could not read from file descriptor") + } + return pwd[:n], nil + } + return []byte(pwdString), nil +} + +// processPrivateKeyFiles sorts the different types of private key files; private key files may either be +// private keys or GPG private key
ring files. The private key files may include the password for the +// private key and take any of the following forms: +// - <filename> +// - <filename>:file=<passwordfile> +// - <filename>:pass=<password> +// - <filename>:fd=<filedescriptor> +// - <filename>:<password> +// - keyprovider:<...> +func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { + var ( + gpgSecretKeyRingFiles [][]byte + gpgSecretKeyPasswords [][]byte + privkeys [][]byte + privkeysPasswords [][]byte + pkcs11Yamls [][]byte + keyProviders [][]byte + err error + ) + // keys needed for decryption in case of adding a recipient + for _, keyfileAndPwd := range keyFilesAndPwds { + var password []byte + + // treat "provider" protocol separately + if strings.HasPrefix(keyfileAndPwd, "provider:") { + keyProviders = append(keyProviders, []byte(keyfileAndPwd[len("provider:"):])) + continue + } + parts := strings.Split(keyfileAndPwd, ":") + if len(parts) == 2 { + password, err = processPwdString(parts[1]) + if err != nil { + return nil, nil, nil, nil, nil, nil, err + } + } + + keyfile := parts[0] + tmp, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, nil, nil, nil, nil, nil, err + } + isPrivKey, err := encutils.IsPrivateKey(tmp, password) + if encutils.IsPasswordError(err) { + return nil, nil, nil, nil, nil, nil, err + } + + if encutils.IsPkcs11PrivateKey(tmp) { + pkcs11Yamls = append(pkcs11Yamls, tmp) + } else if isPrivKey { + privkeys = append(privkeys, tmp) + privkeysPasswords = append(privkeysPasswords, password) + } else if encutils.IsGPGPrivateKeyRing(tmp) { + gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp) + gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password) + } else { + // ignore if file is not recognized, so as not to error if additional + // metadata/cert files exist + continue + } + } + return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, pkcs11Yamls, keyProviders, nil +} + +// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary +// information to perform decryption from command line options. +func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig.CryptoConfig, error) { + ccs := []encconfig.CryptoConfig{} + + // x509 cert is needed for PKCS7 decryption + _, _, x509s, _, _, _, err := processRecipientKeys(decRecipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + if len(x509s) > 0 { + // x509 certs can also be passed in via keys + x509FromKeys, err := processx509Certs(keys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + x509s = append(x509s, x509FromKeys...) + } + gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, pkcs11Yamls, keyProviders, err := processPrivateKeyFiles(keys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + } + + /* TODO: Add in GPG client query for secret keys in the future.
+ _, err = createGPGClient(context) + gpgInstalled := err == nil + if gpgInstalled { + if len(gpgSecretKeyRingFiles) == 0 && len(privKeys) == 0 && len(pkcs11Yamls) == 0 && len(keyProviders) == 0 && descs != nil { + // Get pgp private keys from keyring only if no private key was passed + gpgPrivKeys, gpgPrivKeyPasswords, err := getGPGPrivateKeys(context, gpgSecretKeyRingFiles, descs, true) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } else if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } + } + */ + + if len(x509s) > 0 { + x509sCc, err := encconfig.DecryptWithX509s(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, x509sCc) + } + if len(privKeys) > 0 { + privKeysCc, err := encconfig.DecryptWithPrivKeys(privKeys, privKeysPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, privKeysCc) + } + if len(pkcs11Yamls) > 0 { + p11conf, err := pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11PrivKeysCc, err := encconfig.DecryptWithPkcs11Yaml(p11conf, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, pkcs11PrivKeysCc) + } + if len(keyProviders) > 0 { + keyProviderCc, err := encconfig.DecryptWithKeyProvider(keyProviders) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, keyProviderCc) + } + return encconfig.CombineCryptoConfigs(ccs), nil +} + +// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys +func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) { + var decryptCc *encconfig.CryptoConfig + ccs := []encconfig.CryptoConfig{} + if len(keys) > 0 { + dcc, err := CreateDecryptCryptoConfig(keys, []string{}) + if err != nil { + return encconfig.CryptoConfig{}, err + } + decryptCc = &dcc + ccs = append(ccs, dcc) + } + + if len(recipients) > 0 { + gpgRecipients, pubKeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProvider, err := processRecipientKeys(recipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs := []encconfig.CryptoConfig{} + + // Create GPG client with guessed GPG version and default homedir + gpgClient, err := ocicrypt.NewGPGClient("", "") + gpgInstalled := err == nil + if len(gpgRecipients) > 0 && gpgInstalled { + gpgPubRingFile, err := gpgClient.ReadGPGPubRingFile() + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.EncryptWithGpg(gpgRecipients, gpgPubRingFile) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, gpgCc) + } + + // Create Encryption Crypto Config + if len(x509s) > 0 { + pkcs7Cc, err := encconfig.EncryptWithPkcs7(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs7Cc) + } + if len(pubKeys) > 0 { + jweCc, err := encconfig.EncryptWithJwe(pubKeys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, jweCc) + } + var p11conf *pkcs11.Pkcs11Config + if len(pkcs11Yamls) > 0 || len(pkcs11Pubkeys) > 0 { + p11conf, err = 
pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11Cc, err := encconfig.EncryptWithPkcs11(p11conf, pkcs11Pubkeys, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs11Cc) + } + + if len(keyProvider) > 0 { + keyProviderCc, err := encconfig.EncryptWithKeyProvider(keyProvider) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, keyProviderCc) + } + ecc := encconfig.CombineCryptoConfigs(encryptCcs) + if decryptCc != nil { + ecc.EncryptConfig.AttachDecryptConfig(decryptCc.DecryptConfig) + } + ccs = append(ccs, ecc) + } + + if len(ccs) > 0 { + return encconfig.CombineCryptoConfigs(ccs), nil + } else { + return encconfig.CryptoConfig{}, nil + } +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go new file mode 100644 index 000000000..3b4c47ed4 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/keyprovider/keyprovider.go @@ -0,0 +1,242 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package keyprovider + +import ( + "context" + "encoding/json" + "github.com/containers/ocicrypt/config" + keyproviderconfig "github.com/containers/ocicrypt/config/keyprovider-config" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + keyproviderpb "github.com/containers/ocicrypt/utils/keyprovider" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" +) + +type keyProviderKeyWrapper struct { + provider string + attrs keyproviderconfig.KeyProviderAttrs +} + +func (kw *keyProviderKeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.provider." + kw.provider +} + +// NewKeyWrapper returns a new key wrapping interface using keyprovider +func NewKeyWrapper(p string, a keyproviderconfig.KeyProviderAttrs) keywrap.KeyWrapper { + return &keyProviderKeyWrapper{provider: p, attrs: a} +} + +type KeyProviderKeyWrapProtocolOperation string + +var ( + OpKeyWrap KeyProviderKeyWrapProtocolOperation = "keywrap" + OpKeyUnwrap KeyProviderKeyWrapProtocolOperation = "keyunwrap" +) + +// KeyProviderKeyWrapProtocolInput defines the input to the key provider binary or grpc method. +type KeyProviderKeyWrapProtocolInput struct { + // Operation is either "keywrap" or "keyunwrap" + Operation KeyProviderKeyWrapProtocolOperation `json:"op"` + // KeyWrapParams encodes the arguments to key wrap if operation is set to wrap + KeyWrapParams KeyWrapParams `json:"keywrapparams,omitempty"` + // KeyUnwrapParams encodes the arguments to key unwrap if operation is set to unwrap + KeyUnwrapParams KeyUnwrapParams `json:"keyunwrapparams,omitempty"` +} + +// KeyProviderKeyWrapProtocolOutput defines the output of the key provider binary or grpc method. 
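A sketch of the wire format this protocol defines: marshalling a keywrap input produces the JSON a cmd provider reads on stdin, or the bytes placed into the serialized input field of the grpc request. The local types mirror the json tags of the structs above and below; the real "ec"/"dc" fields carry full ocicrypt Encrypt/DecryptConfig values and are elided here.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the protocol structs; the real "ec"/"dc" fields carry
// full ocicrypt Encrypt/DecryptConfig values and are elided here.
type keyWrapParams struct {
	OptsData []byte `json:"optsdata"`
}

type protocolInput struct {
	Operation     string        `json:"op"` // "keywrap" or "keyunwrap"
	KeyWrapParams keyWrapParams `json:"keywrapparams,omitempty"`
}

func main() {
	in := protocolInput{
		Operation:     "keywrap",
		KeyWrapParams: keyWrapParams{OptsData: []byte(`{"symkey":"..."}`)},
	}
	raw, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	// This is what a cmd provider reads on stdin, or what is placed into
	// the serialized input field of the grpc request. Note that []byte
	// fields marshal as base64 in JSON.
	fmt.Println(string(raw))
}
```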
+type KeyProviderKeyWrapProtocolOutput struct { + // KeyWrapResults encodes the results of key wrap if the operation was to wrap + KeyWrapResults KeyWrapResults `json:"keywrapresults,omitempty"` + // KeyUnwrapResults encodes the results of key unwrap if the operation was to unwrap + KeyUnwrapResults KeyUnwrapResults `json:"keyunwrapresults,omitempty"` +} + +type KeyWrapParams struct { + Ec *config.EncryptConfig `json:"ec"` + OptsData []byte `json:"optsdata"` +} + +type KeyUnwrapParams struct { + Dc *config.DecryptConfig `json:"dc"` + Annotation []byte `json:"annotation"` +} + +type KeyUnwrapResults struct { + OptsData []byte `json:"optsdata"` +} + +type KeyWrapResults struct { + Annotation []byte `json:"annotation"` +} + +var runner utils.CommandExecuter + +func init() { + runner = utils.Runner{} +} + +// WrapKeys calls the appropriate binary executable/grpc server for wrapping the session key for recipients and gets encrypted optsData, which +// describes the symmetric key used for encrypting the layer +func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyWrap, + KeyWrapParams: KeyWrapParams{ + Ec: ec, + OptsData: optsData, + }, + }) + + if err != nil { + return nil, err + } + + if _, ok := ec.Parameters[kw.provider]; ok { + if kw.attrs.Command != nil { + protocolOutput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + return nil, errors.Wrap(err, "error while retrieving keyprovider protocol command output") + } + return protocolOutput.KeyWrapResults.Annotation, nil + } else if kw.attrs.Grpc != "" { + protocolOutput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyWrap) + if err != nil { + return nil, errors.Wrap(err, "error while retrieving keyprovider protocol grpc output") + } + + return protocolOutput.KeyWrapResults.Annotation, nil + } else { + return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd") + } + } + + return nil, nil +} + +// UnwrapKey calls the appropriate binary executable/grpc server for unwrapping the session key based on the protocol given in annotation for recipients and gets decrypted optsData, +// which describes the symmetric key used for decrypting the layer +func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + input, err := json.Marshal(KeyProviderKeyWrapProtocolInput{ + Operation: OpKeyUnwrap, + KeyUnwrapParams: KeyUnwrapParams{ + Dc: dc, + Annotation: jsonString, + }, + }) + if err != nil { + return nil, err + } + + if kw.attrs.Command != nil { + protocolOutput, err := getProviderCommandOutput(input, kw.attrs.Command) + if err != nil { + // Propagate the error; the caller will try the remaining key wrappers + return nil, err + } + + return protocolOutput.KeyUnwrapResults.OptsData, nil + } else if kw.attrs.Grpc != "" { + protocolOutput, err := getProviderGRPCOutput(input, kw.attrs.Grpc, OpKeyUnwrap) + if err != nil { + // Propagate the error; the caller will try the remaining key wrappers + return nil, err + } + + return protocolOutput.KeyUnwrapResults.OptsData, nil + } else { + return nil, errors.New("Unsupported keyprovider invocation.
Supported invocation methods are grpc and cmd") + } +} + +func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOutput KeyProviderKeyWrapProtocolOutput + var grpcOutput *keyproviderpb.KeyProviderKeyWrapProtocolOutput + cc, err := grpc.Dial(connString, grpc.WithInsecure()) + if err != nil { + return nil, errors.Wrap(err, "error while dialing rpc server") + } + defer func() { + derr := cc.Close() + if derr != nil { + log.WithError(derr).Error("Error closing grpc socket") + } + }() + + client := keyproviderpb.NewKeyProviderServiceClient(cc) + req := &keyproviderpb.KeyProviderKeyWrapProtocolInput{ + KeyProviderKeyWrapProtocolInput: input, + } + + if operation == OpKeyWrap { + grpcOutput, err = client.WrapKey(context.Background(), req) + if err != nil { + return nil, errors.Wrap(err, "Error from grpc method") + } + } else if operation == OpKeyUnwrap { + grpcOutput, err = client.UnWrapKey(context.Background(), req) + if err != nil { + return nil, errors.Wrap(err, "Error from grpc method") + } + } else { + return nil, errors.New("Unsupported operation") + } + + respBytes := grpcOutput.GetKeyProviderKeyWrapProtocolOutput() + err = json.Unmarshal(respBytes, &protocolOutput) + if err != nil { + return nil, errors.Wrap(err, "Error while unmarshalling grpc method output") + } + + return &protocolOutput, nil +} + +func getProviderCommandOutput(input []byte, command *keyproviderconfig.Command) (*KeyProviderKeyWrapProtocolOutput, error) { + var protocolOutput KeyProviderKeyWrapProtocolOutput + // Feed the protocol input to the provider binary on stdin and read its response from stdout + respBytes, err := runner.Exec(command.Path, command.Args, input) + if err != nil { + return nil, err + } + err = json.Unmarshal(respBytes, &protocolOutput) + if err != nil { + return nil, errors.Wrap(err, "Error while unmarshalling binary executable command output") + } + return &protocolOutput, nil +} + +// Return false as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return false +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +// Return nil as it is not applicable to keyprovider protocol +func (kw *keyProviderKeyWrapper) GetRecipients(_ string) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go new file mode 100644 index 000000000..803b90865 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go @@ -0,0 +1,147 @@ +/* + Copyright The ocicrypt Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
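getProviderCommandOutput above is the cmd side of the protocol. A runnable sketch of the same plumbing, using cat as a stand-in provider binary (it simply echoes stdin, so the decoded output fields stay empty; a real provider would print a KeyProviderKeyWrapProtocolOutput JSON to stdout):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

// Local mirror of the relevant slice of KeyProviderKeyWrapProtocolOutput.
type protocolOutput struct {
	KeyWrapResults struct {
		Annotation []byte `json:"annotation"`
	} `json:"keywrapresults,omitempty"`
}

// runProvider mirrors Runner.Exec + getProviderCommandOutput: JSON in on
// stdin, JSON out on stdout.
func runProvider(path string, args []string, input []byte) (*protocolOutput, error) {
	var out bytes.Buffer
	cmd := exec.Command(path, args...)
	cmd.Stdin = bytes.NewBuffer(input)
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("running %s: %w", path, err)
	}
	var po protocolOutput
	if err := json.Unmarshal(out.Bytes(), &po); err != nil {
		return nil, fmt.Errorf("unmarshalling provider output: %w", err)
	}
	return &po, nil
}

func main() {
	// "cat" is only a stand-in for a real key-provider binary.
	po, err := runProvider("cat", nil, []byte(`{"op":"keywrap"}`))
	fmt.Printf("%+v %v\n", po, err)
}
```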
+*/ + +package pkcs11 + +import ( + "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/containers/ocicrypt/keywrap" + "github.com/containers/ocicrypt/utils" + + "github.com/pkg/errors" +) + +type pkcs11KeyWrapper struct { +} + +func (kw *pkcs11KeyWrapper) GetAnnotationID() string { + return "org.opencontainers.image.enc.keys.pkcs11" +} + +// NewKeyWrapper returns a new key wrapping interface using pkcs11 +func NewKeyWrapper() keywrap.KeyWrapper { + return &pkcs11KeyWrapper{} +} + +// WrapKeys wraps the session key for recipients and encrypts the optsData, which +// describes the symmetric key used for encrypting the layer +func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) { + pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...)) + if err != nil { + return nil, err + } + // no recipients is not an error... + if len(pkcs11Recipients) == 0 { + return nil, nil + } + + jsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData) + if err != nil { + return nil, errors.Wrapf(err, "PKCS11 EncryptMultiple failed") + } + return jsonString, nil +} + +func (kw *pkcs11KeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString []byte) ([]byte, error) { + var pkcs11PrivKeys []*pkcs11.Pkcs11KeyFileObject + + privKeys := kw.GetPrivateKeys(dc.Parameters) + if len(privKeys) == 0 { + return nil, errors.New("No private keys found for PKCS11 decryption") + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, privKey := range privKeys { + key, err := utils.ParsePrivateKey(privKey, nil, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PrivKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PrivKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PrivKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + pkcs11PrivKeys = append(pkcs11PrivKeys, pkcs11PrivKey) + default: + continue + } + } + + plaintext, err := pkcs11.Decrypt(pkcs11PrivKeys, jsonString) + if err == nil { + return plaintext, nil + } + + return nil, errors.Wrapf(err, "PKCS11: No suitable private key found for decryption") +} + +func (kw *pkcs11KeyWrapper) NoPossibleKeys(dcparameters map[string][][]byte) bool { + return len(kw.GetPrivateKeys(dcparameters)) == 0 +} + +func (kw *pkcs11KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte { + return dcparameters["pkcs11-yamls"] +} + +func (kw *pkcs11KeyWrapper) GetKeyIdsFromPacket(_ string) ([]uint64, error) { + return nil, nil +} + +func (kw *pkcs11KeyWrapper) GetRecipients(_ string) ([]string, error) { + return []string{"[pkcs11]"}, nil +} + +func addPubKeys(dc *config.DecryptConfig, pubKeys [][]byte) ([]interface{}, error) { + var pkcs11Keys []interface{} + + if len(pubKeys) == 0 { + return pkcs11Keys, nil + } + + p11conf, err := p11confFromParameters(dc.Parameters) + if err != nil { + return nil, err + } + + for _, pubKey := range pubKeys { + key, err := utils.ParsePublicKey(pubKey, "PKCS11") + if err != nil { + return nil, err + } + switch pkcs11PubKey := key.(type) { + case *pkcs11.Pkcs11KeyFileObject: + if p11conf != nil { + pkcs11PubKey.Uri.SetModuleDirectories(p11conf.ModuleDirectories) + pkcs11PubKey.Uri.SetAllowedModulePaths(p11conf.AllowedModulePaths) + } + } + pkcs11Keys = append(pkcs11Keys, key) + } + return pkcs11Keys, nil +} + +func p11confFromParameters(dcparameters
map[string][][]byte) (*pkcs11.Pkcs11Config, error) { + if _, ok := dcparameters["pkcs11-config"]; ok { + return pkcs11.ParsePkcs11ConfigFile(dcparameters["pkcs11-config"][0]) + } + return nil, nil +} diff --git a/vendor/github.com/containers/ocicrypt/utils/ioutils.go b/vendor/github.com/containers/ocicrypt/utils/ioutils.go index c360e0a33..078c34799 100644 --- a/vendor/github.com/containers/ocicrypt/utils/ioutils.go +++ b/vendor/github.com/containers/ocicrypt/utils/ioutils.go @@ -17,7 +17,10 @@ package utils import ( + "bytes" "io" + "os/exec" + "github.com/pkg/errors" ) // FillBuffer fills the given buffer with as many bytes from the reader as possible. It returns @@ -29,3 +32,25 @@ func FillBuffer(reader io.Reader, buffer []byte) (int, error) { } return n, err } + +// CommandExecuter runs a command; the first argument is the command, like cat or echo, +// the second is the list of args to pass to it +type CommandExecuter interface { + Exec(string, []string, []byte) ([]byte, error) +} + +type Runner struct{} + +// Exec executes a command line command and returns its output, together with an error if one occurred. +func (r Runner) Exec(cmdName string, args []string, input []byte) ([]byte, error) { + var out bytes.Buffer + stdInputBuffer := bytes.NewBuffer(input) + cmd := exec.Command(cmdName, args...) + cmd.Stdin = stdInputBuffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return nil, errors.Wrapf(err, "Error while running command: %s", cmdName) + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go new file mode 100644 index 000000000..dc477d3cf --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.pb.go @@ -0,0 +1,243 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: keyprovider.proto + +package keyprovider + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
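Because command execution goes through the CommandExecuter interface above, tests can swap in a fake runner instead of invoking a real binary. A self-contained sketch (the interface is re-declared locally, and the canned annotation bytes are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Re-declaration of the CommandExecuter shape for a self-contained example.
type commandExecuter interface {
	Exec(cmdName string, args []string, input []byte) ([]byte, error)
}

// fakeRunner returns a canned protocol output instead of invoking a binary,
// which is handy for unit-testing code that drives a key provider.
type fakeRunner struct {
	annotation []byte
}

func (f fakeRunner) Exec(cmdName string, args []string, input []byte) ([]byte, error) {
	resp := map[string]interface{}{
		"keywrapresults": map[string]interface{}{"annotation": f.annotation},
	}
	return json.Marshal(resp)
}

func main() {
	var r commandExecuter = fakeRunner{annotation: []byte("wrapped-key-bytes")}
	out, err := r.Exec("ignored", nil, []byte(`{"op":"keywrap"}`))
	fmt.Println(string(out), err)
}
```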
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type KeyProviderKeyWrapProtocolInput struct { + KeyProviderKeyWrapProtocolInput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolInput,proto3" json:"KeyProviderKeyWrapProtocolInput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolInput) Reset() { *m = KeyProviderKeyWrapProtocolInput{} } +func (m *KeyProviderKeyWrapProtocolInput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolInput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolInput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{0} +} + +func (m *KeyProviderKeyWrapProtocolInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolInput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolInput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolInput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolInput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolInput) GetKeyProviderKeyWrapProtocolInput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolInput + } + return nil +} + +type KeyProviderKeyWrapProtocolOutput struct { + KeyProviderKeyWrapProtocolOutput []byte `protobuf:"bytes,1,opt,name=KeyProviderKeyWrapProtocolOutput,proto3" json:"KeyProviderKeyWrapProtocolOutput,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyProviderKeyWrapProtocolOutput) Reset() { *m = KeyProviderKeyWrapProtocolOutput{} } +func (m *KeyProviderKeyWrapProtocolOutput) String() string { return proto.CompactTextString(m) } +func (*KeyProviderKeyWrapProtocolOutput) ProtoMessage() {} +func (*KeyProviderKeyWrapProtocolOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_da74c8e785ad390c, []int{1} +} + +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Unmarshal(m, b) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Marshal(b, m, deterministic) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Merge(m, src) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_Size() int { + return xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.Size(m) +} +func (m *KeyProviderKeyWrapProtocolOutput) XXX_DiscardUnknown() { + xxx_messageInfo_KeyProviderKeyWrapProtocolOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyProviderKeyWrapProtocolOutput proto.InternalMessageInfo + +func (m *KeyProviderKeyWrapProtocolOutput) GetKeyProviderKeyWrapProtocolOutput() []byte { + if m != nil { + return m.KeyProviderKeyWrapProtocolOutput + } + return nil +} + +func init() { + 
proto.RegisterType((*KeyProviderKeyWrapProtocolInput)(nil), "keyprovider.keyProviderKeyWrapProtocolInput") + proto.RegisterType((*KeyProviderKeyWrapProtocolOutput)(nil), "keyprovider.keyProviderKeyWrapProtocolOutput") +} + +func init() { + proto.RegisterFile("keyprovider.proto", fileDescriptor_da74c8e785ad390c) +} + +var fileDescriptor_da74c8e785ad390c = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xcc, 0x4e, 0xad, 0x2c, + 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x03, 0x32, 0x4a, 0xf2, 0x85, 0xb8, 0x91, 0x84, + 0x94, 0xb2, 0xb9, 0xe4, 0x81, 0xdc, 0x00, 0x28, 0xd7, 0x3b, 0xb5, 0x32, 0xbc, 0x28, 0xb1, 0x20, + 0x00, 0xa4, 0x2e, 0x39, 0x3f, 0xc7, 0x33, 0xaf, 0xa0, 0xb4, 0x44, 0xc8, 0x83, 0x4b, 0xde, 0x1b, + 0xbf, 0x12, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x42, 0xca, 0x94, 0xf2, 0xb8, 0x14, 0x70, + 0x5b, 0xe6, 0x5f, 0x5a, 0x02, 0xb2, 0xcd, 0x8b, 0x4b, 0xc1, 0x9b, 0x80, 0x1a, 0xa8, 0x75, 0x04, + 0xd5, 0x19, 0xbd, 0x62, 0xe4, 0x12, 0x42, 0x52, 0x14, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a, + 0x94, 0xc1, 0xc5, 0x0e, 0x52, 0x0c, 0x94, 0x11, 0xd2, 0xd1, 0x43, 0x0e, 0x1f, 0x02, 0x21, 0x21, + 0xa5, 0x4b, 0xa4, 0x6a, 0x88, 0xf5, 0x4a, 0x0c, 0x42, 0x59, 0x5c, 0x9c, 0xa1, 0x79, 0xf4, 0xb1, + 0xcb, 0x89, 0x37, 0x0a, 0x39, 0x62, 0x93, 0xd8, 0xc0, 0x91, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x9a, 0x10, 0xcb, 0xf9, 0x01, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyProviderServiceClient is the client API for KeyProviderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyProviderServiceClient interface { + WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) +} + +type keyProviderServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyProviderServiceClient(cc grpc.ClientConnInterface) KeyProviderServiceClient { + return &keyProviderServiceClient{cc} +} + +func (c *keyProviderServiceClient) WrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/WrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyProviderServiceClient) UnWrapKey(ctx context.Context, in *KeyProviderKeyWrapProtocolInput, opts ...grpc.CallOption) (*KeyProviderKeyWrapProtocolOutput, error) { + out := new(KeyProviderKeyWrapProtocolOutput) + err := c.cc.Invoke(ctx, "/keyprovider.KeyProviderService/UnWrapKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyProviderServiceServer is the server API for KeyProviderService service. 
+type KeyProviderServiceServer interface { + WrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) + UnWrapKey(context.Context, *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) +} + +// UnimplementedKeyProviderServiceServer can be embedded to have forward compatible implementations. +type UnimplementedKeyProviderServiceServer struct { +} + +func (*UnimplementedKeyProviderServiceServer) WrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method WrapKey not implemented") +} +func (*UnimplementedKeyProviderServiceServer) UnWrapKey(ctx context.Context, req *KeyProviderKeyWrapProtocolInput) (*KeyProviderKeyWrapProtocolOutput, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnWrapKey not implemented") +} + +func RegisterKeyProviderServiceServer(s *grpc.Server, srv KeyProviderServiceServer) { + s.RegisterService(&_KeyProviderService_serviceDesc, srv) +} + +func _KeyProviderService_WrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).WrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/WrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).WrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyProviderService_UnWrapKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeyProviderKeyWrapProtocolInput) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/keyprovider.KeyProviderService/UnWrapKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyProviderServiceServer).UnWrapKey(ctx, req.(*KeyProviderKeyWrapProtocolInput)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyProviderService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "keyprovider.KeyProviderService", + HandlerType: (*KeyProviderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "WrapKey", + Handler: _KeyProviderService_WrapKey_Handler, + }, + { + MethodName: "UnWrapKey", + Handler: _KeyProviderService_UnWrapKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "keyprovider.proto", +} diff --git a/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto new file mode 100644 index 000000000..a71f0a592 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/utils/keyprovider/keyprovider.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package keyprovider; +option go_package = "keyprovider"; + +message keyProviderKeyWrapProtocolInput { + bytes KeyProviderKeyWrapProtocolInput = 1; +} + +message keyProviderKeyWrapProtocolOutput { + bytes KeyProviderKeyWrapProtocolOutput = 1; +} + +service KeyProviderService { + rpc 
WrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; + rpc UnWrapKey(keyProviderKeyWrapProtocolInput) returns (keyProviderKeyWrapProtocolOutput) {}; +}
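The service definition above completes the key-provider wire protocol: both RPCs exchange a single opaque `bytes` payload, so an external provider never has to link against ocicrypt itself. Below is a minimal sketch of how a standalone provider process could serve this interface using the generated stubs; the `demoProvider` type, the echo placeholder in `WrapKey`, and the listen address are illustrative assumptions, not part of this change:

~~~ go
package main

import (
	"context"
	"log"
	"net"

	keyprovider "github.com/containers/ocicrypt/utils/keyprovider"
	"google.golang.org/grpc"
)

// demoProvider implements KeyProviderServiceServer; embedding the generated
// Unimplemented type keeps it forward compatible (UnWrapKey stays unimplemented here).
type demoProvider struct {
	keyprovider.UnimplementedKeyProviderServiceServer
}

// WrapKey echoes the opaque payload back; a real provider would decode it and
// return a serialized response produced by its key-management backend.
func (p *demoProvider) WrapKey(ctx context.Context, in *keyprovider.KeyProviderKeyWrapProtocolInput) (*keyprovider.KeyProviderKeyWrapProtocolOutput, error) {
	return &keyprovider.KeyProviderKeyWrapProtocolOutput{
		KeyProviderKeyWrapProtocolOutput: in.GetKeyProviderKeyWrapProtocolInput(),
	}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	keyprovider.RegisterKeyProviderServiceServer(s, &demoProvider{})
	log.Fatal(s.Serve(lis))
}
~~~

On the client side, the generated NewKeyProviderServiceClient constructor wraps a grpc.ClientConn the same way, which is how a caller would drive WrapKey/UnWrapKey against such a provider.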
diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go
index 14eea38c1..7bc2aa28d 100644
--- a/vendor/github.com/containers/ocicrypt/utils/utils.go
+++ b/vendor/github.com/containers/ocicrypt/utils/utils.go
@@ -24,6 +24,8 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/containers/ocicrypt/crypto/pkcs11"
+
 	"github.com/pkg/errors"
 	"golang.org/x/crypto/openpgp"
 	json "gopkg.in/square/go-jose.v2"
@@ -55,6 +57,18 @@ func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
 	return &jwk, nil
 }
 
+// parsePkcs11PrivateKeyYaml parses the input byte array as a pkcs11 key file in YAML format
+func parsePkcs11PrivateKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
+// parsePkcs11PublicKeyYaml parses the input byte array as a pkcs11 key file in YAML format
+func parsePkcs11PublicKeyYaml(yaml []byte, prefix string) (*pkcs11.Pkcs11KeyFileObject, error) {
+	// if the URI does not have enough attributes, we will throw an error when decrypting
+	return pkcs11.ParsePkcs11KeyFile(yaml)
+}
+
 // IsPasswordError checks whether an error is related to a missing or wrong
 // password
 func IsPasswordError(err error) bool {
@@ -102,6 +116,9 @@ func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{
 		}
 	} else {
 		key, err = parseJWKPrivateKey(privKey, prefix)
+		if err != nil {
+			key, err = parsePkcs11PrivateKeyYaml(privKey, prefix)
+		}
 	}
 	}
 	return key, err
@@ -114,6 +131,11 @@ func IsPrivateKey(data []byte, password []byte) (bool, error) {
 	return err == nil, err
 }
 
+// IsPkcs11PrivateKey returns true in case the given byte array represents a pkcs11 private key
+func IsPkcs11PrivateKey(data []byte) bool {
+	return pkcs11.IsPkcs11PrivateKey(data)
+}
+
 // ParsePublicKey tries to parse a public key in DER format first and
 // PEM format after, returning an error if the parsing failed
 func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
@@ -127,6 +149,9 @@
 		}
 	} else {
 		key, err = parseJWKPublicKey(pubKey, prefix)
+		if err != nil {
+			key, err = parsePkcs11PublicKeyYaml(pubKey, prefix)
+		}
 	}
 	}
 	return key, err
@@ -138,6 +163,11 @@ func IsPublicKey(data []byte) bool {
 	return err == nil
 }
 
+// IsPkcs11PublicKey returns true in case the given byte array represents a pkcs11 public key
+func IsPkcs11PublicKey(data []byte) bool {
+	return pkcs11.IsPkcs11PublicKey(data)
+}
+
 // ParseCertificate tries to parse a public key in DER format first and
 // PEM format after, returning an error if the parsing failed
 func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) {
diff --git a/vendor/github.com/miekg/pkcs11/.gitignore b/vendor/github.com/miekg/pkcs11/.gitignore
new file mode 100644
index 000000000..5fde17f99
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/.gitignore
@@ -0,0 +1,3 @@
+tags
+test_db/*/generation
+test_db/*/*.lock
diff --git a/vendor/github.com/miekg/pkcs11/.travis.yml b/vendor/github.com/miekg/pkcs11/.travis.yml
new file mode 100644
index 000000000..687044d83
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+sudo: required
+dist: trusty
+
+go:
+  - 1.9
+  - tip
+
+script:
+  - go test -v ./...
+
+before_script:
+  - sudo apt-get update
+  - sudo apt-get -y install libsofthsm
diff --git a/vendor/github.com/miekg/pkcs11/LICENSE b/vendor/github.com/miekg/pkcs11/LICENSE
new file mode 100644
index 000000000..ce25d13ab
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 Miek Gieben. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Miek Gieben nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/miekg/pkcs11/Makefile.release b/vendor/github.com/miekg/pkcs11/Makefile.release
new file mode 100644
index 000000000..4f58165f9
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/Makefile.release
@@ -0,0 +1,57 @@
+# Makefile for releasing.
+#
+# The release is controlled from version.go. The version found there is
+# used to tag the git repo; we're not building any artifacts so there is nothing
+# to upload to github.
+#
+# * Up the version in version.go
+# * Run: make -f Makefile.release release
+#   * will *commit* your change with 'Release $VERSION'
+#   * push to github
+#
+
+define GO
+//+build ignore
+
+package main
+
+import (
+	"fmt"
+
+	"github.com/miekg/pkcs11"
+)
+
+func main() {
+	fmt.Println(pkcs11.Release.String())
+}
+endef
+
+$(file > version_release.go,$(GO))
+VERSION:=$(shell go run -tags release version_release.go)
+TAG="v$(VERSION)"
+
+all:
+	rm -f version_release.go
+	@echo Use the \'release\' target to start a release $(VERSION)
+
+.PHONY: run
+run:
+	rm -f version_release.go
+	@echo $(VERSION)
+
+.PHONY: release
+release: commit push
+	@echo Released $(VERSION)
+
+.PHONY: commit
+commit:
+	rm -f version_release.go
+	@echo Committing release $(VERSION)
+	git commit -am"Release $(VERSION)"
+	git tag $(TAG)
+
+.PHONY: push
+push:
+	@echo Pushing release $(VERSION) to master
+	git push --tags
+	git push
diff --git a/vendor/github.com/miekg/pkcs11/README.md b/vendor/github.com/miekg/pkcs11/README.md
new file mode 100644
index 000000000..0a5c1b7b6
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/README.md
@@ -0,0 +1,68 @@
+# PKCS#11 [![Build Status](https://travis-ci.org/miekg/pkcs11.png?branch=master)](https://travis-ci.org/miekg/pkcs11) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/miekg/pkcs11)
+
+This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where
+it makes sense. It has been tested with SoftHSM.
+
+## SoftHSM
+
+ * Make it use a custom configuration file `export SOFTHSM_CONF=$PWD/softhsm.conf`
+
+ * Then use `softhsm` to init it
+
+   ~~~
+   softhsm --init-token --slot 0 --label test --pin 1234
+   ~~~
+
+ * Then use `libsofthsm.so` as the pkcs11 module:
+
+   ~~~ go
+   p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
+   ~~~
+
+## Examples
+
+A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):
+
+~~~ go
+p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
+err := p.Initialize()
+if err != nil {
+	panic(err)
+}
+
+defer p.Destroy()
+defer p.Finalize()
+
+slots, err := p.GetSlotList(true)
+if err != nil {
+	panic(err)
+}
+
+session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
+if err != nil {
+	panic(err)
+}
+defer p.CloseSession(session)
+
+err = p.Login(session, pkcs11.CKU_USER, "1234")
+if err != nil {
+	panic(err)
+}
+defer p.Logout(session)
+
+p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)})
+hash, err := p.Digest(session, []byte("this is a string"))
+if err != nil {
+	panic(err)
+}
+
+for _, d := range hash {
+	fmt.Printf("%x", d)
+}
+fmt.Println()
+~~~
+
+Further examples are included in the tests.
+
+To expose PKCS#11 keys using the [crypto.Signer interface](https://golang.org/pkg/crypto/#Signer),
+please see [github.com/thalesignite/crypto11](https://github.com/thalesignite/crypto11).
diff --git a/vendor/github.com/miekg/pkcs11/const.go b/vendor/github.com/miekg/pkcs11/const.go
new file mode 100644
index 000000000..408856146
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/const.go
@@ -0,0 +1,736 @@
+// Copyright 2013 Miek Gieben. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package pkcs11 + +const ( + CKU_SO uint = 0 + CKU_USER uint = 1 + CKU_CONTEXT_SPECIFIC uint = 2 +) + +const ( + CKO_DATA uint = 0x00000000 + CKO_CERTIFICATE uint = 0x00000001 + CKO_PUBLIC_KEY uint = 0x00000002 + CKO_PRIVATE_KEY uint = 0x00000003 + CKO_SECRET_KEY uint = 0x00000004 + CKO_HW_FEATURE uint = 0x00000005 + CKO_DOMAIN_PARAMETERS uint = 0x00000006 + CKO_MECHANISM uint = 0x00000007 + CKO_OTP_KEY uint = 0x00000008 + CKO_VENDOR_DEFINED uint = 0x80000000 +) + +const ( + CKG_MGF1_SHA1 uint = 0x00000001 + CKG_MGF1_SHA224 uint = 0x00000005 + CKG_MGF1_SHA256 uint = 0x00000002 + CKG_MGF1_SHA384 uint = 0x00000003 + CKG_MGF1_SHA512 uint = 0x00000004 + CKG_MGF1_SHA3_224 uint = 0x00000006 + CKG_MGF1_SHA3_256 uint = 0x00000007 + CKG_MGF1_SHA3_384 uint = 0x00000008 + CKG_MGF1_SHA3_512 uint = 0x00000009 +) + +const ( + CKZ_DATA_SPECIFIED uint = 0x00000001 +) + +// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g' + +// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and +// mechanism (CKM_) constants as defined in PKCS#11. +const ( + CKF_TOKEN_PRESENT = 0x00000001 + CKF_REMOVABLE_DEVICE = 0x00000002 + CKF_HW_SLOT = 0x00000004 + CKF_RNG = 0x00000001 + CKF_WRITE_PROTECTED = 0x00000002 + CKF_LOGIN_REQUIRED = 0x00000004 + CKF_USER_PIN_INITIALIZED = 0x00000008 + CKF_RESTORE_KEY_NOT_NEEDED = 0x00000020 + CKF_CLOCK_ON_TOKEN = 0x00000040 + CKF_PROTECTED_AUTHENTICATION_PATH = 0x00000100 + CKF_DUAL_CRYPTO_OPERATIONS = 0x00000200 + CKF_TOKEN_INITIALIZED = 0x00000400 + CKF_SECONDARY_AUTHENTICATION = 0x00000800 + CKF_USER_PIN_COUNT_LOW = 0x00010000 + CKF_USER_PIN_FINAL_TRY = 0x00020000 + CKF_USER_PIN_LOCKED = 0x00040000 + CKF_USER_PIN_TO_BE_CHANGED = 0x00080000 + CKF_SO_PIN_COUNT_LOW = 0x00100000 + CKF_SO_PIN_FINAL_TRY = 0x00200000 + CKF_SO_PIN_LOCKED = 0x00400000 + CKF_SO_PIN_TO_BE_CHANGED = 0x00800000 + CKF_ERROR_STATE = 0x01000000 + CKF_RW_SESSION = 0x00000002 + CKF_SERIAL_SESSION = 0x00000004 + CKK_RSA = 0x00000000 + CKK_DSA = 0x00000001 + CKK_DH = 0x00000002 + CKK_ECDSA = 0x00000003 + CKK_EC = 0x00000003 + CKK_X9_42_DH = 0x00000004 + CKK_KEA = 0x00000005 + CKK_GENERIC_SECRET = 0x00000010 + CKK_RC2 = 0x00000011 + CKK_RC4 = 0x00000012 + CKK_DES = 0x00000013 + CKK_DES2 = 0x00000014 + CKK_DES3 = 0x00000015 + CKK_CAST = 0x00000016 + CKK_CAST3 = 0x00000017 + CKK_CAST5 = 0x00000018 + CKK_CAST128 = 0x00000018 + CKK_RC5 = 0x00000019 + CKK_IDEA = 0x0000001A + CKK_SKIPJACK = 0x0000001B + CKK_BATON = 0x0000001C + CKK_JUNIPER = 0x0000001D + CKK_CDMF = 0x0000001E + CKK_AES = 0x0000001F + CKK_BLOWFISH = 0x00000020 + CKK_TWOFISH = 0x00000021 + CKK_SECURID = 0x00000022 + CKK_HOTP = 0x00000023 + CKK_ACTI = 0x00000024 + CKK_CAMELLIA = 0x00000025 + CKK_ARIA = 0x00000026 + CKK_SHA512_224_HMAC = 0x00000027 + CKK_SHA512_256_HMAC = 0x00000028 + CKK_SHA512_T_HMAC = 0x00000029 + CKK_SHA_1_HMAC = 0x00000028 + CKK_SHA224_HMAC = 0x0000002E + CKK_SHA256_HMAC = 0x0000002B + CKK_SHA384_HMAC = 0x0000002C + CKK_SHA512_HMAC = 0x0000002D + CKK_SEED = 0x0000002F + CKK_GOSTR3410 = 0x00000030 + CKK_GOSTR3411 = 0x00000031 + CKK_GOST28147 = 0x00000032 + CKK_SHA3_224_HMAC = 0x00000033 + CKK_SHA3_256_HMAC = 0x00000034 + CKK_SHA3_384_HMAC = 0x00000035 + CKK_SHA3_512_HMAC = 0x00000036 + CKK_VENDOR_DEFINED = 0x80000000 + CKC_X_509 = 0x00000000 + CKC_X_509_ATTR_CERT = 0x00000001 + CKC_WTLS = 0x00000002 + CKC_VENDOR_DEFINED = 0x80000000 + CKF_ARRAY_ATTRIBUTE = 0x40000000 + CKA_CLASS = 0x00000000 + CKA_TOKEN = 0x00000001 + CKA_PRIVATE = 
0x00000002 + CKA_LABEL = 0x00000003 + CKA_APPLICATION = 0x00000010 + CKA_VALUE = 0x00000011 + CKA_OBJECT_ID = 0x00000012 + CKA_CERTIFICATE_TYPE = 0x00000080 + CKA_ISSUER = 0x00000081 + CKA_SERIAL_NUMBER = 0x00000082 + CKA_AC_ISSUER = 0x00000083 + CKA_OWNER = 0x00000084 + CKA_ATTR_TYPES = 0x00000085 + CKA_TRUSTED = 0x00000086 + CKA_CERTIFICATE_CATEGORY = 0x00000087 + CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088 + CKA_URL = 0x00000089 + CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A + CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B + CKA_NAME_HASH_ALGORITHM = 0x0000008C + CKA_CHECK_VALUE = 0x00000090 + CKA_KEY_TYPE = 0x00000100 + CKA_SUBJECT = 0x00000101 + CKA_ID = 0x00000102 + CKA_SENSITIVE = 0x00000103 + CKA_ENCRYPT = 0x00000104 + CKA_DECRYPT = 0x00000105 + CKA_WRAP = 0x00000106 + CKA_UNWRAP = 0x00000107 + CKA_SIGN = 0x00000108 + CKA_SIGN_RECOVER = 0x00000109 + CKA_VERIFY = 0x0000010A + CKA_VERIFY_RECOVER = 0x0000010B + CKA_DERIVE = 0x0000010C + CKA_START_DATE = 0x00000110 + CKA_END_DATE = 0x00000111 + CKA_MODULUS = 0x00000120 + CKA_MODULUS_BITS = 0x00000121 + CKA_PUBLIC_EXPONENT = 0x00000122 + CKA_PRIVATE_EXPONENT = 0x00000123 + CKA_PRIME_1 = 0x00000124 + CKA_PRIME_2 = 0x00000125 + CKA_EXPONENT_1 = 0x00000126 + CKA_EXPONENT_2 = 0x00000127 + CKA_COEFFICIENT = 0x00000128 + CKA_PUBLIC_KEY_INFO = 0x00000129 + CKA_PRIME = 0x00000130 + CKA_SUBPRIME = 0x00000131 + CKA_BASE = 0x00000132 + CKA_PRIME_BITS = 0x00000133 + CKA_SUBPRIME_BITS = 0x00000134 + CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS + CKA_VALUE_BITS = 0x00000160 + CKA_VALUE_LEN = 0x00000161 + CKA_EXTRACTABLE = 0x00000162 + CKA_LOCAL = 0x00000163 + CKA_NEVER_EXTRACTABLE = 0x00000164 + CKA_ALWAYS_SENSITIVE = 0x00000165 + CKA_KEY_GEN_MECHANISM = 0x00000166 + CKA_MODIFIABLE = 0x00000170 + CKA_COPYABLE = 0x00000171 + CKA_DESTROYABLE = 0x00000172 + CKA_ECDSA_PARAMS = 0x00000180 + CKA_EC_PARAMS = 0x00000180 + CKA_EC_POINT = 0x00000181 + CKA_SECONDARY_AUTH = 0x00000200 + CKA_AUTH_PIN_FLAGS = 0x00000201 + CKA_ALWAYS_AUTHENTICATE = 0x00000202 + CKA_WRAP_WITH_TRUSTED = 0x00000210 + CKA_WRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000211 + CKA_UNWRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000212 + CKA_OTP_FORMAT = 0x00000220 + CKA_OTP_LENGTH = 0x00000221 + CKA_OTP_TIME_INTERVAL = 0x00000222 + CKA_OTP_USER_FRIENDLY_MODE = 0x00000223 + CKA_OTP_CHALLENGE_REQUIREMENT = 0x00000224 + CKA_OTP_TIME_REQUIREMENT = 0x00000225 + CKA_OTP_COUNTER_REQUIREMENT = 0x00000226 + CKA_OTP_PIN_REQUIREMENT = 0x00000227 + CKA_OTP_COUNTER = 0x0000022E + CKA_OTP_TIME = 0x0000022F + CKA_OTP_USER_IDENTIFIER = 0x0000022A + CKA_OTP_SERVICE_IDENTIFIER = 0x0000022B + CKA_OTP_SERVICE_LOGO = 0x0000022C + CKA_OTP_SERVICE_LOGO_TYPE = 0x0000022D + CKA_GOSTR3410_PARAMS = 0x00000250 + CKA_GOSTR3411_PARAMS = 0x00000251 + CKA_GOST28147_PARAMS = 0x00000252 + CKA_HW_FEATURE_TYPE = 0x00000300 + CKA_RESET_ON_INIT = 0x00000301 + CKA_HAS_RESET = 0x00000302 + CKA_PIXEL_X = 0x00000400 + CKA_PIXEL_Y = 0x00000401 + CKA_RESOLUTION = 0x00000402 + CKA_CHAR_ROWS = 0x00000403 + CKA_CHAR_COLUMNS = 0x00000404 + CKA_COLOR = 0x00000405 + CKA_BITS_PER_PIXEL = 0x00000406 + CKA_CHAR_SETS = 0x00000480 + CKA_ENCODING_METHODS = 0x00000481 + CKA_MIME_TYPES = 0x00000482 + CKA_MECHANISM_TYPE = 0x00000500 + CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501 + CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502 + CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503 + CKA_ALLOWED_MECHANISMS = CKF_ARRAY_ATTRIBUTE | 0x00000600 + CKA_VENDOR_DEFINED = 0x80000000 + CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000 + CKM_RSA_PKCS = 0x00000001 + CKM_RSA_9796 = 0x00000002 + CKM_RSA_X_509 = 
0x00000003 + CKM_MD2_RSA_PKCS = 0x00000004 + CKM_MD5_RSA_PKCS = 0x00000005 + CKM_SHA1_RSA_PKCS = 0x00000006 + CKM_RIPEMD128_RSA_PKCS = 0x00000007 + CKM_RIPEMD160_RSA_PKCS = 0x00000008 + CKM_RSA_PKCS_OAEP = 0x00000009 + CKM_RSA_X9_31_KEY_PAIR_GEN = 0x0000000A + CKM_RSA_X9_31 = 0x0000000B + CKM_SHA1_RSA_X9_31 = 0x0000000C + CKM_RSA_PKCS_PSS = 0x0000000D + CKM_SHA1_RSA_PKCS_PSS = 0x0000000E + CKM_DSA_KEY_PAIR_GEN = 0x00000010 + CKM_DSA = 0x00000011 + CKM_DSA_SHA1 = 0x00000012 + CKM_DSA_FIPS_G_GEN = 0x00000013 + CKM_DSA_SHA224 = 0x00000014 + CKM_DSA_SHA256 = 0x00000015 + CKM_DSA_SHA384 = 0x00000016 + CKM_DSA_SHA512 = 0x00000017 + CKM_DSA_SHA3_224 = 0x00000018 + CKM_DSA_SHA3_256 = 0x00000019 + CKM_DSA_SHA3_384 = 0x0000001A + CKM_DSA_SHA3_512 = 0x0000001B + CKM_DH_PKCS_KEY_PAIR_GEN = 0x00000020 + CKM_DH_PKCS_DERIVE = 0x00000021 + CKM_X9_42_DH_KEY_PAIR_GEN = 0x00000030 + CKM_X9_42_DH_DERIVE = 0x00000031 + CKM_X9_42_DH_HYBRID_DERIVE = 0x00000032 + CKM_X9_42_MQV_DERIVE = 0x00000033 + CKM_SHA256_RSA_PKCS = 0x00000040 + CKM_SHA384_RSA_PKCS = 0x00000041 + CKM_SHA512_RSA_PKCS = 0x00000042 + CKM_SHA256_RSA_PKCS_PSS = 0x00000043 + CKM_SHA384_RSA_PKCS_PSS = 0x00000044 + CKM_SHA512_RSA_PKCS_PSS = 0x00000045 + CKM_SHA224_RSA_PKCS = 0x00000046 + CKM_SHA224_RSA_PKCS_PSS = 0x00000047 + CKM_SHA512_224 = 0x00000048 + CKM_SHA512_224_HMAC = 0x00000049 + CKM_SHA512_224_HMAC_GENERAL = 0x0000004A + CKM_SHA512_224_KEY_DERIVATION = 0x0000004B + CKM_SHA512_256 = 0x0000004C + CKM_SHA512_256_HMAC = 0x0000004D + CKM_SHA512_256_HMAC_GENERAL = 0x0000004E + CKM_SHA512_256_KEY_DERIVATION = 0x0000004F + CKM_SHA512_T = 0x00000050 + CKM_SHA512_T_HMAC = 0x00000051 + CKM_SHA512_T_HMAC_GENERAL = 0x00000052 + CKM_SHA512_T_KEY_DERIVATION = 0x00000053 + CKM_SHA3_256_RSA_PKCS = 0x00000060 + CKM_SHA3_384_RSA_PKCS = 0x00000061 + CKM_SHA3_512_RSA_PKCS = 0x00000062 + CKM_SHA3_256_RSA_PKCS_PSS = 0x00000063 + CKM_SHA3_384_RSA_PKCS_PSS = 0x00000064 + CKM_SHA3_512_RSA_PKCS_PSS = 0x00000065 + CKM_SHA3_224_RSA_PKCS = 0x00000066 + CKM_SHA3_224_RSA_PKCS_PSS = 0x00000067 + CKM_RC2_KEY_GEN = 0x00000100 + CKM_RC2_ECB = 0x00000101 + CKM_RC2_CBC = 0x00000102 + CKM_RC2_MAC = 0x00000103 + CKM_RC2_MAC_GENERAL = 0x00000104 + CKM_RC2_CBC_PAD = 0x00000105 + CKM_RC4_KEY_GEN = 0x00000110 + CKM_RC4 = 0x00000111 + CKM_DES_KEY_GEN = 0x00000120 + CKM_DES_ECB = 0x00000121 + CKM_DES_CBC = 0x00000122 + CKM_DES_MAC = 0x00000123 + CKM_DES_MAC_GENERAL = 0x00000124 + CKM_DES_CBC_PAD = 0x00000125 + CKM_DES2_KEY_GEN = 0x00000130 + CKM_DES3_KEY_GEN = 0x00000131 + CKM_DES3_ECB = 0x00000132 + CKM_DES3_CBC = 0x00000133 + CKM_DES3_MAC = 0x00000134 + CKM_DES3_MAC_GENERAL = 0x00000135 + CKM_DES3_CBC_PAD = 0x00000136 + CKM_DES3_CMAC_GENERAL = 0x00000137 + CKM_DES3_CMAC = 0x00000138 + CKM_CDMF_KEY_GEN = 0x00000140 + CKM_CDMF_ECB = 0x00000141 + CKM_CDMF_CBC = 0x00000142 + CKM_CDMF_MAC = 0x00000143 + CKM_CDMF_MAC_GENERAL = 0x00000144 + CKM_CDMF_CBC_PAD = 0x00000145 + CKM_DES_OFB64 = 0x00000150 + CKM_DES_OFB8 = 0x00000151 + CKM_DES_CFB64 = 0x00000152 + CKM_DES_CFB8 = 0x00000153 + CKM_MD2 = 0x00000200 + CKM_MD2_HMAC = 0x00000201 + CKM_MD2_HMAC_GENERAL = 0x00000202 + CKM_MD5 = 0x00000210 + CKM_MD5_HMAC = 0x00000211 + CKM_MD5_HMAC_GENERAL = 0x00000212 + CKM_SHA_1 = 0x00000220 + CKM_SHA_1_HMAC = 0x00000221 + CKM_SHA_1_HMAC_GENERAL = 0x00000222 + CKM_RIPEMD128 = 0x00000230 + CKM_RIPEMD128_HMAC = 0x00000231 + CKM_RIPEMD128_HMAC_GENERAL = 0x00000232 + CKM_RIPEMD160 = 0x00000240 + CKM_RIPEMD160_HMAC = 0x00000241 + CKM_RIPEMD160_HMAC_GENERAL = 0x00000242 + CKM_SHA256 = 0x00000250 + 
CKM_SHA256_HMAC = 0x00000251 + CKM_SHA256_HMAC_GENERAL = 0x00000252 + CKM_SHA224 = 0x00000255 + CKM_SHA224_HMAC = 0x00000256 + CKM_SHA224_HMAC_GENERAL = 0x00000257 + CKM_SHA384 = 0x00000260 + CKM_SHA384_HMAC = 0x00000261 + CKM_SHA384_HMAC_GENERAL = 0x00000262 + CKM_SHA512 = 0x00000270 + CKM_SHA512_HMAC = 0x00000271 + CKM_SHA512_HMAC_GENERAL = 0x00000272 + CKM_SECURID_KEY_GEN = 0x00000280 + CKM_SECURID = 0x00000282 + CKM_HOTP_KEY_GEN = 0x00000290 + CKM_HOTP = 0x00000291 + CKM_ACTI = 0x000002A0 + CKM_ACTI_KEY_GEN = 0x000002A1 + CKM_SHA3_256 = 0x000002B0 + CKM_SHA3_256_HMAC = 0x000002B1 + CKM_SHA3_256_HMAC_GENERAL = 0x000002B2 + CKM_SHA3_256_KEY_GEN = 0x000002B3 + CKM_SHA3_224 = 0x000002B5 + CKM_SHA3_224_HMAC = 0x000002B6 + CKM_SHA3_224_HMAC_GENERAL = 0x000002B7 + CKM_SHA3_224_KEY_GEN = 0x000002B8 + CKM_SHA3_384 = 0x000002C0 + CKM_SHA3_384_HMAC = 0x000002C1 + CKM_SHA3_384_HMAC_GENERAL = 0x000002C2 + CKM_SHA3_384_KEY_GEN = 0x000002C3 + CKM_SHA3_512 = 0x000002D0 + CKM_SHA3_512_HMAC = 0x000002D1 + CKM_SHA3_512_HMAC_GENERAL = 0x000002D2 + CKM_SHA3_512_KEY_GEN = 0x000002D3 + CKM_CAST_KEY_GEN = 0x00000300 + CKM_CAST_ECB = 0x00000301 + CKM_CAST_CBC = 0x00000302 + CKM_CAST_MAC = 0x00000303 + CKM_CAST_MAC_GENERAL = 0x00000304 + CKM_CAST_CBC_PAD = 0x00000305 + CKM_CAST3_KEY_GEN = 0x00000310 + CKM_CAST3_ECB = 0x00000311 + CKM_CAST3_CBC = 0x00000312 + CKM_CAST3_MAC = 0x00000313 + CKM_CAST3_MAC_GENERAL = 0x00000314 + CKM_CAST3_CBC_PAD = 0x00000315 + CKM_CAST5_KEY_GEN = 0x00000320 + CKM_CAST128_KEY_GEN = 0x00000320 + CKM_CAST5_ECB = 0x00000321 + CKM_CAST128_ECB = 0x00000321 + CKM_CAST5_CBC = 0x00000322 + CKM_CAST128_CBC = 0x00000322 + CKM_CAST5_MAC = 0x00000323 + CKM_CAST128_MAC = 0x00000323 + CKM_CAST5_MAC_GENERAL = 0x00000324 + CKM_CAST128_MAC_GENERAL = 0x00000324 + CKM_CAST5_CBC_PAD = 0x00000325 + CKM_CAST128_CBC_PAD = 0x00000325 + CKM_RC5_KEY_GEN = 0x00000330 + CKM_RC5_ECB = 0x00000331 + CKM_RC5_CBC = 0x00000332 + CKM_RC5_MAC = 0x00000333 + CKM_RC5_MAC_GENERAL = 0x00000334 + CKM_RC5_CBC_PAD = 0x00000335 + CKM_IDEA_KEY_GEN = 0x00000340 + CKM_IDEA_ECB = 0x00000341 + CKM_IDEA_CBC = 0x00000342 + CKM_IDEA_MAC = 0x00000343 + CKM_IDEA_MAC_GENERAL = 0x00000344 + CKM_IDEA_CBC_PAD = 0x00000345 + CKM_GENERIC_SECRET_KEY_GEN = 0x00000350 + CKM_CONCATENATE_BASE_AND_KEY = 0x00000360 + CKM_CONCATENATE_BASE_AND_DATA = 0x00000362 + CKM_CONCATENATE_DATA_AND_BASE = 0x00000363 + CKM_XOR_BASE_AND_DATA = 0x00000364 + CKM_EXTRACT_KEY_FROM_KEY = 0x00000365 + CKM_SSL3_PRE_MASTER_KEY_GEN = 0x00000370 + CKM_SSL3_MASTER_KEY_DERIVE = 0x00000371 + CKM_SSL3_KEY_AND_MAC_DERIVE = 0x00000372 + CKM_SSL3_MASTER_KEY_DERIVE_DH = 0x00000373 + CKM_TLS_PRE_MASTER_KEY_GEN = 0x00000374 + CKM_TLS_MASTER_KEY_DERIVE = 0x00000375 + CKM_TLS_KEY_AND_MAC_DERIVE = 0x00000376 + CKM_TLS_MASTER_KEY_DERIVE_DH = 0x00000377 + CKM_TLS_PRF = 0x00000378 + CKM_SSL3_MD5_MAC = 0x00000380 + CKM_SSL3_SHA1_MAC = 0x00000381 + CKM_MD5_KEY_DERIVATION = 0x00000390 + CKM_MD2_KEY_DERIVATION = 0x00000391 + CKM_SHA1_KEY_DERIVATION = 0x00000392 + CKM_SHA256_KEY_DERIVATION = 0x00000393 + CKM_SHA384_KEY_DERIVATION = 0x00000394 + CKM_SHA512_KEY_DERIVATION = 0x00000395 + CKM_SHA224_KEY_DERIVATION = 0x00000396 + CKM_SHA3_256_KEY_DERIVE = 0x00000397 + CKM_SHA3_224_KEY_DERIVE = 0x00000398 + CKM_SHA3_384_KEY_DERIVE = 0x00000399 + CKM_SHA3_512_KEY_DERIVE = 0x0000039A + CKM_SHAKE_128_KEY_DERIVE = 0x0000039B + CKM_SHAKE_256_KEY_DERIVE = 0x0000039C + CKM_PBE_MD2_DES_CBC = 0x000003A0 + CKM_PBE_MD5_DES_CBC = 0x000003A1 + CKM_PBE_MD5_CAST_CBC = 0x000003A2 + CKM_PBE_MD5_CAST3_CBC = 0x000003A3 
+ CKM_PBE_MD5_CAST5_CBC = 0x000003A4 + CKM_PBE_MD5_CAST128_CBC = 0x000003A4 + CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 + CKM_PBE_SHA1_CAST128_CBC = 0x000003A5 + CKM_PBE_SHA1_RC4_128 = 0x000003A6 + CKM_PBE_SHA1_RC4_40 = 0x000003A7 + CKM_PBE_SHA1_DES3_EDE_CBC = 0x000003A8 + CKM_PBE_SHA1_DES2_EDE_CBC = 0x000003A9 + CKM_PBE_SHA1_RC2_128_CBC = 0x000003AA + CKM_PBE_SHA1_RC2_40_CBC = 0x000003AB + CKM_PKCS5_PBKD2 = 0x000003B0 + CKM_PBA_SHA1_WITH_SHA1_HMAC = 0x000003C0 + CKM_WTLS_PRE_MASTER_KEY_GEN = 0x000003D0 + CKM_WTLS_MASTER_KEY_DERIVE = 0x000003D1 + CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC = 0x000003D2 + CKM_WTLS_PRF = 0x000003D3 + CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 0x000003D4 + CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 0x000003D5 + CKM_TLS10_MAC_SERVER = 0x000003D6 + CKM_TLS10_MAC_CLIENT = 0x000003D7 + CKM_TLS12_MAC = 0x000003D8 + CKM_TLS12_KDF = 0x000003D9 + CKM_TLS12_MASTER_KEY_DERIVE = 0x000003E0 + CKM_TLS12_KEY_AND_MAC_DERIVE = 0x000003E1 + CKM_TLS12_MASTER_KEY_DERIVE_DH = 0x000003E2 + CKM_TLS12_KEY_SAFE_DERIVE = 0x000003E3 + CKM_TLS_MAC = 0x000003E4 + CKM_TLS_KDF = 0x000003E5 + CKM_KEY_WRAP_LYNKS = 0x00000400 + CKM_KEY_WRAP_SET_OAEP = 0x00000401 + CKM_CMS_SIG = 0x00000500 + CKM_KIP_DERIVE = 0x00000510 + CKM_KIP_WRAP = 0x00000511 + CKM_KIP_MAC = 0x00000512 + CKM_CAMELLIA_KEY_GEN = 0x00000550 + CKM_CAMELLIA_ECB = 0x00000551 + CKM_CAMELLIA_CBC = 0x00000552 + CKM_CAMELLIA_MAC = 0x00000553 + CKM_CAMELLIA_MAC_GENERAL = 0x00000554 + CKM_CAMELLIA_CBC_PAD = 0x00000555 + CKM_CAMELLIA_ECB_ENCRYPT_DATA = 0x00000556 + CKM_CAMELLIA_CBC_ENCRYPT_DATA = 0x00000557 + CKM_CAMELLIA_CTR = 0x00000558 + CKM_ARIA_KEY_GEN = 0x00000560 + CKM_ARIA_ECB = 0x00000561 + CKM_ARIA_CBC = 0x00000562 + CKM_ARIA_MAC = 0x00000563 + CKM_ARIA_MAC_GENERAL = 0x00000564 + CKM_ARIA_CBC_PAD = 0x00000565 + CKM_ARIA_ECB_ENCRYPT_DATA = 0x00000566 + CKM_ARIA_CBC_ENCRYPT_DATA = 0x00000567 + CKM_SEED_KEY_GEN = 0x00000650 + CKM_SEED_ECB = 0x00000651 + CKM_SEED_CBC = 0x00000652 + CKM_SEED_MAC = 0x00000653 + CKM_SEED_MAC_GENERAL = 0x00000654 + CKM_SEED_CBC_PAD = 0x00000655 + CKM_SEED_ECB_ENCRYPT_DATA = 0x00000656 + CKM_SEED_CBC_ENCRYPT_DATA = 0x00000657 + CKM_SKIPJACK_KEY_GEN = 0x00001000 + CKM_SKIPJACK_ECB64 = 0x00001001 + CKM_SKIPJACK_CBC64 = 0x00001002 + CKM_SKIPJACK_OFB64 = 0x00001003 + CKM_SKIPJACK_CFB64 = 0x00001004 + CKM_SKIPJACK_CFB32 = 0x00001005 + CKM_SKIPJACK_CFB16 = 0x00001006 + CKM_SKIPJACK_CFB8 = 0x00001007 + CKM_SKIPJACK_WRAP = 0x00001008 + CKM_SKIPJACK_PRIVATE_WRAP = 0x00001009 + CKM_SKIPJACK_RELAYX = 0x0000100a + CKM_KEA_KEY_PAIR_GEN = 0x00001010 + CKM_KEA_KEY_DERIVE = 0x00001011 + CKM_KEA_DERIVE = 0x00001012 + CKM_FORTEZZA_TIMESTAMP = 0x00001020 + CKM_BATON_KEY_GEN = 0x00001030 + CKM_BATON_ECB128 = 0x00001031 + CKM_BATON_ECB96 = 0x00001032 + CKM_BATON_CBC128 = 0x00001033 + CKM_BATON_COUNTER = 0x00001034 + CKM_BATON_SHUFFLE = 0x00001035 + CKM_BATON_WRAP = 0x00001036 + CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 + CKM_EC_KEY_PAIR_GEN = 0x00001040 + CKM_ECDSA = 0x00001041 + CKM_ECDSA_SHA1 = 0x00001042 + CKM_ECDSA_SHA224 = 0x00001043 + CKM_ECDSA_SHA256 = 0x00001044 + CKM_ECDSA_SHA384 = 0x00001045 + CKM_ECDSA_SHA512 = 0x00001046 + CKM_ECDH1_DERIVE = 0x00001050 + CKM_ECDH1_COFACTOR_DERIVE = 0x00001051 + CKM_ECMQV_DERIVE = 0x00001052 + CKM_ECDH_AES_KEY_WRAP = 0x00001053 + CKM_RSA_AES_KEY_WRAP = 0x00001054 + CKM_JUNIPER_KEY_GEN = 0x00001060 + CKM_JUNIPER_ECB128 = 0x00001061 + CKM_JUNIPER_CBC128 = 0x00001062 + CKM_JUNIPER_COUNTER = 0x00001063 + CKM_JUNIPER_SHUFFLE = 0x00001064 + CKM_JUNIPER_WRAP = 0x00001065 + CKM_FASTHASH = 0x00001070 + CKM_AES_KEY_GEN 
= 0x00001080 + CKM_AES_ECB = 0x00001081 + CKM_AES_CBC = 0x00001082 + CKM_AES_MAC = 0x00001083 + CKM_AES_MAC_GENERAL = 0x00001084 + CKM_AES_CBC_PAD = 0x00001085 + CKM_AES_CTR = 0x00001086 + CKM_AES_GCM = 0x00001087 + CKM_AES_CCM = 0x00001088 + CKM_AES_CMAC_GENERAL = 0x00001089 + CKM_AES_CMAC = 0x0000108A + CKM_AES_CTS = 0x0000108B + CKM_AES_XCBC_MAC = 0x0000108C + CKM_AES_XCBC_MAC_96 = 0x0000108D + CKM_AES_GMAC = 0x0000108E + CKM_BLOWFISH_KEY_GEN = 0x00001090 + CKM_BLOWFISH_CBC = 0x00001091 + CKM_TWOFISH_KEY_GEN = 0x00001092 + CKM_TWOFISH_CBC = 0x00001093 + CKM_BLOWFISH_CBC_PAD = 0x00001094 + CKM_TWOFISH_CBC_PAD = 0x00001095 + CKM_DES_ECB_ENCRYPT_DATA = 0x00001100 + CKM_DES_CBC_ENCRYPT_DATA = 0x00001101 + CKM_DES3_ECB_ENCRYPT_DATA = 0x00001102 + CKM_DES3_CBC_ENCRYPT_DATA = 0x00001103 + CKM_AES_ECB_ENCRYPT_DATA = 0x00001104 + CKM_AES_CBC_ENCRYPT_DATA = 0x00001105 + CKM_GOSTR3410_KEY_PAIR_GEN = 0x00001200 + CKM_GOSTR3410 = 0x00001201 + CKM_GOSTR3410_WITH_GOSTR3411 = 0x00001202 + CKM_GOSTR3410_KEY_WRAP = 0x00001203 + CKM_GOSTR3410_DERIVE = 0x00001204 + CKM_GOSTR3411 = 0x00001210 + CKM_GOSTR3411_HMAC = 0x00001211 + CKM_GOST28147_KEY_GEN = 0x00001220 + CKM_GOST28147_ECB = 0x00001221 + CKM_GOST28147 = 0x00001222 + CKM_GOST28147_MAC = 0x00001223 + CKM_GOST28147_KEY_WRAP = 0x00001224 + CKM_DSA_PARAMETER_GEN = 0x00002000 + CKM_DH_PKCS_PARAMETER_GEN = 0x00002001 + CKM_X9_42_DH_PARAMETER_GEN = 0x00002002 + CKM_DSA_PROBABLISTIC_PARAMETER_GEN = 0x00002003 + CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN = 0x00002004 + CKM_AES_OFB = 0x00002104 + CKM_AES_CFB64 = 0x00002105 + CKM_AES_CFB8 = 0x00002106 + CKM_AES_CFB128 = 0x00002107 + CKM_AES_CFB1 = 0x00002108 + CKM_AES_KEY_WRAP = 0x00002109 + CKM_AES_KEY_WRAP_PAD = 0x0000210A + CKM_RSA_PKCS_TPM_1_1 = 0x00004001 + CKM_RSA_PKCS_OAEP_TPM_1_1 = 0x00004002 + CKM_VENDOR_DEFINED = 0x80000000 + CKF_HW = 0x00000001 + CKF_ENCRYPT = 0x00000100 + CKF_DECRYPT = 0x00000200 + CKF_DIGEST = 0x00000400 + CKF_SIGN = 0x00000800 + CKF_SIGN_RECOVER = 0x00001000 + CKF_VERIFY = 0x00002000 + CKF_VERIFY_RECOVER = 0x00004000 + CKF_GENERATE = 0x00008000 + CKF_GENERATE_KEY_PAIR = 0x00010000 + CKF_WRAP = 0x00020000 + CKF_UNWRAP = 0x00040000 + CKF_DERIVE = 0x00080000 + CKF_EC_F_P = 0x00100000 + CKF_EC_F_2M = 0x00200000 + CKF_EC_ECPARAMETERS = 0x00400000 + CKF_EC_NAMEDCURVE = 0x00800000 + CKF_EC_UNCOMPRESS = 0x01000000 + CKF_EC_COMPRESS = 0x02000000 + CKF_EXTENSION = 0x80000000 + CKR_OK = 0x00000000 + CKR_CANCEL = 0x00000001 + CKR_HOST_MEMORY = 0x00000002 + CKR_SLOT_ID_INVALID = 0x00000003 + CKR_GENERAL_ERROR = 0x00000005 + CKR_FUNCTION_FAILED = 0x00000006 + CKR_ARGUMENTS_BAD = 0x00000007 + CKR_NO_EVENT = 0x00000008 + CKR_NEED_TO_CREATE_THREADS = 0x00000009 + CKR_CANT_LOCK = 0x0000000A + CKR_ATTRIBUTE_READ_ONLY = 0x00000010 + CKR_ATTRIBUTE_SENSITIVE = 0x00000011 + CKR_ATTRIBUTE_TYPE_INVALID = 0x00000012 + CKR_ATTRIBUTE_VALUE_INVALID = 0x00000013 + CKR_ACTION_PROHIBITED = 0x0000001B + CKR_DATA_INVALID = 0x00000020 + CKR_DATA_LEN_RANGE = 0x00000021 + CKR_DEVICE_ERROR = 0x00000030 + CKR_DEVICE_MEMORY = 0x00000031 + CKR_DEVICE_REMOVED = 0x00000032 + CKR_ENCRYPTED_DATA_INVALID = 0x00000040 + CKR_ENCRYPTED_DATA_LEN_RANGE = 0x00000041 + CKR_FUNCTION_CANCELED = 0x00000050 + CKR_FUNCTION_NOT_PARALLEL = 0x00000051 + CKR_FUNCTION_NOT_SUPPORTED = 0x00000054 + CKR_KEY_HANDLE_INVALID = 0x00000060 + CKR_KEY_SIZE_RANGE = 0x00000062 + CKR_KEY_TYPE_INCONSISTENT = 0x00000063 + CKR_KEY_NOT_NEEDED = 0x00000064 + CKR_KEY_CHANGED = 0x00000065 + CKR_KEY_NEEDED = 0x00000066 + CKR_KEY_INDIGESTIBLE = 0x00000067 + 
CKR_KEY_FUNCTION_NOT_PERMITTED = 0x00000068 + CKR_KEY_NOT_WRAPPABLE = 0x00000069 + CKR_KEY_UNEXTRACTABLE = 0x0000006A + CKR_MECHANISM_INVALID = 0x00000070 + CKR_MECHANISM_PARAM_INVALID = 0x00000071 + CKR_OBJECT_HANDLE_INVALID = 0x00000082 + CKR_OPERATION_ACTIVE = 0x00000090 + CKR_OPERATION_NOT_INITIALIZED = 0x00000091 + CKR_PIN_INCORRECT = 0x000000A0 + CKR_PIN_INVALID = 0x000000A1 + CKR_PIN_LEN_RANGE = 0x000000A2 + CKR_PIN_EXPIRED = 0x000000A3 + CKR_PIN_LOCKED = 0x000000A4 + CKR_SESSION_CLOSED = 0x000000B0 + CKR_SESSION_COUNT = 0x000000B1 + CKR_SESSION_HANDLE_INVALID = 0x000000B3 + CKR_SESSION_PARALLEL_NOT_SUPPORTED = 0x000000B4 + CKR_SESSION_READ_ONLY = 0x000000B5 + CKR_SESSION_EXISTS = 0x000000B6 + CKR_SESSION_READ_ONLY_EXISTS = 0x000000B7 + CKR_SESSION_READ_WRITE_SO_EXISTS = 0x000000B8 + CKR_SIGNATURE_INVALID = 0x000000C0 + CKR_SIGNATURE_LEN_RANGE = 0x000000C1 + CKR_TEMPLATE_INCOMPLETE = 0x000000D0 + CKR_TEMPLATE_INCONSISTENT = 0x000000D1 + CKR_TOKEN_NOT_PRESENT = 0x000000E0 + CKR_TOKEN_NOT_RECOGNIZED = 0x000000E1 + CKR_TOKEN_WRITE_PROTECTED = 0x000000E2 + CKR_UNWRAPPING_KEY_HANDLE_INVALID = 0x000000F0 + CKR_UNWRAPPING_KEY_SIZE_RANGE = 0x000000F1 + CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT = 0x000000F2 + CKR_USER_ALREADY_LOGGED_IN = 0x00000100 + CKR_USER_NOT_LOGGED_IN = 0x00000101 + CKR_USER_PIN_NOT_INITIALIZED = 0x00000102 + CKR_USER_TYPE_INVALID = 0x00000103 + CKR_USER_ANOTHER_ALREADY_LOGGED_IN = 0x00000104 + CKR_USER_TOO_MANY_TYPES = 0x00000105 + CKR_WRAPPED_KEY_INVALID = 0x00000110 + CKR_WRAPPED_KEY_LEN_RANGE = 0x00000112 + CKR_WRAPPING_KEY_HANDLE_INVALID = 0x00000113 + CKR_WRAPPING_KEY_SIZE_RANGE = 0x00000114 + CKR_WRAPPING_KEY_TYPE_INCONSISTENT = 0x00000115 + CKR_RANDOM_SEED_NOT_SUPPORTED = 0x00000120 + CKR_RANDOM_NO_RNG = 0x00000121 + CKR_DOMAIN_PARAMS_INVALID = 0x00000130 + CKR_CURVE_NOT_SUPPORTED = 0x00000140 + CKR_BUFFER_TOO_SMALL = 0x00000150 + CKR_SAVED_STATE_INVALID = 0x00000160 + CKR_INFORMATION_SENSITIVE = 0x00000170 + CKR_STATE_UNSAVEABLE = 0x00000180 + CKR_CRYPTOKI_NOT_INITIALIZED = 0x00000190 + CKR_CRYPTOKI_ALREADY_INITIALIZED = 0x00000191 + CKR_MUTEX_BAD = 0x000001A0 + CKR_MUTEX_NOT_LOCKED = 0x000001A1 + CKR_NEW_PIN_MODE = 0x000001B0 + CKR_NEXT_OTP = 0x000001B1 + CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0 + CKR_FIPS_SELF_TEST_FAILED = 0x000001C1 + CKR_LIBRARY_LOAD_FAILED = 0x000001C2 + CKR_PIN_TOO_WEAK = 0x000001C3 + CKR_PUBLIC_KEY_INVALID = 0x000001C4 + CKR_FUNCTION_REJECTED = 0x00000200 + CKR_VENDOR_DEFINED = 0x80000000 + CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001 + CKF_OS_LOCKING_OK = 0x00000002 + CKF_DONT_BLOCK = 1 + CKF_NEXT_OTP = 0x00000001 + CKF_EXCLUDE_TIME = 0x00000002 + CKF_EXCLUDE_COUNTER = 0x00000004 + CKF_EXCLUDE_CHALLENGE = 0x00000008 + CKF_EXCLUDE_PIN = 0x00000010 + CKF_USER_FRIENDLY_OTP = 0x00000020 + CKD_NULL = 0x00000001 + CKD_SHA1_KDF = 0x00000002 +) + +// Special return values defined in PKCS#11 v2.40 section 3.2. +const ( + // CK_EFFECTIVELY_INFINITE may be returned in the CK_TOKEN_INFO fields ulMaxSessionCount and ulMaxRwSessionCount. + // It indicates there is no practical limit on the number of sessions. + CK_EFFECTIVELY_INFINITE = 0 + + // CK_UNAVAILABLE_INFORMATION may be returned for several fields within CK_TOKEN_INFO. It indicates + // the token is unable or unwilling to provide the requested information. 
+ CK_UNAVAILABLE_INFORMATION = ^uint(0) +) diff --git a/vendor/github.com/miekg/pkcs11/error.go b/vendor/github.com/miekg/pkcs11/error.go new file mode 100644 index 000000000..7df0e93a6 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/error.go @@ -0,0 +1,98 @@ +// Copyright 2013 Miek Gieben. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs11 + +// awk '/#define CKR_/{ print $3":\""$2"\"," }' pkcs11t.h + +var strerror = map[uint]string{ + 0x00000000: "CKR_OK", + 0x00000001: "CKR_CANCEL", + 0x00000002: "CKR_HOST_MEMORY", + 0x00000003: "CKR_SLOT_ID_INVALID", + 0x00000005: "CKR_GENERAL_ERROR", + 0x00000006: "CKR_FUNCTION_FAILED", + 0x00000007: "CKR_ARGUMENTS_BAD", + 0x00000008: "CKR_NO_EVENT", + 0x00000009: "CKR_NEED_TO_CREATE_THREADS", + 0x0000000A: "CKR_CANT_LOCK", + 0x00000010: "CKR_ATTRIBUTE_READ_ONLY", + 0x00000011: "CKR_ATTRIBUTE_SENSITIVE", + 0x00000012: "CKR_ATTRIBUTE_TYPE_INVALID", + 0x00000013: "CKR_ATTRIBUTE_VALUE_INVALID", + 0x00000020: "CKR_DATA_INVALID", + 0x00000021: "CKR_DATA_LEN_RANGE", + 0x00000030: "CKR_DEVICE_ERROR", + 0x00000031: "CKR_DEVICE_MEMORY", + 0x00000032: "CKR_DEVICE_REMOVED", + 0x00000040: "CKR_ENCRYPTED_DATA_INVALID", + 0x00000041: "CKR_ENCRYPTED_DATA_LEN_RANGE", + 0x00000050: "CKR_FUNCTION_CANCELED", + 0x00000051: "CKR_FUNCTION_NOT_PARALLEL", + 0x00000054: "CKR_FUNCTION_NOT_SUPPORTED", + 0x00000060: "CKR_KEY_HANDLE_INVALID", + 0x00000062: "CKR_KEY_SIZE_RANGE", + 0x00000063: "CKR_KEY_TYPE_INCONSISTENT", + 0x00000064: "CKR_KEY_NOT_NEEDED", + 0x00000065: "CKR_KEY_CHANGED", + 0x00000066: "CKR_KEY_NEEDED", + 0x00000067: "CKR_KEY_INDIGESTIBLE", + 0x00000068: "CKR_KEY_FUNCTION_NOT_PERMITTED", + 0x00000069: "CKR_KEY_NOT_WRAPPABLE", + 0x0000006A: "CKR_KEY_UNEXTRACTABLE", + 0x00000070: "CKR_MECHANISM_INVALID", + 0x00000071: "CKR_MECHANISM_PARAM_INVALID", + 0x00000082: "CKR_OBJECT_HANDLE_INVALID", + 0x00000090: "CKR_OPERATION_ACTIVE", + 0x00000091: "CKR_OPERATION_NOT_INITIALIZED", + 0x000000A0: "CKR_PIN_INCORRECT", + 0x000000A1: "CKR_PIN_INVALID", + 0x000000A2: "CKR_PIN_LEN_RANGE", + 0x000000A3: "CKR_PIN_EXPIRED", + 0x000000A4: "CKR_PIN_LOCKED", + 0x000000B0: "CKR_SESSION_CLOSED", + 0x000000B1: "CKR_SESSION_COUNT", + 0x000000B3: "CKR_SESSION_HANDLE_INVALID", + 0x000000B4: "CKR_SESSION_PARALLEL_NOT_SUPPORTED", + 0x000000B5: "CKR_SESSION_READ_ONLY", + 0x000000B6: "CKR_SESSION_EXISTS", + 0x000000B7: "CKR_SESSION_READ_ONLY_EXISTS", + 0x000000B8: "CKR_SESSION_READ_WRITE_SO_EXISTS", + 0x000000C0: "CKR_SIGNATURE_INVALID", + 0x000000C1: "CKR_SIGNATURE_LEN_RANGE", + 0x000000D0: "CKR_TEMPLATE_INCOMPLETE", + 0x000000D1: "CKR_TEMPLATE_INCONSISTENT", + 0x000000E0: "CKR_TOKEN_NOT_PRESENT", + 0x000000E1: "CKR_TOKEN_NOT_RECOGNIZED", + 0x000000E2: "CKR_TOKEN_WRITE_PROTECTED", + 0x000000F0: "CKR_UNWRAPPING_KEY_HANDLE_INVALID", + 0x000000F1: "CKR_UNWRAPPING_KEY_SIZE_RANGE", + 0x000000F2: "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT", + 0x00000100: "CKR_USER_ALREADY_LOGGED_IN", + 0x00000101: "CKR_USER_NOT_LOGGED_IN", + 0x00000102: "CKR_USER_PIN_NOT_INITIALIZED", + 0x00000103: "CKR_USER_TYPE_INVALID", + 0x00000104: "CKR_USER_ANOTHER_ALREADY_LOGGED_IN", + 0x00000105: "CKR_USER_TOO_MANY_TYPES", + 0x00000110: "CKR_WRAPPED_KEY_INVALID", + 0x00000112: "CKR_WRAPPED_KEY_LEN_RANGE", + 0x00000113: "CKR_WRAPPING_KEY_HANDLE_INVALID", + 0x00000114: "CKR_WRAPPING_KEY_SIZE_RANGE", + 0x00000115: "CKR_WRAPPING_KEY_TYPE_INCONSISTENT", + 0x00000120: "CKR_RANDOM_SEED_NOT_SUPPORTED", + 0x00000121: "CKR_RANDOM_NO_RNG", + 
0x00000130: "CKR_DOMAIN_PARAMS_INVALID", + 0x00000150: "CKR_BUFFER_TOO_SMALL", + 0x00000160: "CKR_SAVED_STATE_INVALID", + 0x00000170: "CKR_INFORMATION_SENSITIVE", + 0x00000180: "CKR_STATE_UNSAVEABLE", + 0x00000190: "CKR_CRYPTOKI_NOT_INITIALIZED", + 0x00000191: "CKR_CRYPTOKI_ALREADY_INITIALIZED", + 0x000001A0: "CKR_MUTEX_BAD", + 0x000001A1: "CKR_MUTEX_NOT_LOCKED", + 0x000001B0: "CKR_NEW_PIN_MODE", + 0x000001B1: "CKR_NEXT_OTP", + 0x00000200: "CKR_FUNCTION_REJECTED", + 0x80000000: "CKR_VENDOR_DEFINED", +} diff --git a/vendor/github.com/miekg/pkcs11/go.mod b/vendor/github.com/miekg/pkcs11/go.mod new file mode 100644 index 000000000..704296fc9 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/go.mod @@ -0,0 +1,3 @@ +module github.com/miekg/pkcs11 + +go 1.12 diff --git a/vendor/github.com/miekg/pkcs11/hsm.db b/vendor/github.com/miekg/pkcs11/hsm.db Binary files differnew file mode 100644 index 000000000..eb3f10dad --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/hsm.db diff --git a/vendor/github.com/miekg/pkcs11/params.go b/vendor/github.com/miekg/pkcs11/params.go new file mode 100644 index 000000000..6d9ce96ae --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/params.go @@ -0,0 +1,190 @@ +// Copyright 2013 Miek Gieben. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs11 + +/* +#include <stdlib.h> +#include <string.h> +#include "pkcs11go.h" + +static inline void putOAEPParams(CK_RSA_PKCS_OAEP_PARAMS_PTR params, CK_VOID_PTR pSourceData, CK_ULONG ulSourceDataLen) +{ + params->pSourceData = pSourceData; + params->ulSourceDataLen = ulSourceDataLen; +} + +static inline void putECDH1SharedParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VOID_PTR pSharedData, CK_ULONG ulSharedDataLen) +{ + params->pSharedData = pSharedData; + params->ulSharedDataLen = ulSharedDataLen; +} + +static inline void putECDH1PublicParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VOID_PTR pPublicData, CK_ULONG ulPublicDataLen) +{ + params->pPublicData = pPublicData; + params->ulPublicDataLen = ulPublicDataLen; +} +*/ +import "C" +import "unsafe" + +// GCMParams represents the parameters for the AES-GCM mechanism. +type GCMParams struct { + arena + params *C.CK_GCM_PARAMS + iv []byte + aad []byte + tagSize int +} + +// NewGCMParams returns a pointer to AES-GCM parameters that can be used with the CKM_AES_GCM mechanism. +// The Free() method must be called after the operation is complete. +// +// Note that some HSMs, like CloudHSM, will ignore the IV you pass in and write their +// own. As a result, to support all libraries, memory is not freed +// automatically, so that after the EncryptInit/Encrypt operation the HSM's IV +// can be read back out. It is up to the caller to ensure that Free() is called +// on the GCMParams object at an appropriate time, which is after +// +// Encrypt/Decrypt. 
As an example:
+//
+//	gcmParams := pkcs11.NewGCMParams(make([]byte, 12), nil, 128)
+//	p.ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_GCM, gcmParams)},
+//		aesObjHandle)
+//	ct, _ := p.ctx.Encrypt(session, pt)
+//	iv := gcmParams.IV()
+//	gcmParams.Free()
+//
+func NewGCMParams(iv, aad []byte, tagSize int) *GCMParams {
+	return &GCMParams{
+		iv:      iv,
+		aad:     aad,
+		tagSize: tagSize,
+	}
+}
+
+func cGCMParams(p *GCMParams) []byte {
+	params := C.CK_GCM_PARAMS{
+		ulTagBits: C.CK_ULONG(p.tagSize),
+	}
+	var arena arena
+	if len(p.iv) > 0 {
+		iv, ivLen := arena.Allocate(p.iv)
+		params.pIv = C.CK_BYTE_PTR(iv)
+		params.ulIvLen = ivLen
+		params.ulIvBits = ivLen * 8
+	}
+	if len(p.aad) > 0 {
+		aad, aadLen := arena.Allocate(p.aad)
+		params.pAAD = C.CK_BYTE_PTR(aad)
+		params.ulAADLen = aadLen
+	}
+	p.Free()
+	p.arena = arena
+	p.params = &params
+	return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params)))
+}
+
+// IV returns a copy of the actual IV used for the operation.
+//
+// Some HSMs may ignore the user-specified IV and write their own at the end of
+// the encryption operation; this method allows you to retrieve it.
+func (p *GCMParams) IV() []byte {
+	if p == nil || p.params == nil {
+		return nil
+	}
+	newIv := C.GoBytes(unsafe.Pointer(p.params.pIv), C.int(p.params.ulIvLen))
+	iv := make([]byte, len(newIv))
+	copy(iv, newIv)
+	return iv
+}
+
+// Free deallocates the memory reserved for the HSM to write back the actual IV.
+//
+// This must be called after the entire operation is complete, i.e. after
+// Encrypt or EncryptFinal. It is safe to call Free multiple times.
+func (p *GCMParams) Free() {
+	if p == nil || p.arena == nil {
+		return
+	}
+	p.arena.Free()
+	p.params = nil
+	p.arena = nil
+}
+
+// NewPSSParams creates a CK_RSA_PKCS_PSS_PARAMS structure and returns it as a byte array for use with the CKM_RSA_PKCS_PSS mechanism.
+func NewPSSParams(hashAlg, mgf, saltLength uint) []byte {
+	p := C.CK_RSA_PKCS_PSS_PARAMS{
+		hashAlg: C.CK_MECHANISM_TYPE(hashAlg),
+		mgf:     C.CK_RSA_PKCS_MGF_TYPE(mgf),
+		sLen:    C.CK_ULONG(saltLength),
+	}
+	return C.GoBytes(unsafe.Pointer(&p), C.int(unsafe.Sizeof(p)))
+}
+
+// OAEPParams can be passed to NewMechanism to implement CKM_RSA_PKCS_OAEP.
+type OAEPParams struct {
+	HashAlg    uint
+	MGF        uint
+	SourceType uint
+	SourceData []byte
+}
+
+// NewOAEPParams creates a CK_RSA_PKCS_OAEP_PARAMS structure suitable for use with the CKM_RSA_PKCS_OAEP mechanism.
+func NewOAEPParams(hashAlg, mgf, sourceType uint, sourceData []byte) *OAEPParams {
+	return &OAEPParams{
+		HashAlg:    hashAlg,
+		MGF:        mgf,
+		SourceType: sourceType,
+		SourceData: sourceData,
+	}
+}
+
+func cOAEPParams(p *OAEPParams, arena arena) ([]byte, arena) {
+	params := C.CK_RSA_PKCS_OAEP_PARAMS{
+		hashAlg: C.CK_MECHANISM_TYPE(p.HashAlg),
+		mgf:     C.CK_RSA_PKCS_MGF_TYPE(p.MGF),
+		source:  C.CK_RSA_PKCS_OAEP_SOURCE_TYPE(p.SourceType),
+	}
+	if len(p.SourceData) != 0 {
+		buf, len := arena.Allocate(p.SourceData)
+		// field is unaligned on windows so this has to call into C
+		C.putOAEPParams(&params, buf, len)
+	}
+	return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params))), arena
+}
+
+// ECDH1DeriveParams can be passed to NewMechanism to implement CK_ECDH1_DERIVE_PARAMS.
+type ECDH1DeriveParams struct {
+	KDF           uint
+	SharedData    []byte
+	PublicKeyData []byte
+}
+
+// NewECDH1DeriveParams creates a CK_ECDH1_DERIVE_PARAMS structure suitable for use with the CKM_ECDH1_DERIVE mechanism.
+func NewECDH1DeriveParams(kdf uint, sharedData []byte, publicKeyData []byte) *ECDH1DeriveParams {
+	return &ECDH1DeriveParams{
+		KDF:           kdf,
+		SharedData:    sharedData,
+		PublicKeyData: publicKeyData,
+	}
+}
+
+func cECDH1DeriveParams(p *ECDH1DeriveParams, arena arena) ([]byte, arena) {
+	params := C.CK_ECDH1_DERIVE_PARAMS{
+		kdf: C.CK_EC_KDF_TYPE(p.KDF),
+	}
+
+	// SharedData MUST be null if key derivation function (KDF) is CKD_NULL
+	if len(p.SharedData) != 0 {
+		sharedData, sharedDataLen := arena.Allocate(p.SharedData)
+		C.putECDH1SharedParams(&params, sharedData, sharedDataLen)
+	}
+
+	publicKeyData, publicKeyDataLen := arena.Allocate(p.PublicKeyData)
+	C.putECDH1PublicParams(&params, publicKeyData, publicKeyDataLen)
+
+	return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params))), arena
+}
diff --git a/vendor/github.com/miekg/pkcs11/pkcs11.go b/vendor/github.com/miekg/pkcs11/pkcs11.go
new file mode 100644
index 000000000..e21d23b73
--- /dev/null
+++ b/vendor/github.com/miekg/pkcs11/pkcs11.go
@@ -0,0 +1,1606 @@
+// Copyright 2013 Miek Gieben. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkcs11 is a wrapper around the PKCS#11 cryptographic library.
+package pkcs11
+
+// It is *assumed* that:
+//
+// * Go's uint size == PKCS11's CK_ULONG size
+// * CK_ULONG never overflows a Go int
+
+/*
+#cgo windows CFLAGS: -DPACKED_STRUCTURES
+#cgo linux LDFLAGS: -ldl
+#cgo darwin LDFLAGS: -ldl
+#cgo openbsd LDFLAGS: -ldl
+#cgo freebsd LDFLAGS: -ldl
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "pkcs11go.h"
+
+#ifdef _WIN32
+#include <windows.h>
+
+struct ctx {
+	HMODULE handle;
+	CK_FUNCTION_LIST_PTR sym;
+};
+
+// New initializes a ctx and fills the symbol table.
+struct ctx *New(const char *module)
+{
+	CK_C_GetFunctionList list;
+	struct ctx *c = calloc(1, sizeof(struct ctx));
+	c->handle = LoadLibrary(module);
+	if (c->handle == NULL) {
+		free(c);
+		return NULL;
+	}
+	list = (CK_C_GetFunctionList) GetProcAddress(c->handle, "C_GetFunctionList");
+	if (list == NULL) {
+		free(c);
+		return NULL;
+	}
+	list(&c->sym);
+	return c;
+}
+
+// Destroy cleans up a ctx.
+void Destroy(struct ctx *c)
+{
+	if (!c) {
+		return;
+	}
+	free(c);
+}
+#else
+#include <dlfcn.h>
+
+struct ctx {
+	void *handle;
+	CK_FUNCTION_LIST_PTR sym;
+};
+
+// New initializes a ctx and fills the symbol table.
+struct ctx *New(const char *module)
+{
+	CK_C_GetFunctionList list;
+	struct ctx *c = calloc(1, sizeof(struct ctx));
+	c->handle = dlopen(module, RTLD_LAZY);
+	if (c->handle == NULL) {
+		free(c);
+		return NULL;
+	}
+	list = (CK_C_GetFunctionList) dlsym(c->handle, "C_GetFunctionList");
+	if (list == NULL) {
+		free(c);
+		return NULL;
+	}
+	list(&c->sym);
+	return c;
+}
+
+// Destroy cleans up a ctx.
+void Destroy(struct ctx *c) +{ + if (!c) { + return; + } + if (c->handle == NULL) { + return; + } + if (dlclose(c->handle) < 0) { + return; + } + free(c); +} +#endif + +CK_RV Initialize(struct ctx * c) +{ + CK_C_INITIALIZE_ARGS args; + memset(&args, 0, sizeof(args)); + args.flags = CKF_OS_LOCKING_OK; + return c->sym->C_Initialize(&args); +} + +CK_RV Finalize(struct ctx * c) +{ + return c->sym->C_Finalize(NULL); +} + +CK_RV GetInfo(struct ctx * c, ckInfoPtr info) +{ + CK_INFO p; + CK_RV e = c->sym->C_GetInfo(&p); + if (e != CKR_OK) { + return e; + } + info->cryptokiVersion = p.cryptokiVersion; + memcpy(info->manufacturerID, p.manufacturerID, sizeof(p.manufacturerID)); + info->flags = p.flags; + memcpy(info->libraryDescription, p.libraryDescription, sizeof(p.libraryDescription)); + info->libraryVersion = p.libraryVersion; + return e; +} + +CK_RV GetSlotList(struct ctx * c, CK_BBOOL tokenPresent, + CK_ULONG_PTR * slotList, CK_ULONG_PTR ulCount) +{ + CK_RV e = c->sym->C_GetSlotList(tokenPresent, NULL, ulCount); + if (e != CKR_OK) { + return e; + } + *slotList = calloc(*ulCount, sizeof(CK_SLOT_ID)); + e = c->sym->C_GetSlotList(tokenPresent, *slotList, ulCount); + return e; +} + +CK_RV GetSlotInfo(struct ctx * c, CK_ULONG slotID, CK_SLOT_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetSlotInfo((CK_SLOT_ID) slotID, info); + return e; +} + +CK_RV GetTokenInfo(struct ctx * c, CK_ULONG slotID, CK_TOKEN_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetTokenInfo((CK_SLOT_ID) slotID, info); + return e; +} + +CK_RV GetMechanismList(struct ctx * c, CK_ULONG slotID, + CK_ULONG_PTR * mech, CK_ULONG_PTR mechlen) +{ + CK_RV e = + c->sym->C_GetMechanismList((CK_SLOT_ID) slotID, NULL, mechlen); + // Gemalto's PKCS11 implementation returns CKR_BUFFER_TOO_SMALL on a NULL ptr instead of CKR_OK as the spec states.
+ if (e != CKR_OK && e != CKR_BUFFER_TOO_SMALL) { + return e; + } + *mech = calloc(*mechlen, sizeof(CK_MECHANISM_TYPE)); + e = c->sym->C_GetMechanismList((CK_SLOT_ID) slotID, + (CK_MECHANISM_TYPE_PTR) * mech, mechlen); + return e; +} + +CK_RV GetMechanismInfo(struct ctx * c, CK_ULONG slotID, CK_MECHANISM_TYPE mech, + CK_MECHANISM_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetMechanismInfo((CK_SLOT_ID) slotID, mech, info); + return e; +} + +CK_RV InitToken(struct ctx * c, CK_ULONG slotID, char *pin, CK_ULONG pinlen, + char *label) +{ + CK_RV e = + c->sym->C_InitToken((CK_SLOT_ID) slotID, (CK_UTF8CHAR_PTR) pin, + pinlen, (CK_UTF8CHAR_PTR) label); + return e; +} + +CK_RV InitPIN(struct ctx * c, CK_SESSION_HANDLE sh, char *pin, CK_ULONG pinlen) +{ + CK_RV e = c->sym->C_InitPIN(sh, (CK_UTF8CHAR_PTR) pin, pinlen); + return e; +} + +CK_RV SetPIN(struct ctx * c, CK_SESSION_HANDLE sh, char *oldpin, + CK_ULONG oldpinlen, char *newpin, CK_ULONG newpinlen) +{ + CK_RV e = c->sym->C_SetPIN(sh, (CK_UTF8CHAR_PTR) oldpin, oldpinlen, + (CK_UTF8CHAR_PTR) newpin, newpinlen); + return e; +} + +CK_RV OpenSession(struct ctx * c, CK_ULONG slotID, CK_ULONG flags, + CK_SESSION_HANDLE_PTR session) +{ + CK_RV e = + c->sym->C_OpenSession((CK_SLOT_ID) slotID, (CK_FLAGS) flags, NULL, + NULL, session); + return e; +} + +CK_RV CloseSession(struct ctx * c, CK_SESSION_HANDLE session) +{ + CK_RV e = c->sym->C_CloseSession(session); + return e; +} + +CK_RV CloseAllSessions(struct ctx * c, CK_ULONG slotID) +{ + CK_RV e = c->sym->C_CloseAllSessions(slotID); + return e; +} + +CK_RV GetSessionInfo(struct ctx * c, CK_SESSION_HANDLE session, + CK_SESSION_INFO_PTR info) +{ + CK_RV e = c->sym->C_GetSessionInfo(session, info); + return e; +} + +CK_RV GetOperationState(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * state, CK_ULONG_PTR statelen) +{ + CK_RV rv = c->sym->C_GetOperationState(session, NULL, statelen); + if (rv != CKR_OK) { + return rv; + } + *state = calloc(*statelen, sizeof(CK_BYTE)); + if (*state == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_GetOperationState(session, *state, statelen); + return rv; +} + +CK_RV SetOperationState(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR state, CK_ULONG statelen, + CK_OBJECT_HANDLE encryptkey, CK_OBJECT_HANDLE authkey) +{ + return c->sym->C_SetOperationState(session, state, statelen, encryptkey, + authkey); +} + +CK_RV Login(struct ctx *c, CK_SESSION_HANDLE session, CK_USER_TYPE userType, + char *pin, CK_ULONG pinLen) +{ + if (pinLen == 0) { + pin = NULL; + } + CK_RV e = + c->sym->C_Login(session, userType, (CK_UTF8CHAR_PTR) pin, pinLen); + return e; +} + +CK_RV Logout(struct ctx * c, CK_SESSION_HANDLE session) +{ + CK_RV e = c->sym->C_Logout(session); + return e; +} + +CK_RV CreateObject(struct ctx * c, CK_SESSION_HANDLE session, + CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount, + CK_OBJECT_HANDLE_PTR obj) +{ + return c->sym->C_CreateObject(session, temp, tempCount, obj); +} + +CK_RV CopyObject(struct ctx * c, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE o, + CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount, + CK_OBJECT_HANDLE_PTR obj) +{ + return c->sym->C_CopyObject(session, o, temp, tempCount, obj); +} + +CK_RV DestroyObject(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object) +{ + CK_RV e = c->sym->C_DestroyObject(session, object); + return e; +} + +CK_RV GetObjectSize(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object, CK_ULONG_PTR size) +{ + CK_RV e = c->sym->C_GetObjectSize(session, object, size); + return e; +} + +CK_RV 
GetAttributeValue(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object, CK_ATTRIBUTE_PTR temp, + CK_ULONG templen) +{ + // Call for the first time, check the returned ulValue in the attributes, then + // allocate enough space and try again. + CK_RV e = c->sym->C_GetAttributeValue(session, object, temp, templen); + if (e != CKR_OK) { + return e; + } + CK_ULONG i; + for (i = 0; i < templen; i++) { + if ((CK_LONG) temp[i].ulValueLen == -1) { + // either access denied or no such object + continue; + } + temp[i].pValue = calloc(temp[i].ulValueLen, sizeof(CK_BYTE)); + } + return c->sym->C_GetAttributeValue(session, object, temp, templen); +} + +CK_RV SetAttributeValue(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE object, CK_ATTRIBUTE_PTR temp, + CK_ULONG templen) +{ + return c->sym->C_SetAttributeValue(session, object, temp, templen); +} + +CK_RV FindObjectsInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount) +{ + return c->sym->C_FindObjectsInit(session, temp, tempCount); +} + +CK_RV FindObjects(struct ctx * c, CK_SESSION_HANDLE session, + CK_OBJECT_HANDLE_PTR * obj, CK_ULONG max, + CK_ULONG_PTR objCount) +{ + *obj = calloc(max, sizeof(CK_OBJECT_HANDLE)); + CK_RV e = c->sym->C_FindObjects(session, *obj, max, objCount); + return e; +} + +CK_RV FindObjectsFinal(struct ctx * c, CK_SESSION_HANDLE session) +{ + CK_RV e = c->sym->C_FindObjectsFinal(session); + return e; +} + +CK_RV EncryptInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_EncryptInit(session, mechanism, key); +} + +CK_RV Encrypt(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mlen, CK_BYTE_PTR * enc, CK_ULONG_PTR enclen) +{ + CK_RV rv = c->sym->C_Encrypt(session, message, mlen, NULL, enclen); + if (rv != CKR_OK) { + return rv; + } + *enc = calloc(*enclen, sizeof(CK_BYTE)); + if (*enc == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_Encrypt(session, message, mlen, *enc, enclen); + return rv; +} + +CK_RV EncryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR plain, CK_ULONG plainlen, CK_BYTE_PTR * cipher, + CK_ULONG_PTR cipherlen) +{ + CK_RV rv = + c->sym->C_EncryptUpdate(session, plain, plainlen, NULL, cipherlen); + if (rv != CKR_OK) { + return rv; + } + *cipher = calloc(*cipherlen, sizeof(CK_BYTE)); + if (*cipher == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_EncryptUpdate(session, plain, plainlen, *cipher, + cipherlen); + return rv; +} + +CK_RV EncryptFinal(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * cipher, CK_ULONG_PTR cipherlen) +{ + CK_RV rv = c->sym->C_EncryptFinal(session, NULL, cipherlen); + if (rv != CKR_OK) { + return rv; + } + *cipher = calloc(*cipherlen, sizeof(CK_BYTE)); + if (*cipher == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_EncryptFinal(session, *cipher, cipherlen); + return rv; +} + +CK_RV DecryptInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_DecryptInit(session, mechanism, key); +} + +CK_RV Decrypt(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR cipher, + CK_ULONG clen, CK_BYTE_PTR * plain, CK_ULONG_PTR plainlen) +{ + CK_RV e = c->sym->C_Decrypt(session, cipher, clen, NULL, plainlen); + if (e != CKR_OK) { + return e; + } + *plain = calloc(*plainlen, sizeof(CK_BYTE)); + if (*plain == NULL) { + return CKR_HOST_MEMORY; + } + e = c->sym->C_Decrypt(session, cipher, clen, *plain, plainlen); + return e; +} + 
+CK_RV DecryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR cipher, CK_ULONG cipherlen, CK_BYTE_PTR * part, + CK_ULONG_PTR partlen) +{ + CK_RV rv = + c->sym->C_DecryptUpdate(session, cipher, cipherlen, NULL, partlen); + if (rv != CKR_OK) { + return rv; + } + *part = calloc(*partlen, sizeof(CK_BYTE)); + if (*part == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DecryptUpdate(session, cipher, cipherlen, *part, + partlen); + return rv; +} + +CK_RV DecryptFinal(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * plain, CK_ULONG_PTR plainlen) +{ + CK_RV rv = c->sym->C_DecryptFinal(session, NULL, plainlen); + if (rv != CKR_OK) { + return rv; + } + *plain = calloc(*plainlen, sizeof(CK_BYTE)); + if (*plain == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DecryptFinal(session, *plain, plainlen); + return rv; +} + +CK_RV DigestInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism) +{ + return c->sym->C_DigestInit(session, mechanism); +} + +CK_RV Digest(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mlen, CK_BYTE_PTR * hash, CK_ULONG_PTR hashlen) +{ + CK_RV rv = c->sym->C_Digest(session, message, mlen, NULL, hashlen); + if (rv != CKR_OK) { + return rv; + } + *hash = calloc(*hashlen, sizeof(CK_BYTE)); + if (*hash == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_Digest(session, message, mlen, *hash, hashlen); + return rv; +} + +CK_RV DigestUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR message, CK_ULONG mlen) +{ + CK_RV rv = c->sym->C_DigestUpdate(session, message, mlen); + return rv; +} + +CK_RV DigestKey(struct ctx * c, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE key) +{ + CK_RV rv = c->sym->C_DigestKey(session, key); + return rv; +} + +CK_RV DigestFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR * hash, + CK_ULONG_PTR hashlen) +{ + CK_RV rv = c->sym->C_DigestFinal(session, NULL, hashlen); + if (rv != CKR_OK) { + return rv; + } + *hash = calloc(*hashlen, sizeof(CK_BYTE)); + if (*hash == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DigestFinal(session, *hash, hashlen); + return rv; +} + +CK_RV SignInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_SignInit(session, mechanism, key); +} + +CK_RV Sign(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mlen, CK_BYTE_PTR * sig, CK_ULONG_PTR siglen) +{ + CK_RV rv = c->sym->C_Sign(session, message, mlen, NULL, siglen); + if (rv != CKR_OK) { + return rv; + } + *sig = calloc(*siglen, sizeof(CK_BYTE)); + if (*sig == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_Sign(session, message, mlen, *sig, siglen); + return rv; +} + +CK_RV SignUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR message, CK_ULONG mlen) +{ + CK_RV rv = c->sym->C_SignUpdate(session, message, mlen); + return rv; +} + +CK_RV SignFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR * sig, + CK_ULONG_PTR siglen) +{ + CK_RV rv = c->sym->C_SignFinal(session, NULL, siglen); + if (rv != CKR_OK) { + return rv; + } + *sig = calloc(*siglen, sizeof(CK_BYTE)); + if (*sig == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_SignFinal(session, *sig, siglen); + return rv; +} + +CK_RV SignRecoverInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_SignRecoverInit(session, mechanism, key); +} + +CK_RV SignRecover(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR data, + 
CK_ULONG datalen, CK_BYTE_PTR * sig, CK_ULONG_PTR siglen) +{ + CK_RV rv = c->sym->C_SignRecover(session, data, datalen, NULL, siglen); + if (rv != CKR_OK) { + return rv; + } + *sig = calloc(*siglen, sizeof(CK_BYTE)); + if (*sig == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_SignRecover(session, data, datalen, *sig, siglen); + return rv; +} + +CK_RV VerifyInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_VerifyInit(session, mechanism, key); +} + +CK_RV Verify(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR message, + CK_ULONG mesglen, CK_BYTE_PTR sig, CK_ULONG siglen) +{ + CK_RV rv = c->sym->C_Verify(session, message, mesglen, sig, siglen); + return rv; +} + +CK_RV VerifyUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR part, CK_ULONG partlen) +{ + CK_RV rv = c->sym->C_VerifyUpdate(session, part, partlen); + return rv; +} + +CK_RV VerifyFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR sig, + CK_ULONG siglen) +{ + CK_RV rv = c->sym->C_VerifyFinal(session, sig, siglen); + return rv; +} + +CK_RV VerifyRecoverInit(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key) +{ + return c->sym->C_VerifyRecoverInit(session, mechanism, key); +} + +CK_RV VerifyRecover(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR sig, + CK_ULONG siglen, CK_BYTE_PTR * data, CK_ULONG_PTR datalen) +{ + CK_RV rv = c->sym->C_VerifyRecover(session, sig, siglen, NULL, datalen); + if (rv != CKR_OK) { + return rv; + } + *data = calloc(*datalen, sizeof(CK_BYTE)); + if (*data == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_VerifyRecover(session, sig, siglen, *data, datalen); + return rv; +} + +CK_RV DigestEncryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR part, CK_ULONG partlen, CK_BYTE_PTR * enc, + CK_ULONG_PTR enclen) +{ + CK_RV rv = + c->sym->C_DigestEncryptUpdate(session, part, partlen, NULL, enclen); + if (rv != CKR_OK) { + return rv; + } + *enc = calloc(*enclen, sizeof(CK_BYTE)); + if (*enc == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DigestEncryptUpdate(session, part, partlen, *enc, + enclen); + return rv; +} + +CK_RV DecryptDigestUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR cipher, CK_ULONG cipherlen, + CK_BYTE_PTR * part, CK_ULONG_PTR partlen) +{ + CK_RV rv = + c->sym->C_DecryptDigestUpdate(session, cipher, cipherlen, NULL, + partlen); + if (rv != CKR_OK) { + return rv; + } + *part = calloc(*partlen, sizeof(CK_BYTE)); + if (*part == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_DecryptDigestUpdate(session, cipher, cipherlen, *part, + partlen); + return rv; +} + +CK_RV SignEncryptUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR part, CK_ULONG partlen, CK_BYTE_PTR * enc, + CK_ULONG_PTR enclen) +{ + CK_RV rv = + c->sym->C_SignEncryptUpdate(session, part, partlen, NULL, enclen); + if (rv != CKR_OK) { + return rv; + } + *enc = calloc(*enclen, sizeof(CK_BYTE)); + if (*enc == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_SignEncryptUpdate(session, part, partlen, *enc, enclen); + return rv; +} + +CK_RV DecryptVerifyUpdate(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR cipher, CK_ULONG cipherlen, + CK_BYTE_PTR * part, CK_ULONG_PTR partlen) +{ + CK_RV rv = + c->sym->C_DecryptVerifyUpdate(session, cipher, cipherlen, NULL, + partlen); + if (rv != CKR_OK) { + return rv; + } + *part = calloc(*partlen, sizeof(CK_BYTE)); + if (*part == NULL) { + return CKR_HOST_MEMORY; + } 
+ rv = c->sym->C_DecryptVerifyUpdate(session, cipher, cipherlen, *part, + partlen); + return rv; +} + +CK_RV GenerateKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_ATTRIBUTE_PTR temp, + CK_ULONG tempCount, CK_OBJECT_HANDLE_PTR key) +{ + return c->sym->C_GenerateKey(session, mechanism, temp, tempCount, key); +} + +CK_RV GenerateKeyPair(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_ATTRIBUTE_PTR pub, + CK_ULONG pubCount, CK_ATTRIBUTE_PTR priv, + CK_ULONG privCount, CK_OBJECT_HANDLE_PTR pubkey, + CK_OBJECT_HANDLE_PTR privkey) +{ + return c->sym->C_GenerateKeyPair(session, mechanism, pub, pubCount, + priv, privCount, pubkey, privkey); +} + +CK_RV WrapKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE wrappingkey, + CK_OBJECT_HANDLE key, CK_BYTE_PTR * wrapped, + CK_ULONG_PTR wrappedlen) +{ + CK_RV rv = c->sym->C_WrapKey(session, mechanism, wrappingkey, key, NULL, + wrappedlen); + if (rv != CKR_OK) { + return rv; + } + *wrapped = calloc(*wrappedlen, sizeof(CK_BYTE)); + if (*wrapped == NULL) { + return CKR_HOST_MEMORY; + } + rv = c->sym->C_WrapKey(session, mechanism, wrappingkey, key, *wrapped, + wrappedlen); + return rv; +} + +CK_RV DeriveKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE basekey, + CK_ATTRIBUTE_PTR a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key) +{ + return c->sym->C_DeriveKey(session, mechanism, basekey, a, alen, key); +} + +CK_RV UnwrapKey(struct ctx * c, CK_SESSION_HANDLE session, + CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE unwrappingkey, + CK_BYTE_PTR wrappedkey, CK_ULONG wrappedkeylen, + CK_ATTRIBUTE_PTR a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key) +{ + return c->sym->C_UnwrapKey(session, mechanism, unwrappingkey, wrappedkey, + wrappedkeylen, a, alen, key); +} + +CK_RV SeedRandom(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR seed, + CK_ULONG seedlen) +{ + CK_RV e = c->sym->C_SeedRandom(session, seed, seedlen); + return e; +} + +CK_RV GenerateRandom(struct ctx * c, CK_SESSION_HANDLE session, + CK_BYTE_PTR * rand, CK_ULONG length) +{ + *rand = calloc(length, sizeof(CK_BYTE)); + if (*rand == NULL) { + return CKR_HOST_MEMORY; + } + CK_RV e = c->sym->C_GenerateRandom(session, *rand, length); + return e; +} + +CK_RV WaitForSlotEvent(struct ctx * c, CK_FLAGS flags, CK_ULONG_PTR slot) +{ + CK_RV e = + c->sym->C_WaitForSlotEvent(flags, (CK_SLOT_ID_PTR) slot, NULL); + return e; +} + +static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a) +{ + return a->pValue; +} + +*/ +import "C" +import "strings" + +import "unsafe" + +// Ctx contains the current pkcs11 context. +type Ctx struct { + ctx *C.struct_ctx +} + +// New creates a new context and initializes the module/library for use. +func New(module string) *Ctx { + c := new(Ctx) + mod := C.CString(module) + defer C.free(unsafe.Pointer(mod)) + c.ctx = C.New(mod) + if c.ctx == nil { + return nil + } + return c +} + +// Destroy unloads the module/library and frees any remaining memory. +func (c *Ctx) Destroy() { + if c == nil || c.ctx == nil { + return + } + C.Destroy(c.ctx) + c.ctx = nil +} + +// Initialize initializes the Cryptoki library. +func (c *Ctx) Initialize() error { + e := C.Initialize(c.ctx) + return toError(e) +} + +// Finalize indicates that an application is done with the Cryptoki library. 
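
The New/Initialize and Finalize/Destroy pairs bracket every use of the library. A minimal lifecycle sketch from client code; the module path is hypothetical and errors are reduced to panics to keep the shape visible:

func withModule() {
	p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so") // hypothetical module path
	if p == nil {
		panic("cannot load PKCS#11 module")
	}
	defer p.Destroy() // unload the module last
	if err := p.Initialize(); err != nil {
		panic(err)
	}
	defer p.Finalize() // tell the token we are done before unloading

	slots, err := p.GetSlotList(true) // true: only slots with a token present
	if err != nil {
		panic(err)
	}
	_ = slots // pick a slot, open sessions, ...
}
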
+func (c *Ctx) Finalize() error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.Finalize(c.ctx) + return toError(e) +} + +// GetInfo returns general information about Cryptoki. +func (c *Ctx) GetInfo() (Info, error) { + var p C.ckInfo + e := C.GetInfo(c.ctx, &p) + i := Info{ + CryptokiVersion: toVersion(p.cryptokiVersion), + ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&p.manufacturerID[0]), 32)), " "), + Flags: uint(p.flags), + LibraryDescription: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&p.libraryDescription[0]), 32)), " "), + LibraryVersion: toVersion(p.libraryVersion), + } + return i, toError(e) +} + +// GetSlotList obtains a list of slots in the system. +func (c *Ctx) GetSlotList(tokenPresent bool) ([]uint, error) { + var ( + slotList C.CK_ULONG_PTR + ulCount C.CK_ULONG + ) + e := C.GetSlotList(c.ctx, cBBool(tokenPresent), &slotList, &ulCount) + if toError(e) != nil { + return nil, toError(e) + } + l := toList(slotList, ulCount) + return l, nil +} + +// GetSlotInfo obtains information about a particular slot in the system. +func (c *Ctx) GetSlotInfo(slotID uint) (SlotInfo, error) { + var csi C.CK_SLOT_INFO + e := C.GetSlotInfo(c.ctx, C.CK_ULONG(slotID), &csi) + s := SlotInfo{ + SlotDescription: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&csi.slotDescription[0]), 64)), " "), + ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&csi.manufacturerID[0]), 32)), " "), + Flags: uint(csi.flags), + HardwareVersion: toVersion(csi.hardwareVersion), + FirmwareVersion: toVersion(csi.firmwareVersion), + } + return s, toError(e) +} + +// GetTokenInfo obtains information about a particular token +// in the system. +func (c *Ctx) GetTokenInfo(slotID uint) (TokenInfo, error) { + var cti C.CK_TOKEN_INFO + e := C.GetTokenInfo(c.ctx, C.CK_ULONG(slotID), &cti) + s := TokenInfo{ + Label: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.label[0]), 32)), " "), + ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.manufacturerID[0]), 32)), " "), + Model: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.model[0]), 16)), " "), + SerialNumber: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.serialNumber[0]), 16)), " "), + Flags: uint(cti.flags), + MaxSessionCount: uint(cti.ulMaxSessionCount), + SessionCount: uint(cti.ulSessionCount), + MaxRwSessionCount: uint(cti.ulMaxRwSessionCount), + RwSessionCount: uint(cti.ulRwSessionCount), + MaxPinLen: uint(cti.ulMaxPinLen), + MinPinLen: uint(cti.ulMinPinLen), + TotalPublicMemory: uint(cti.ulTotalPublicMemory), + FreePublicMemory: uint(cti.ulFreePublicMemory), + TotalPrivateMemory: uint(cti.ulTotalPrivateMemory), + FreePrivateMemory: uint(cti.ulFreePrivateMemory), + HardwareVersion: toVersion(cti.hardwareVersion), + FirmwareVersion: toVersion(cti.firmwareVersion), + UTCTime: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&cti.utcTime[0]), 16)), " "), + } + return s, toError(e) +} + +// GetMechanismList obtains a list of mechanism types supported by a token. +func (c *Ctx) GetMechanismList(slotID uint) ([]*Mechanism, error) { + var ( + mech C.CK_ULONG_PTR // in pkcs#11 we're all CK_ULONGs \o/ + mechlen C.CK_ULONG + ) + e := C.GetMechanismList(c.ctx, C.CK_ULONG(slotID), &mech, &mechlen) + if toError(e) != nil { + return nil, toError(e) + } + // Although the function returns only type, cast them back into real + // attributes as this is used in other functions. 
+ m := make([]*Mechanism, int(mechlen)) + for i, typ := range toList(mech, mechlen) { + m[i] = NewMechanism(typ, nil) + } + return m, nil +} + +// GetMechanismInfo obtains information about a particular +// mechanism possibly supported by a token. +func (c *Ctx) GetMechanismInfo(slotID uint, m []*Mechanism) (MechanismInfo, error) { + var cm C.CK_MECHANISM_INFO + e := C.GetMechanismInfo(c.ctx, C.CK_ULONG(slotID), C.CK_MECHANISM_TYPE(m[0].Mechanism), + C.CK_MECHANISM_INFO_PTR(&cm)) + mi := MechanismInfo{ + MinKeySize: uint(cm.ulMinKeySize), + MaxKeySize: uint(cm.ulMaxKeySize), + Flags: uint(cm.flags), + } + return mi, toError(e) +} + +// InitToken initializes a token. The label must be 32 characters long; it is +// blank-padded if shorter and truncated to 32 characters if longer. +func (c *Ctx) InitToken(slotID uint, pin string, label string) error { + p := C.CString(pin) + defer C.free(unsafe.Pointer(p)) + ll := len(label) + for ll < 32 { + label += " " + ll++ + } + l := C.CString(label[:32]) + defer C.free(unsafe.Pointer(l)) + e := C.InitToken(c.ctx, C.CK_ULONG(slotID), p, C.CK_ULONG(len(pin)), l) + return toError(e) +} + +// InitPIN initializes the normal user's PIN. +func (c *Ctx) InitPIN(sh SessionHandle, pin string) error { + p := C.CString(pin) + defer C.free(unsafe.Pointer(p)) + e := C.InitPIN(c.ctx, C.CK_SESSION_HANDLE(sh), p, C.CK_ULONG(len(pin))) + return toError(e) +} + +// SetPIN modifies the PIN of the user who is logged in. +func (c *Ctx) SetPIN(sh SessionHandle, oldpin string, newpin string) error { + old := C.CString(oldpin) + defer C.free(unsafe.Pointer(old)) + new := C.CString(newpin) + defer C.free(unsafe.Pointer(new)) + e := C.SetPIN(c.ctx, C.CK_SESSION_HANDLE(sh), old, C.CK_ULONG(len(oldpin)), new, C.CK_ULONG(len(newpin))) + return toError(e) +} + +// OpenSession opens a session between an application and a token. +func (c *Ctx) OpenSession(slotID uint, flags uint) (SessionHandle, error) { + var s C.CK_SESSION_HANDLE + e := C.OpenSession(c.ctx, C.CK_ULONG(slotID), C.CK_ULONG(flags), C.CK_SESSION_HANDLE_PTR(&s)) + return SessionHandle(s), toError(e) +} + +// CloseSession closes a session between an application and a token. +func (c *Ctx) CloseSession(sh SessionHandle) error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.CloseSession(c.ctx, C.CK_SESSION_HANDLE(sh)) + return toError(e) +} + +// CloseAllSessions closes all sessions with a token. +func (c *Ctx) CloseAllSessions(slotID uint) error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.CloseAllSessions(c.ctx, C.CK_ULONG(slotID)) + return toError(e) +} + +// GetSessionInfo obtains information about the session. +func (c *Ctx) GetSessionInfo(sh SessionHandle) (SessionInfo, error) { + var csi C.CK_SESSION_INFO + e := C.GetSessionInfo(c.ctx, C.CK_SESSION_HANDLE(sh), &csi) + s := SessionInfo{SlotID: uint(csi.slotID), + State: uint(csi.state), + Flags: uint(csi.flags), + DeviceError: uint(csi.ulDeviceError), + } + return s, toError(e) +} + +// GetOperationState obtains the state of the cryptographic operation in a session.
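
A short sketch of the usual setup around OpenSession and Login; the slot ID and PIN are assumed to come from the caller's configuration, and the function name is illustrative:

func openAndLogin(p *pkcs11.Ctx, slot uint, pin string) (pkcs11.SessionHandle, error) {
	session, err := p.OpenSession(slot, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
	if err != nil {
		return 0, err
	}
	if err := p.Login(session, pkcs11.CKU_USER, pin); err != nil {
		p.CloseSession(session) // best effort: drop the half-initialized session
		return 0, err
	}
	return session, nil
}
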
+func (c *Ctx) GetOperationState(sh SessionHandle) ([]byte, error) { + var ( + state C.CK_BYTE_PTR + statelen C.CK_ULONG + ) + e := C.GetOperationState(c.ctx, C.CK_SESSION_HANDLE(sh), &state, &statelen) + defer C.free(unsafe.Pointer(state)) + if toError(e) != nil { + return nil, toError(e) + } + b := C.GoBytes(unsafe.Pointer(state), C.int(statelen)) + return b, nil +} + +// SetOperationState restores the state of the cryptographic operation in a session. +func (c *Ctx) SetOperationState(sh SessionHandle, state []byte, encryptKey, authKey ObjectHandle) error { + e := C.SetOperationState(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_BYTE_PTR(unsafe.Pointer(&state[0])), + C.CK_ULONG(len(state)), C.CK_OBJECT_HANDLE(encryptKey), C.CK_OBJECT_HANDLE(authKey)) + return toError(e) +} + +// Login logs a user into a token. +func (c *Ctx) Login(sh SessionHandle, userType uint, pin string) error { + p := C.CString(pin) + defer C.free(unsafe.Pointer(p)) + e := C.Login(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_USER_TYPE(userType), p, C.CK_ULONG(len(pin))) + return toError(e) +} + +// Logout logs a user out from a token. +func (c *Ctx) Logout(sh SessionHandle) error { + if c.ctx == nil { + return toError(CKR_CRYPTOKI_NOT_INITIALIZED) + } + e := C.Logout(c.ctx, C.CK_SESSION_HANDLE(sh)) + return toError(e) +} + +// CreateObject creates a new object. +func (c *Ctx) CreateObject(sh SessionHandle, temp []*Attribute) (ObjectHandle, error) { + var obj C.CK_OBJECT_HANDLE + arena, t, tcount := cAttributeList(temp) + defer arena.Free() + e := C.CreateObject(c.ctx, C.CK_SESSION_HANDLE(sh), t, tcount, C.CK_OBJECT_HANDLE_PTR(&obj)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(obj), nil + } + return 0, e1 +} + +// CopyObject copies an object, creating a new object for the copy. +func (c *Ctx) CopyObject(sh SessionHandle, o ObjectHandle, temp []*Attribute) (ObjectHandle, error) { + var obj C.CK_OBJECT_HANDLE + arena, t, tcount := cAttributeList(temp) + defer arena.Free() + + e := C.CopyObject(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), t, tcount, C.CK_OBJECT_HANDLE_PTR(&obj)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(obj), nil + } + return 0, e1 +} + +// DestroyObject destroys an object. +func (c *Ctx) DestroyObject(sh SessionHandle, oh ObjectHandle) error { + e := C.DestroyObject(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(oh)) + return toError(e) +} + +// GetObjectSize gets the size of an object in bytes. +func (c *Ctx) GetObjectSize(sh SessionHandle, oh ObjectHandle) (uint, error) { + var size C.CK_ULONG + e := C.GetObjectSize(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(oh), &size) + return uint(size), toError(e) +} + +// GetAttributeValue obtains the value of one or more object attributes. 
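
The FindObjects* triple and GetAttributeValue combine into a common lookup pattern: search with a template of concrete values, then read attributes back with a template of types only. A sketch assuming an open, logged-in session:

func firstSecretKeyLabel(p *pkcs11.Ctx, session pkcs11.SessionHandle) (string, error) {
	find := []*pkcs11.Attribute{pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY)}
	if err := p.FindObjectsInit(session, find); err != nil {
		return "", err
	}
	objs, _, err := p.FindObjects(session, 10) // up to 10 handles per call
	if ferr := p.FindObjectsFinal(session); err == nil {
		err = ferr
	}
	if err != nil || len(objs) == 0 {
		return "", err
	}
	// Template in: attribute types with nil values; the token fills in Value.
	attrs, err := p.GetAttributeValue(session, objs[0], []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_LABEL, nil),
	})
	if err != nil {
		return "", err
	}
	return string(attrs[0].Value), nil
}
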
+func (c *Ctx) GetAttributeValue(sh SessionHandle, o ObjectHandle, a []*Attribute) ([]*Attribute, error) { + // copy the attribute list and make all the values nil, so that + // the C function can (allocate) fill them in + pa := make([]C.CK_ATTRIBUTE, len(a)) + for i := 0; i < len(a); i++ { + pa[i]._type = C.CK_ATTRIBUTE_TYPE(a[i].Type) + } + e := C.GetAttributeValue(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), &pa[0], C.CK_ULONG(len(a))) + if err := toError(e); err != nil { + return nil, err + } + a1 := make([]*Attribute, len(a)) + for i, c := range pa { + x := new(Attribute) + x.Type = uint(c._type) + if int(c.ulValueLen) != -1 { + buf := unsafe.Pointer(C.getAttributePval(&c)) + x.Value = C.GoBytes(buf, C.int(c.ulValueLen)) + C.free(buf) + } + a1[i] = x + } + return a1, nil +} + +// SetAttributeValue modifies the value of one or more object attributes +func (c *Ctx) SetAttributeValue(sh SessionHandle, o ObjectHandle, a []*Attribute) error { + arena, pa, palen := cAttributeList(a) + defer arena.Free() + e := C.SetAttributeValue(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), pa, palen) + return toError(e) +} + +// FindObjectsInit initializes a search for token and session +// objects that match a template. +func (c *Ctx) FindObjectsInit(sh SessionHandle, temp []*Attribute) error { + arena, t, tcount := cAttributeList(temp) + defer arena.Free() + e := C.FindObjectsInit(c.ctx, C.CK_SESSION_HANDLE(sh), t, tcount) + return toError(e) +} + +// FindObjects continues a search for token and session +// objects that match a template, obtaining additional object +// handles. Calling the function repeatedly may yield additional results until +// an empty slice is returned. +// +// The returned boolean value is deprecated and should be ignored. +func (c *Ctx) FindObjects(sh SessionHandle, max int) ([]ObjectHandle, bool, error) { + var ( + objectList C.CK_OBJECT_HANDLE_PTR + ulCount C.CK_ULONG + ) + e := C.FindObjects(c.ctx, C.CK_SESSION_HANDLE(sh), &objectList, C.CK_ULONG(max), &ulCount) + if toError(e) != nil { + return nil, false, toError(e) + } + l := toList(C.CK_ULONG_PTR(unsafe.Pointer(objectList)), ulCount) + // Make again a new list of the correct type. + // This is copying data, but this is not an often used function. + o := make([]ObjectHandle, len(l)) + for i, v := range l { + o[i] = ObjectHandle(v) + } + return o, ulCount > C.CK_ULONG(max), nil +} + +// FindObjectsFinal finishes a search for token and session objects. +func (c *Ctx) FindObjectsFinal(sh SessionHandle) error { + e := C.FindObjectsFinal(c.ctx, C.CK_SESSION_HANDLE(sh)) + return toError(e) +} + +// EncryptInit initializes an encryption operation. +func (c *Ctx) EncryptInit(sh SessionHandle, m []*Mechanism, o ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.EncryptInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(o)) + return toError(e) +} + +// Encrypt encrypts single-part data. +func (c *Ctx) Encrypt(sh SessionHandle, message []byte) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.Encrypt(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message)), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + s := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return s, nil +} + +// EncryptUpdate continues a multiple-part encryption operation. 
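
EncryptInit, EncryptUpdate and EncryptFinal implement the usual init/update/final flow; a sketch in which the mechanism and key handle are assumed to be prepared by the caller:

func encryptChunks(p *pkcs11.Ctx, session pkcs11.SessionHandle, mech []*pkcs11.Mechanism, key pkcs11.ObjectHandle, chunks [][]byte) ([]byte, error) {
	if err := p.EncryptInit(session, mech, key); err != nil {
		return nil, err
	}
	var out []byte
	for _, chunk := range chunks {
		part, err := p.EncryptUpdate(session, chunk)
		if err != nil {
			return nil, err
		}
		out = append(out, part...)
	}
	final, err := p.EncryptFinal(session) // flushes any buffered trailing block
	if err != nil {
		return nil, err
	}
	return append(out, final...), nil
}
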
+func (c *Ctx) EncryptUpdate(sh SessionHandle, plain []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.EncryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(plain), C.CK_ULONG(len(plain)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// EncryptFinal finishes a multiple-part encryption operation. +func (c *Ctx) EncryptFinal(sh SessionHandle) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.EncryptFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return h, nil +} + +// DecryptInit initializes a decryption operation. +func (c *Ctx) DecryptInit(sh SessionHandle, m []*Mechanism, o ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.DecryptInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(o)) + return toError(e) +} + +// Decrypt decrypts encrypted data in a single part. +func (c *Ctx) Decrypt(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + plain C.CK_BYTE_PTR + plainlen C.CK_ULONG + ) + e := C.Decrypt(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &plain, &plainlen) + if toError(e) != nil { + return nil, toError(e) + } + s := C.GoBytes(unsafe.Pointer(plain), C.int(plainlen)) + C.free(unsafe.Pointer(plain)) + return s, nil +} + +// DecryptUpdate continues a multiple-part decryption operation. +func (c *Ctx) DecryptUpdate(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.DecryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// DecryptFinal finishes a multiple-part decryption operation. +func (c *Ctx) DecryptFinal(sh SessionHandle) ([]byte, error) { + var ( + plain C.CK_BYTE_PTR + plainlen C.CK_ULONG + ) + e := C.DecryptFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &plain, &plainlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(plain), C.int(plainlen)) + C.free(unsafe.Pointer(plain)) + return h, nil +} + +// DigestInit initializes a message-digesting operation. +func (c *Ctx) DigestInit(sh SessionHandle, m []*Mechanism) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.DigestInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech) + return toError(e) +} + +// Digest digests message in a single part. +func (c *Ctx) Digest(sh SessionHandle, message []byte) ([]byte, error) { + var ( + hash C.CK_BYTE_PTR + hashlen C.CK_ULONG + ) + e := C.Digest(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message)), &hash, &hashlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(hash), C.int(hashlen)) + C.free(unsafe.Pointer(hash)) + return h, nil +} + +// DigestUpdate continues a multiple-part message-digesting operation. 
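
The digest calls follow the same init/update/final shape; this sketch hashes an io.Reader with CKM_SHA256 and assumes only an open session (plus the standard io import):

func sha256Stream(p *pkcs11.Ctx, session pkcs11.SessionHandle, r io.Reader) ([]byte, error) {
	mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256, nil)}
	if err := p.DigestInit(session, mech); err != nil {
		return nil, err
	}
	buf := make([]byte, 32*1024)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			if derr := p.DigestUpdate(session, buf[:n]); derr != nil {
				return nil, derr
			}
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
	}
	return p.DigestFinal(session)
}
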
+func (c *Ctx) DigestUpdate(sh SessionHandle, message []byte) error { + e := C.DigestUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message))) + if toError(e) != nil { + return toError(e) + } + return nil +} + +// DigestKey continues a multi-part message-digesting +// operation, by digesting the value of a secret key as part of +// the data already digested. +func (c *Ctx) DigestKey(sh SessionHandle, key ObjectHandle) error { + e := C.DigestKey(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(key)) + if toError(e) != nil { + return toError(e) + } + return nil +} + +// DigestFinal finishes a multiple-part message-digesting operation. +func (c *Ctx) DigestFinal(sh SessionHandle) ([]byte, error) { + var ( + hash C.CK_BYTE_PTR + hashlen C.CK_ULONG + ) + e := C.DigestFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &hash, &hashlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(hash), C.int(hashlen)) + C.free(unsafe.Pointer(hash)) + return h, nil +} + +// SignInit initializes a signature (private key encryption) +// operation, where the signature is (will be) an appendix to +// the data, and plaintext cannot be recovered from the signature. +func (c *Ctx) SignInit(sh SessionHandle, m []*Mechanism, o ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.SignInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(o)) + return toError(e) +} + +// Sign signs (encrypts with private key) data in a single part, where the signature +// is (will be) an appendix to the data, and plaintext cannot be recovered from the signature. +func (c *Ctx) Sign(sh SessionHandle, message []byte) ([]byte, error) { + var ( + sig C.CK_BYTE_PTR + siglen C.CK_ULONG + ) + e := C.Sign(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message)), &sig, &siglen) + if toError(e) != nil { + return nil, toError(e) + } + s := C.GoBytes(unsafe.Pointer(sig), C.int(siglen)) + C.free(unsafe.Pointer(sig)) + return s, nil +} + +// SignUpdate continues a multiple-part signature operation, +// where the signature is (will be) an appendix to the data, +// and plaintext cannot be recovered from the signature. +func (c *Ctx) SignUpdate(sh SessionHandle, message []byte) error { + e := C.SignUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(message), C.CK_ULONG(len(message))) + return toError(e) +} + +// SignFinal finishes a multiple-part signature operation returning the signature. +func (c *Ctx) SignFinal(sh SessionHandle) ([]byte, error) { + var ( + sig C.CK_BYTE_PTR + siglen C.CK_ULONG + ) + e := C.SignFinal(c.ctx, C.CK_SESSION_HANDLE(sh), &sig, &siglen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(sig), C.int(siglen)) + C.free(unsafe.Pointer(sig)) + return h, nil +} + +// SignRecoverInit initializes a signature operation, where the data can be recovered from the signature. +func (c *Ctx) SignRecoverInit(sh SessionHandle, m []*Mechanism, key ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.SignRecoverInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(key)) + return toError(e) +} + +// SignRecover signs data in a single operation, where the data can be recovered from the signature. 
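
A single-part signing sketch that ties NewPSSParams back to SignInit and Sign; the private-key handle is assumed, and the 32-byte salt length matches SHA-256:

func signPSS(p *pkcs11.Ctx, session pkcs11.SessionHandle, priv pkcs11.ObjectHandle, msg []byte) ([]byte, error) {
	params := pkcs11.NewPSSParams(pkcs11.CKM_SHA256, pkcs11.CKG_MGF1_SHA256, 32)
	mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256_RSA_PKCS_PSS, params)}
	if err := p.SignInit(session, mech, priv); err != nil {
		return nil, err
	}
	return p.Sign(session, msg) // the token hashes msg with SHA-256 internally
}
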
+func (c *Ctx) SignRecover(sh SessionHandle, data []byte) ([]byte, error) { + var ( + sig C.CK_BYTE_PTR + siglen C.CK_ULONG + ) + e := C.SignRecover(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(data), C.CK_ULONG(len(data)), &sig, &siglen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(sig), C.int(siglen)) + C.free(unsafe.Pointer(sig)) + return h, nil +} + +// VerifyInit initializes a verification operation, where the +// signature is an appendix to the data, and plaintext cannot +// be recovered from the signature (e.g. DSA). +func (c *Ctx) VerifyInit(sh SessionHandle, m []*Mechanism, key ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.VerifyInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(key)) + return toError(e) +} + +// Verify verifies a signature in a single-part operation, +// where the signature is an appendix to the data, and plaintext +// cannot be recovered from the signature. +func (c *Ctx) Verify(sh SessionHandle, data []byte, signature []byte) error { + e := C.Verify(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(data), C.CK_ULONG(len(data)), cMessage(signature), C.CK_ULONG(len(signature))) + return toError(e) +} + +// VerifyUpdate continues a multiple-part verification +// operation, where the signature is an appendix to the data, +// and plaintext cannot be recovered from the signature. +func (c *Ctx) VerifyUpdate(sh SessionHandle, part []byte) error { + e := C.VerifyUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(part), C.CK_ULONG(len(part))) + return toError(e) +} + +// VerifyFinal finishes a multiple-part verification +// operation, checking the signature. +func (c *Ctx) VerifyFinal(sh SessionHandle, signature []byte) error { + e := C.VerifyFinal(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(signature), C.CK_ULONG(len(signature))) + return toError(e) +} + +// VerifyRecoverInit initializes a signature verification +// operation, where the data is recovered from the signature. +func (c *Ctx) VerifyRecoverInit(sh SessionHandle, m []*Mechanism, key ObjectHandle) error { + arena, mech := cMechanism(m) + defer arena.Free() + e := C.VerifyRecoverInit(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(key)) + return toError(e) +} + +// VerifyRecover verifies a signature in a single-part +// operation, where the data is recovered from the signature. +func (c *Ctx) VerifyRecover(sh SessionHandle, signature []byte) ([]byte, error) { + var ( + data C.CK_BYTE_PTR + datalen C.CK_ULONG + ) + e := C.VerifyRecover(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(signature), C.CK_ULONG(len(signature)), &data, &datalen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(data), C.int(datalen)) + C.free(unsafe.Pointer(data)) + return h, nil +} + +// DigestEncryptUpdate continues a multiple-part digesting and encryption operation. +func (c *Ctx) DigestEncryptUpdate(sh SessionHandle, part []byte) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.DigestEncryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(part), C.CK_ULONG(len(part)), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return h, nil +} + +// DecryptDigestUpdate continues a multiple-part decryption and digesting operation.
+func (c *Ctx) DecryptDigestUpdate(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.DecryptDigestUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// SignEncryptUpdate continues a multiple-part signing and encryption operation. +func (c *Ctx) SignEncryptUpdate(sh SessionHandle, part []byte) ([]byte, error) { + var ( + enc C.CK_BYTE_PTR + enclen C.CK_ULONG + ) + e := C.SignEncryptUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(part), C.CK_ULONG(len(part)), &enc, &enclen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(enc), C.int(enclen)) + C.free(unsafe.Pointer(enc)) + return h, nil +} + +// DecryptVerifyUpdate continues a multiple-part decryption and verify operation. +func (c *Ctx) DecryptVerifyUpdate(sh SessionHandle, cipher []byte) ([]byte, error) { + var ( + part C.CK_BYTE_PTR + partlen C.CK_ULONG + ) + e := C.DecryptVerifyUpdate(c.ctx, C.CK_SESSION_HANDLE(sh), cMessage(cipher), C.CK_ULONG(len(cipher)), &part, &partlen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(part), C.int(partlen)) + C.free(unsafe.Pointer(part)) + return h, nil +} + +// GenerateKey generates a secret key, creating a new key object. +func (c *Ctx) GenerateKey(sh SessionHandle, m []*Mechanism, temp []*Attribute) (ObjectHandle, error) { + var key C.CK_OBJECT_HANDLE + attrarena, t, tcount := cAttributeList(temp) + defer attrarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.GenerateKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, t, tcount, C.CK_OBJECT_HANDLE_PTR(&key)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(key), nil + } + return 0, e1 +} + +// GenerateKeyPair generates a public-key/private-key pair creating new key objects. +func (c *Ctx) GenerateKeyPair(sh SessionHandle, m []*Mechanism, public, private []*Attribute) (ObjectHandle, ObjectHandle, error) { + var ( + pubkey C.CK_OBJECT_HANDLE + privkey C.CK_OBJECT_HANDLE + ) + pubarena, pub, pubcount := cAttributeList(public) + defer pubarena.Free() + privarena, priv, privcount := cAttributeList(private) + defer privarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.GenerateKeyPair(c.ctx, C.CK_SESSION_HANDLE(sh), mech, pub, pubcount, priv, privcount, C.CK_OBJECT_HANDLE_PTR(&pubkey), C.CK_OBJECT_HANDLE_PTR(&privkey)) + e1 := toError(e) + if e1 == nil { + return ObjectHandle(pubkey), ObjectHandle(privkey), nil + } + return 0, 0, e1 +} + +// WrapKey wraps (i.e., encrypts) a key. +func (c *Ctx) WrapKey(sh SessionHandle, m []*Mechanism, wrappingkey, key ObjectHandle) ([]byte, error) { + var ( + wrappedkey C.CK_BYTE_PTR + wrappedkeylen C.CK_ULONG + ) + arena, mech := cMechanism(m) + defer arena.Free() + e := C.WrapKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(wrappingkey), C.CK_OBJECT_HANDLE(key), &wrappedkey, &wrappedkeylen) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(wrappedkey), C.int(wrappedkeylen)) + C.free(unsafe.Pointer(wrappedkey)) + return h, nil +} + +// UnwrapKey unwraps (decrypts) a wrapped key, creating a new key object. 
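
A sketch of generating a token-resident AES key and exporting it under a wrapping key; the CKM_AES_KEY_WRAP mechanism and the wrapper handle are assumptions, and some tokens require different attributes:

func generateAndWrap(p *pkcs11.Ctx, session pkcs11.SessionHandle, wrapper pkcs11.ObjectHandle) ([]byte, error) {
	template := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
		pkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, true),
		pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, true), // must be extractable to wrap it
		pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, 32),     // AES-256
	}
	key, err := p.GenerateKey(session,
		[]*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_KEY_GEN, nil)}, template)
	if err != nil {
		return nil, err
	}
	mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_KEY_WRAP, nil)}
	return p.WrapKey(session, mech, wrapper, key)
}
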
+func (c *Ctx) UnwrapKey(sh SessionHandle, m []*Mechanism, unwrappingkey ObjectHandle, wrappedkey []byte, a []*Attribute) (ObjectHandle, error) { + var key C.CK_OBJECT_HANDLE + attrarena, ac, aclen := cAttributeList(a) + defer attrarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.UnwrapKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(unwrappingkey), C.CK_BYTE_PTR(unsafe.Pointer(&wrappedkey[0])), C.CK_ULONG(len(wrappedkey)), ac, aclen, &key) + return ObjectHandle(key), toError(e) +} + +// DeriveKey derives a key from a base key, creating a new key object. +func (c *Ctx) DeriveKey(sh SessionHandle, m []*Mechanism, basekey ObjectHandle, a []*Attribute) (ObjectHandle, error) { + var key C.CK_OBJECT_HANDLE + attrarena, ac, aclen := cAttributeList(a) + defer attrarena.Free() + mecharena, mech := cMechanism(m) + defer mecharena.Free() + e := C.DeriveKey(c.ctx, C.CK_SESSION_HANDLE(sh), mech, C.CK_OBJECT_HANDLE(basekey), ac, aclen, &key) + return ObjectHandle(key), toError(e) +} + +// SeedRandom mixes additional seed material into the token's +// random number generator. +func (c *Ctx) SeedRandom(sh SessionHandle, seed []byte) error { + e := C.SeedRandom(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_BYTE_PTR(unsafe.Pointer(&seed[0])), C.CK_ULONG(len(seed))) + return toError(e) +} + +// GenerateRandom generates random data. +func (c *Ctx) GenerateRandom(sh SessionHandle, length int) ([]byte, error) { + var rand C.CK_BYTE_PTR + e := C.GenerateRandom(c.ctx, C.CK_SESSION_HANDLE(sh), &rand, C.CK_ULONG(length)) + if toError(e) != nil { + return nil, toError(e) + } + h := C.GoBytes(unsafe.Pointer(rand), C.int(length)) + C.free(unsafe.Pointer(rand)) + return h, nil +} + +// WaitForSlotEvent returns a channel which returns a slot event +// (token insertion, removal, etc.) when it occurs. +func (c *Ctx) WaitForSlotEvent(flags uint) chan SlotEvent { + sl := make(chan SlotEvent, 1) // hold one element + go c.waitForSlotEventHelper(flags, sl) + return sl +} + +func (c *Ctx) waitForSlotEventHelper(f uint, sl chan SlotEvent) { + var slotID C.CK_ULONG + C.WaitForSlotEvent(c.ctx, C.CK_FLAGS(f), &slotID) + sl <- SlotEvent{uint(slotID)} + close(sl) // TODO(miek): Sending and then closing ...? +} diff --git a/vendor/github.com/miekg/pkcs11/pkcs11.h b/vendor/github.com/miekg/pkcs11/pkcs11.h new file mode 100644 index 000000000..0d78dd711 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/pkcs11.h @@ -0,0 +1,265 @@ +/* Copyright (c) OASIS Open 2016. All Rights Reserved./ + * /Distributed under the terms of the OASIS IPR Policy, + * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY + * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. + */ + +/* Latest version of the specification: + * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html + */ + +#ifndef _PKCS11_H_ +#define _PKCS11_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* Before including this file (pkcs11.h) (or pkcs11t.h by + * itself), 5 platform-specific macros must be defined. These + * macros are described below, and typical definitions for them + * are also given. Be advised that these definitions can depend + * on both the platform and the compiler used (and possibly also + * on whether a Cryptoki library is linked statically or + * dynamically). + * + * In addition to defining these 5 macros, the packing convention + * for Cryptoki structures should be set. 
The Cryptoki + * convention on packing is that structures should be 1-byte + * aligned. + * + * If you're using Microsoft Developer Studio 5.0 to produce + * Win32 stuff, this might be done by using the following + * preprocessor directive before including pkcs11.h or pkcs11t.h: + * + * #pragma pack(push, cryptoki, 1) + * + * and using the following preprocessor directive after including + * pkcs11.h or pkcs11t.h: + * + * #pragma pack(pop, cryptoki) + * + * If you're using an earlier version of Microsoft Developer + * Studio to produce Win16 stuff, this might be done by using + * the following preprocessor directive before including + * pkcs11.h or pkcs11t.h: + * + * #pragma pack(1) + * + * In a UNIX environment, you're on your own for this. You might + * not need to do (or be able to do!) anything. + * + * + * Now for the macros: + * + * + * 1. CK_PTR: The indirection string for making a pointer to an + * object. It can be used like this: + * + * typedef CK_BYTE CK_PTR CK_BYTE_PTR; + * + * If you're using Microsoft Developer Studio 5.0 to produce + * Win32 stuff, it might be defined by: + * + * #define CK_PTR * + * + * If you're using an earlier version of Microsoft Developer + * Studio to produce Win16 stuff, it might be defined by: + * + * #define CK_PTR far * + * + * In a typical UNIX environment, it might be defined by: + * + * #define CK_PTR * + * + * + * 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes + * an importable Cryptoki library function declaration out of a + * return type and a function name. It should be used in the + * following fashion: + * + * extern CK_DECLARE_FUNCTION(CK_RV, C_Initialize)( + * CK_VOID_PTR pReserved + * ); + * + * If you're using Microsoft Developer Studio 5.0 to declare a + * function in a Win32 Cryptoki .dll, it might be defined by: + * + * #define CK_DECLARE_FUNCTION(returnType, name) \ + * returnType __declspec(dllimport) name + * + * If you're using an earlier version of Microsoft Developer + * Studio to declare a function in a Win16 Cryptoki .dll, it + * might be defined by: + * + * #define CK_DECLARE_FUNCTION(returnType, name) \ + * returnType __export _far _pascal name + * + * In a UNIX environment, it might be defined by: + * + * #define CK_DECLARE_FUNCTION(returnType, name) \ + * returnType name + * + * + * 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro + * which makes a Cryptoki API function pointer declaration or + * function pointer type declaration out of a return type and a + * function name. It should be used in the following fashion: + * + * // Define funcPtr to be a pointer to a Cryptoki API function + * // taking arguments args and returning CK_RV. + * CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtr)(args); + * + * or + * + * // Define funcPtrType to be the type of a pointer to a + * // Cryptoki API function taking arguments args and returning + * // CK_RV, and then define funcPtr to be a variable of type + * // funcPtrType. 
+ * typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtrType)(args); + * funcPtrType funcPtr; + * + * If you're using Microsoft Developer Studio 5.0 to access + * functions in a Win32 Cryptoki .dll, it might be defined by: + * + * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ + * returnType __declspec(dllimport) (* name) + * + * If you're using an earlier version of Microsoft Developer + * Studio to access functions in a Win16 Cryptoki .dll, it might + * be defined by: + * + * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ + * returnType __export _far _pascal (* name) + * + * In a UNIX environment, it might be defined by: + * + * #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \ + * returnType (* name) + * + * + * 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes + * a function pointer type for an application callback out of + * a return type for the callback and a name for the callback. + * It should be used in the following fashion: + * + * CK_CALLBACK_FUNCTION(CK_RV, myCallback)(args); + * + * to declare a function pointer, myCallback, to a callback + * which takes arguments args and returns a CK_RV. It can also + * be used like this: + * + * typedef CK_CALLBACK_FUNCTION(CK_RV, myCallbackType)(args); + * myCallbackType myCallback; + * + * If you're using Microsoft Developer Studio 5.0 to do Win32 + * Cryptoki development, it might be defined by: + * + * #define CK_CALLBACK_FUNCTION(returnType, name) \ + * returnType (* name) + * + * If you're using an earlier version of Microsoft Developer + * Studio to do Win16 development, it might be defined by: + * + * #define CK_CALLBACK_FUNCTION(returnType, name) \ + * returnType _far _pascal (* name) + * + * In a UNIX environment, it might be defined by: + * + * #define CK_CALLBACK_FUNCTION(returnType, name) \ + * returnType (* name) + * + * + * 5. NULL_PTR: This macro is the value of a NULL pointer. + * + * In any ANSI/ISO C environment (and in many others as well), + * this should best be defined by + * + * #ifndef NULL_PTR + * #define NULL_PTR 0 + * #endif + */ + + +/* All the various Cryptoki types and #define'd values are in the + * file pkcs11t.h. + */ +#include "pkcs11t.h" + +#define __PASTE(x,y) x##y + + +/* ============================================================== + * Define the "extern" form of all the entry points. + * ============================================================== + */ + +#define CK_NEED_ARG_LIST 1 +#define CK_PKCS11_FUNCTION_INFO(name) \ + extern CK_DECLARE_FUNCTION(CK_RV, name) + +/* pkcs11f.h has all the information about the Cryptoki + * function prototypes. + */ +#include "pkcs11f.h" + +#undef CK_NEED_ARG_LIST +#undef CK_PKCS11_FUNCTION_INFO + + +/* ============================================================== + * Define the typedef form of all the entry points. That is, for + * each Cryptoki function C_XXX, define a type CK_C_XXX which is + * a pointer to that kind of function. + * ============================================================== + */ + +#define CK_NEED_ARG_LIST 1 +#define CK_PKCS11_FUNCTION_INFO(name) \ + typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name)) + +/* pkcs11f.h has all the information about the Cryptoki + * function prototypes. + */ +#include "pkcs11f.h" + +#undef CK_NEED_ARG_LIST +#undef CK_PKCS11_FUNCTION_INFO + + +/* ============================================================== + * Define structured vector of entry points.
A CK_FUNCTION_LIST + * contains a CK_VERSION indicating a library's Cryptoki version + * and then a whole slew of function pointers to the routines in + * the library. This type was declared, but not defined, in + * pkcs11t.h. + * ============================================================== + */ + +#define CK_PKCS11_FUNCTION_INFO(name) \ + __PASTE(CK_,name) name; + +struct CK_FUNCTION_LIST { + + CK_VERSION version; /* Cryptoki version */ + +/* Pile all the function pointers into the CK_FUNCTION_LIST. */ +/* pkcs11f.h has all the information about the Cryptoki + * function prototypes. + */ +#include "pkcs11f.h" + +}; + +#undef CK_PKCS11_FUNCTION_INFO + + +#undef __PASTE + +#ifdef __cplusplus +} +#endif + +#endif /* _PKCS11_H_ */ + diff --git a/vendor/github.com/miekg/pkcs11/pkcs11f.h b/vendor/github.com/miekg/pkcs11/pkcs11f.h new file mode 100644 index 000000000..ed90affc5 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/pkcs11f.h @@ -0,0 +1,939 @@ +/* Copyright (c) OASIS Open 2016. All Rights Reserved./ + * /Distributed under the terms of the OASIS IPR Policy, + * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY + * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. + */ + +/* Latest version of the specification: + * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html + */ + +/* This header file contains pretty much everything about all the + * Cryptoki function prototypes. Because this information is + * used for more than just declaring function prototypes, the + * order of the functions appearing herein is important, and + * should not be altered. + */ + +/* General-purpose */ + +/* C_Initialize initializes the Cryptoki library. */ +CK_PKCS11_FUNCTION_INFO(C_Initialize) +#ifdef CK_NEED_ARG_LIST +( + CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets + * cast to CK_C_INITIALIZE_ARGS_PTR + * and dereferenced + */ +); +#endif + + +/* C_Finalize indicates that an application is done with the + * Cryptoki library. + */ +CK_PKCS11_FUNCTION_INFO(C_Finalize) +#ifdef CK_NEED_ARG_LIST +( + CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */ +); +#endif + + +/* C_GetInfo returns general information about Cryptoki. */ +CK_PKCS11_FUNCTION_INFO(C_GetInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_INFO_PTR pInfo /* location that receives information */ +); +#endif + + +/* C_GetFunctionList returns the function list. */ +CK_PKCS11_FUNCTION_INFO(C_GetFunctionList) +#ifdef CK_NEED_ARG_LIST +( + CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to + * function list + */ +); +#endif + + + +/* Slot and token management */ + +/* C_GetSlotList obtains a list of slots in the system. */ +CK_PKCS11_FUNCTION_INFO(C_GetSlotList) +#ifdef CK_NEED_ARG_LIST +( + CK_BBOOL tokenPresent, /* only slots with tokens */ + CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */ + CK_ULONG_PTR pulCount /* receives number of slots */ +); +#endif + + +/* C_GetSlotInfo obtains information about a particular slot in + * the system. + */ +CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* the ID of the slot */ + CK_SLOT_INFO_PTR pInfo /* receives the slot information */ +); +#endif + + +/* C_GetTokenInfo obtains information about a particular token + * in the system. 
+ */ +CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of the token's slot */ + CK_TOKEN_INFO_PTR pInfo /* receives the token information */ +); +#endif + + +/* C_GetMechanismList obtains a list of mechanism types + * supported by a token. + */ +CK_PKCS11_FUNCTION_INFO(C_GetMechanismList) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of token's slot */ + CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */ + CK_ULONG_PTR pulCount /* gets # of mechs. */ +); +#endif + + +/* C_GetMechanismInfo obtains information about a particular + * mechanism possibly supported by a token. + */ +CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of the token's slot */ + CK_MECHANISM_TYPE type, /* type of mechanism */ + CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */ +); +#endif + + +/* C_InitToken initializes a token. */ +CK_PKCS11_FUNCTION_INFO(C_InitToken) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* ID of the token's slot */ + CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */ + CK_ULONG ulPinLen, /* length in bytes of the PIN */ + CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */ +); +#endif + + +/* C_InitPIN initializes the normal user's PIN. */ +CK_PKCS11_FUNCTION_INFO(C_InitPIN) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */ + CK_ULONG ulPinLen /* length in bytes of the PIN */ +); +#endif + + +/* C_SetPIN modifies the PIN of the user who is logged in. */ +CK_PKCS11_FUNCTION_INFO(C_SetPIN) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_UTF8CHAR_PTR pOldPin, /* the old PIN */ + CK_ULONG ulOldLen, /* length of the old PIN */ + CK_UTF8CHAR_PTR pNewPin, /* the new PIN */ + CK_ULONG ulNewLen /* length of the new PIN */ +); +#endif + + + +/* Session management */ + +/* C_OpenSession opens a session between an application and a + * token. + */ +CK_PKCS11_FUNCTION_INFO(C_OpenSession) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID, /* the slot's ID */ + CK_FLAGS flags, /* from CK_SESSION_INFO */ + CK_VOID_PTR pApplication, /* passed to callback */ + CK_NOTIFY Notify, /* callback function */ + CK_SESSION_HANDLE_PTR phSession /* gets session handle */ +); +#endif + + +/* C_CloseSession closes a session between an application and a + * token. + */ +CK_PKCS11_FUNCTION_INFO(C_CloseSession) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + +/* C_CloseAllSessions closes all sessions with a token. */ +CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions) +#ifdef CK_NEED_ARG_LIST +( + CK_SLOT_ID slotID /* the token's slot */ +); +#endif + + +/* C_GetSessionInfo obtains information about the session. */ +CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_SESSION_INFO_PTR pInfo /* receives session info */ +); +#endif + + +/* C_GetOperationState obtains the state of the cryptographic operation + * in a session. + */ +CK_PKCS11_FUNCTION_INFO(C_GetOperationState) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pOperationState, /* gets state */ + CK_ULONG_PTR pulOperationStateLen /* gets state length */ +); +#endif + + +/* C_SetOperationState restores the state of the cryptographic + * operation in a session. 
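+ *
+ * Usage sketch (illustrative only): saving the state of an active
+ * digest operation and restoring it later. Assumes hSession is an
+ * open session and that the token can save this state; no keys are
+ * involved in a digest, so CK_INVALID_HANDLE is passed for both key
+ * arguments:
+ *
+ *   CK_ULONG len = 0;
+ *   C_GetOperationState(hSession, NULL_PTR, &len);
+ *   CK_BYTE_PTR state = malloc(len);
+ *   C_GetOperationState(hSession, state, &len);
+ *   ...
+ *   C_SetOperationState(hSession, state, len,
+ *                       CK_INVALID_HANDLE, CK_INVALID_HANDLE);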
+ */ +CK_PKCS11_FUNCTION_INFO(C_SetOperationState) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pOperationState, /* holds state */ + CK_ULONG ulOperationStateLen, /* holds state length */ + CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */ + CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */ +); +#endif + + +/* C_Login logs a user into a token. */ +CK_PKCS11_FUNCTION_INFO(C_Login) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_USER_TYPE userType, /* the user type */ + CK_UTF8CHAR_PTR pPin, /* the user's PIN */ + CK_ULONG ulPinLen /* the length of the PIN */ +); +#endif + + +/* C_Logout logs a user out from a token. */ +CK_PKCS11_FUNCTION_INFO(C_Logout) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + + +/* Object management */ + +/* C_CreateObject creates a new object. */ +CK_PKCS11_FUNCTION_INFO(C_CreateObject) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* the object's template */ + CK_ULONG ulCount, /* attributes in template */ + CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */ +); +#endif + + +/* C_CopyObject copies an object, creating a new object for the + * copy. + */ +CK_PKCS11_FUNCTION_INFO(C_CopyObject) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* template for new object */ + CK_ULONG ulCount, /* attributes in template */ + CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */ +); +#endif + + +/* C_DestroyObject destroys an object. */ +CK_PKCS11_FUNCTION_INFO(C_DestroyObject) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject /* the object's handle */ +); +#endif + + +/* C_GetObjectSize gets the size of an object in bytes. */ +CK_PKCS11_FUNCTION_INFO(C_GetObjectSize) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ULONG_PTR pulSize /* receives size of object */ +); +#endif + + +/* C_GetAttributeValue obtains the value of one or more object + * attributes. + */ +CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */ + CK_ULONG ulCount /* attributes in template */ +); +#endif + + +/* C_SetAttributeValue modifies the value of one or more object + * attributes. + */ +CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hObject, /* the object's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */ + CK_ULONG ulCount /* attributes in template */ +); +#endif + + +/* C_FindObjectsInit initializes a search for token and session + * objects that match a template. + */ +CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */ + CK_ULONG ulCount /* attrs in search template */ +); +#endif + + +/* C_FindObjects continues a search for token and session + * objects that match a template, obtaining additional object + * handles. 
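+ *
+ * Usage sketch (illustrative only): the init / loop / final pattern,
+ * here finding every secret-key object visible in hSession (assumed
+ * to be an open, and for private objects logged-in, session). The
+ * handles h[0] .. h[n-1] would be consumed inside the loop:
+ *
+ *   CK_OBJECT_CLASS cls = CKO_SECRET_KEY;
+ *   CK_ATTRIBUTE tmpl[] = { {CKA_CLASS, &cls, sizeof(cls)} };
+ *   CK_OBJECT_HANDLE h[16];
+ *   CK_ULONG n = 0;
+ *   C_FindObjectsInit(hSession, tmpl, 1);
+ *   do {
+ *     C_FindObjects(hSession, h, 16, &n);
+ *   } while (n > 0);
+ *   C_FindObjectsFinal(hSession);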
+ */ +CK_PKCS11_FUNCTION_INFO(C_FindObjects) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */ + CK_ULONG ulMaxObjectCount, /* max handles to get */ + CK_ULONG_PTR pulObjectCount /* actual # returned */ +); +#endif + + +/* C_FindObjectsFinal finishes a search for token and session + * objects. + */ +CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + + +/* Encryption and decryption */ + +/* C_EncryptInit initializes an encryption operation. */ +CK_PKCS11_FUNCTION_INFO(C_EncryptInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */ + CK_OBJECT_HANDLE hKey /* handle of encryption key */ +); +#endif + + +/* C_Encrypt encrypts single-part data. */ +CK_PKCS11_FUNCTION_INFO(C_Encrypt) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pData, /* the plaintext data */ + CK_ULONG ulDataLen, /* bytes of plaintext */ + CK_BYTE_PTR pEncryptedData, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */ +); +#endif + + +/* C_EncryptUpdate continues a multiple-part encryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pPart, /* the plaintext data */ + CK_ULONG ulPartLen, /* plaintext data len */ + CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */ +); +#endif + + +/* C_EncryptFinal finishes a multiple-part encryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_EncryptFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session handle */ + CK_BYTE_PTR pLastEncryptedPart, /* last c-text */ + CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */ +); +#endif + + +/* C_DecryptInit initializes a decryption operation. */ +CK_PKCS11_FUNCTION_INFO(C_DecryptInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */ + CK_OBJECT_HANDLE hKey /* handle of decryption key */ +); +#endif + + +/* C_Decrypt decrypts encrypted data in a single part. */ +CK_PKCS11_FUNCTION_INFO(C_Decrypt) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedData, /* ciphertext */ + CK_ULONG ulEncryptedDataLen, /* ciphertext length */ + CK_BYTE_PTR pData, /* gets plaintext */ + CK_ULONG_PTR pulDataLen /* gets p-text size */ +); +#endif + + +/* C_DecryptUpdate continues a multiple-part decryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedPart, /* encrypted data */ + CK_ULONG ulEncryptedPartLen, /* input length */ + CK_BYTE_PTR pPart, /* gets plaintext */ + CK_ULONG_PTR pulPartLen /* p-text size */ +); +#endif + + +/* C_DecryptFinal finishes a multiple-part decryption + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pLastPart, /* gets plaintext */ + CK_ULONG_PTR pulLastPartLen /* p-text size */ +); +#endif + + + +/* Message digesting */ + +/* C_DigestInit initializes a message-digesting operation. 
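+ *
+ * Usage sketch (illustrative only): a single-part SHA-256 digest.
+ * Assumes hSession is an open session, the token supports
+ * CKM_SHA256, and pData/ulDataLen name the caller's input:
+ *
+ *   CK_MECHANISM mech = { CKM_SHA256, NULL_PTR, 0 };
+ *   CK_BYTE digest[32];
+ *   CK_ULONG digestLen = sizeof(digest);
+ *   C_DigestInit(hSession, &mech);
+ *   C_Digest(hSession, pData, ulDataLen, digest, &digestLen);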
*/ +CK_PKCS11_FUNCTION_INFO(C_DigestInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism /* the digesting mechanism */ +); +#endif + + +/* C_Digest digests data in a single part. */ +CK_PKCS11_FUNCTION_INFO(C_Digest) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pData, /* data to be digested */ + CK_ULONG ulDataLen, /* bytes of data to digest */ + CK_BYTE_PTR pDigest, /* gets the message digest */ + CK_ULONG_PTR pulDigestLen /* gets digest length */ +); +#endif + + +/* C_DigestUpdate continues a multiple-part message-digesting + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DigestUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pPart, /* data to be digested */ + CK_ULONG ulPartLen /* bytes of data to be digested */ +); +#endif + + +/* C_DigestKey continues a multi-part message-digesting + * operation, by digesting the value of a secret key as part of + * the data already digested. + */ +CK_PKCS11_FUNCTION_INFO(C_DigestKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_OBJECT_HANDLE hKey /* secret key to digest */ +); +#endif + + +/* C_DigestFinal finishes a multiple-part message-digesting + * operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DigestFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pDigest, /* gets the message digest */ + CK_ULONG_PTR pulDigestLen /* gets byte count of digest */ +); +#endif + + + +/* Signing and MACing */ + +/* C_SignInit initializes a signature (private key encryption) + * operation, where the signature is (will be) an appendix to + * the data, and plaintext cannot be recovered from the + * signature. + */ +CK_PKCS11_FUNCTION_INFO(C_SignInit) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the signature mechanism */ + CK_OBJECT_HANDLE hKey /* handle of signature key */ +); +#endif + + +/* C_Sign signs (encrypts with private key) data in a single + * part, where the signature is (will be) an appendix to the + * data, and plaintext cannot be recovered from the signature. + */ +CK_PKCS11_FUNCTION_INFO(C_Sign) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pData, /* the data to sign */ + CK_ULONG ulDataLen, /* count of bytes to sign */ + CK_BYTE_PTR pSignature, /* gets the signature */ + CK_ULONG_PTR pulSignatureLen /* gets signature length */ +); +#endif + + +/* C_SignUpdate continues a multiple-part signature operation, + * where the signature is (will be) an appendix to the data, + * and plaintext cannot be recovered from the signature. + */ +CK_PKCS11_FUNCTION_INFO(C_SignUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pPart, /* the data to sign */ + CK_ULONG ulPartLen /* count of bytes to sign */ +); +#endif + + +/* C_SignFinal finishes a multiple-part signature operation, + * returning the signature. + */ +CK_PKCS11_FUNCTION_INFO(C_SignFinal) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pSignature, /* gets the signature */ + CK_ULONG_PTR pulSignatureLen /* gets signature length */ +); +#endif + + +/* C_SignRecoverInit initializes a signature operation, where + * the data can be recovered from the signature. 
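+ *
+ * Usage sketch (illustrative only): like most Cryptoki calls that
+ * return a variable-length byte string, C_SignRecover below (and
+ * C_Sign above) can first be called with a NULL_PTR output buffer
+ * to learn the required length. Assumes an active signing operation
+ * and caller-supplied pData/ulDataLen:
+ *
+ *   CK_ULONG sigLen = 0;
+ *   C_SignRecover(hSession, pData, ulDataLen, NULL_PTR, &sigLen);
+ *   CK_BYTE_PTR sig = malloc(sigLen);
+ *   C_SignRecover(hSession, pData, ulDataLen, sig, &sigLen);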
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the signature mechanism */
+ CK_OBJECT_HANDLE hKey /* handle of the signature key */
+);
+#endif
+
+
+/* C_SignRecover signs data in a single operation, where the
+ * data can be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_SignRecover)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* the data to sign */
+ CK_ULONG ulDataLen, /* count of bytes to sign */
+ CK_BYTE_PTR pSignature, /* gets the signature */
+ CK_ULONG_PTR pulSignatureLen /* gets signature length */
+);
+#endif
+
+
+
+/* Verifying signatures and MACs */
+
+/* C_VerifyInit initializes a verification operation, where the
+ * signature is an appendix to the data, and plaintext cannot
+ * be recovered from the signature (e.g. DSA).
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
+ CK_OBJECT_HANDLE hKey /* verification key */
+);
+#endif
+
+
+/* C_Verify verifies a signature in a single-part operation,
+ * where the signature is an appendix to the data, and plaintext
+ * cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_Verify)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pData, /* signed data */
+ CK_ULONG ulDataLen, /* length of signed data */
+ CK_BYTE_PTR pSignature, /* signature */
+ CK_ULONG ulSignatureLen /* signature length */
+);
+#endif
+
+
+/* C_VerifyUpdate continues a multiple-part verification
+ * operation, where the signature is an appendix to the data,
+ * and plaintext cannot be recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pPart, /* signed data */
+ CK_ULONG ulPartLen /* length of signed data */
+);
+#endif
+
+
+/* C_VerifyFinal finishes a multiple-part verification
+ * operation, checking the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyFinal)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* signature to verify */
+ CK_ULONG ulSignatureLen /* signature length */
+);
+#endif
+
+
+/* C_VerifyRecoverInit initializes a signature verification
+ * operation, where the data is recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
+ CK_OBJECT_HANDLE hKey /* verification key */
+);
+#endif
+
+
+/* C_VerifyRecover verifies a signature in a single-part
+ * operation, where the data is recovered from the signature.
+ */
+CK_PKCS11_FUNCTION_INFO(C_VerifyRecover)
+#ifdef CK_NEED_ARG_LIST
+(
+ CK_SESSION_HANDLE hSession, /* the session's handle */
+ CK_BYTE_PTR pSignature, /* signature to verify */
+ CK_ULONG ulSignatureLen, /* signature length */
+ CK_BYTE_PTR pData, /* gets signed data */
+ CK_ULONG_PTR pulDataLen /* gets signed data len */
+);
+#endif
+
+
+
+/* Dual-function cryptographic operations */
+
+/* C_DigestEncryptUpdate continues a multiple-part digesting
+ * and encryption operation. 
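+ *
+ * Usage sketch (illustrative only): a chunk-by-chunk loop that
+ * digests and encrypts the same stream. Assumes C_DigestInit and
+ * C_EncryptInit were both called on hSession, the token advertises
+ * CKF_DUAL_CRYPTO_OPERATIONS, read_chunk/write_chunk stand in for
+ * application I/O, and the buffers and lengths are declared by the
+ * caller:
+ *
+ *   while ((inLen = read_chunk(in, sizeof(in))) > 0) {
+ *     CK_ULONG outLen = sizeof(out);
+ *     C_DigestEncryptUpdate(hSession, in, inLen, out, &outLen);
+ *     write_chunk(out, outLen);
+ *   }
+ *   C_EncryptFinal(hSession, lastOut, &lastOutLen);
+ *   C_DigestFinal(hSession, digest, &digestLen);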
+ */ +CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pPart, /* the plaintext data */ + CK_ULONG ulPartLen, /* plaintext length */ + CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ +); +#endif + + +/* C_DecryptDigestUpdate continues a multiple-part decryption and + * digesting operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedPart, /* ciphertext */ + CK_ULONG ulEncryptedPartLen, /* ciphertext length */ + CK_BYTE_PTR pPart, /* gets plaintext */ + CK_ULONG_PTR pulPartLen /* gets plaintext len */ +); +#endif + + +/* C_SignEncryptUpdate continues a multiple-part signing and + * encryption operation. + */ +CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pPart, /* the plaintext data */ + CK_ULONG ulPartLen, /* plaintext length */ + CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */ + CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */ +); +#endif + + +/* C_DecryptVerifyUpdate continues a multiple-part decryption and + * verify operation. + */ +CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_BYTE_PTR pEncryptedPart, /* ciphertext */ + CK_ULONG ulEncryptedPartLen, /* ciphertext length */ + CK_BYTE_PTR pPart, /* gets plaintext */ + CK_ULONG_PTR pulPartLen /* gets p-text length */ +); +#endif + + + +/* Key management */ + +/* C_GenerateKey generates a secret key, creating a new key + * object. + */ +CK_PKCS11_FUNCTION_INFO(C_GenerateKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* key generation mech. */ + CK_ATTRIBUTE_PTR pTemplate, /* template for new key */ + CK_ULONG ulCount, /* # of attrs in template */ + CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */ +); +#endif + + +/* C_GenerateKeyPair generates a public-key/private-key pair, + * creating new key objects. + */ +CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session handle */ + CK_MECHANISM_PTR pMechanism, /* key-gen mech. */ + CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */ + CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */ + CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */ + CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */ + CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */ + CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */ +); +#endif + + +/* C_WrapKey wraps (i.e., encrypts) a key. */ +CK_PKCS11_FUNCTION_INFO(C_WrapKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */ + CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */ + CK_OBJECT_HANDLE hKey, /* key to be wrapped */ + CK_BYTE_PTR pWrappedKey, /* gets wrapped key */ + CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */ +); +#endif + + +/* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new + * key object. + */ +CK_PKCS11_FUNCTION_INFO(C_UnwrapKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_MECHANISM_PTR pMechanism, /* unwrapping mech. 
*/ + CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */ + CK_BYTE_PTR pWrappedKey, /* the wrapped key */ + CK_ULONG ulWrappedKeyLen, /* wrapped key len */ + CK_ATTRIBUTE_PTR pTemplate, /* new key template */ + CK_ULONG ulAttributeCount, /* template length */ + CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ +); +#endif + + +/* C_DeriveKey derives a key from a base key, creating a new key + * object. + */ +CK_PKCS11_FUNCTION_INFO(C_DeriveKey) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* session's handle */ + CK_MECHANISM_PTR pMechanism, /* key deriv. mech. */ + CK_OBJECT_HANDLE hBaseKey, /* base key */ + CK_ATTRIBUTE_PTR pTemplate, /* new key template */ + CK_ULONG ulAttributeCount, /* template length */ + CK_OBJECT_HANDLE_PTR phKey /* gets new handle */ +); +#endif + + + +/* Random number generation */ + +/* C_SeedRandom mixes additional seed material into the token's + * random number generator. + */ +CK_PKCS11_FUNCTION_INFO(C_SeedRandom) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR pSeed, /* the seed material */ + CK_ULONG ulSeedLen /* length of seed material */ +); +#endif + + +/* C_GenerateRandom generates random data. */ +CK_PKCS11_FUNCTION_INFO(C_GenerateRandom) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_BYTE_PTR RandomData, /* receives the random data */ + CK_ULONG ulRandomLen /* # of bytes to generate */ +); +#endif + + + +/* Parallel function management */ + +/* C_GetFunctionStatus is a legacy function; it obtains an + * updated status of a function running in parallel with an + * application. + */ +CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + +/* C_CancelFunction is a legacy function; it cancels a function + * running in parallel. + */ +CK_PKCS11_FUNCTION_INFO(C_CancelFunction) +#ifdef CK_NEED_ARG_LIST +( + CK_SESSION_HANDLE hSession /* the session's handle */ +); +#endif + + +/* C_WaitForSlotEvent waits for a slot event (token insertion, + * removal, etc.) to occur. + */ +CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent) +#ifdef CK_NEED_ARG_LIST +( + CK_FLAGS flags, /* blocking/nonblocking flag */ + CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */ + CK_VOID_PTR pRserved /* reserved. Should be NULL_PTR */ +); +#endif + diff --git a/vendor/github.com/miekg/pkcs11/pkcs11go.h b/vendor/github.com/miekg/pkcs11/pkcs11go.h new file mode 100644 index 000000000..1b98bad21 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/pkcs11go.h @@ -0,0 +1,33 @@ +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// + +#define CK_PTR * +#ifndef NULL_PTR +#define NULL_PTR 0 +#endif +#define CK_DEFINE_FUNCTION(returnType, name) returnType name +#define CK_DECLARE_FUNCTION(returnType, name) returnType name +#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name) +#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name) + +#include <unistd.h> +#ifdef PACKED_STRUCTURES +# pragma pack(push, 1) +# include "pkcs11.h" +# pragma pack(pop) +#else +# include "pkcs11.h" +#endif + +// Copy of CK_INFO but with default alignment (not packed). Go hides unaligned +// struct fields so copying to an aligned struct is necessary to read CK_INFO +// from Go on Windows where packing is required. 
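+// Illustrative sketch of that copy (assumptions: direct linkage and
+// memcpy from <string.h> are used here for brevity; the Go wrapper
+// actually reaches the module through its CK_FUNCTION_LIST). Copying
+// field by field is what makes the read independent of CK_INFO's
+// packing:
+//
+//   CK_INFO src;
+//   ckInfo  dst;
+//   C_GetInfo(&src);
+//   dst.cryptokiVersion = src.cryptokiVersion;
+//   memcpy(dst.manufacturerID, src.manufacturerID, 32);
+//   dst.flags = src.flags;
+//   memcpy(dst.libraryDescription, src.libraryDescription, 32);
+//   dst.libraryVersion = src.libraryVersion;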
+typedef struct ckInfo { + CK_VERSION cryptokiVersion; + CK_UTF8CHAR manufacturerID[32]; + CK_FLAGS flags; + CK_UTF8CHAR libraryDescription[32]; + CK_VERSION libraryVersion; +} ckInfo, *ckInfoPtr; diff --git a/vendor/github.com/miekg/pkcs11/pkcs11t.h b/vendor/github.com/miekg/pkcs11/pkcs11t.h new file mode 100644 index 000000000..321c3075a --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/pkcs11t.h @@ -0,0 +1,2047 @@ +/* Copyright (c) OASIS Open 2016. All Rights Reserved./ + * /Distributed under the terms of the OASIS IPR Policy, + * [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY + * IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A + * PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others. + */ + +/* Latest version of the specification: + * http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html + */ + +/* See top of pkcs11.h for information about the macros that + * must be defined and the structure-packing conventions that + * must be set before including this file. + */ + +#ifndef _PKCS11T_H_ +#define _PKCS11T_H_ 1 + +#define CRYPTOKI_VERSION_MAJOR 2 +#define CRYPTOKI_VERSION_MINOR 40 +#define CRYPTOKI_VERSION_AMENDMENT 0 + +#define CK_TRUE 1 +#define CK_FALSE 0 + +#ifndef CK_DISABLE_TRUE_FALSE +#ifndef FALSE +#define FALSE CK_FALSE +#endif +#ifndef TRUE +#define TRUE CK_TRUE +#endif +#endif + +/* an unsigned 8-bit value */ +typedef unsigned char CK_BYTE; + +/* an unsigned 8-bit character */ +typedef CK_BYTE CK_CHAR; + +/* an 8-bit UTF-8 character */ +typedef CK_BYTE CK_UTF8CHAR; + +/* a BYTE-sized Boolean flag */ +typedef CK_BYTE CK_BBOOL; + +/* an unsigned value, at least 32 bits long */ +typedef unsigned long int CK_ULONG; + +/* a signed value, the same size as a CK_ULONG */ +typedef long int CK_LONG; + +/* at least 32 bits; each bit is a Boolean flag */ +typedef CK_ULONG CK_FLAGS; + + +/* some special values for certain CK_ULONG variables */ +#define CK_UNAVAILABLE_INFORMATION (~0UL) +#define CK_EFFECTIVELY_INFINITE 0UL + + +typedef CK_BYTE CK_PTR CK_BYTE_PTR; +typedef CK_CHAR CK_PTR CK_CHAR_PTR; +typedef CK_UTF8CHAR CK_PTR CK_UTF8CHAR_PTR; +typedef CK_ULONG CK_PTR CK_ULONG_PTR; +typedef void CK_PTR CK_VOID_PTR; + +/* Pointer to a CK_VOID_PTR-- i.e., pointer to pointer to void */ +typedef CK_VOID_PTR CK_PTR CK_VOID_PTR_PTR; + + +/* The following value is always invalid if used as a session + * handle or object handle + */ +#define CK_INVALID_HANDLE 0UL + + +typedef struct CK_VERSION { + CK_BYTE major; /* integer portion of version number */ + CK_BYTE minor; /* 1/100ths portion of version number */ +} CK_VERSION; + +typedef CK_VERSION CK_PTR CK_VERSION_PTR; + + +typedef struct CK_INFO { + CK_VERSION cryptokiVersion; /* Cryptoki interface ver */ + CK_UTF8CHAR manufacturerID[32]; /* blank padded */ + CK_FLAGS flags; /* must be zero */ + CK_UTF8CHAR libraryDescription[32]; /* blank padded */ + CK_VERSION libraryVersion; /* version of library */ +} CK_INFO; + +typedef CK_INFO CK_PTR CK_INFO_PTR; + + +/* CK_NOTIFICATION enumerates the types of notifications that + * Cryptoki provides to an application + */ +typedef CK_ULONG CK_NOTIFICATION; +#define CKN_SURRENDER 0UL +#define CKN_OTP_CHANGED 1UL + +typedef CK_ULONG CK_SLOT_ID; + +typedef CK_SLOT_ID CK_PTR CK_SLOT_ID_PTR; + + +/* CK_SLOT_INFO provides information about a slot */ +typedef struct CK_SLOT_INFO { + CK_UTF8CHAR slotDescription[64]; /* blank padded */ + CK_UTF8CHAR manufacturerID[32]; /* blank padded */ + CK_FLAGS flags; + + CK_VERSION 
hardwareVersion; /* version of hardware */ + CK_VERSION firmwareVersion; /* version of firmware */ +} CK_SLOT_INFO; + +/* flags: bit flags that provide capabilities of the slot + * Bit Flag Mask Meaning + */ +#define CKF_TOKEN_PRESENT 0x00000001UL /* a token is there */ +#define CKF_REMOVABLE_DEVICE 0x00000002UL /* removable devices*/ +#define CKF_HW_SLOT 0x00000004UL /* hardware slot */ + +typedef CK_SLOT_INFO CK_PTR CK_SLOT_INFO_PTR; + + +/* CK_TOKEN_INFO provides information about a token */ +typedef struct CK_TOKEN_INFO { + CK_UTF8CHAR label[32]; /* blank padded */ + CK_UTF8CHAR manufacturerID[32]; /* blank padded */ + CK_UTF8CHAR model[16]; /* blank padded */ + CK_CHAR serialNumber[16]; /* blank padded */ + CK_FLAGS flags; /* see below */ + + CK_ULONG ulMaxSessionCount; /* max open sessions */ + CK_ULONG ulSessionCount; /* sess. now open */ + CK_ULONG ulMaxRwSessionCount; /* max R/W sessions */ + CK_ULONG ulRwSessionCount; /* R/W sess. now open */ + CK_ULONG ulMaxPinLen; /* in bytes */ + CK_ULONG ulMinPinLen; /* in bytes */ + CK_ULONG ulTotalPublicMemory; /* in bytes */ + CK_ULONG ulFreePublicMemory; /* in bytes */ + CK_ULONG ulTotalPrivateMemory; /* in bytes */ + CK_ULONG ulFreePrivateMemory; /* in bytes */ + CK_VERSION hardwareVersion; /* version of hardware */ + CK_VERSION firmwareVersion; /* version of firmware */ + CK_CHAR utcTime[16]; /* time */ +} CK_TOKEN_INFO; + +/* The flags parameter is defined as follows: + * Bit Flag Mask Meaning + */ +#define CKF_RNG 0x00000001UL /* has random # generator */ +#define CKF_WRITE_PROTECTED 0x00000002UL /* token is write-protected */ +#define CKF_LOGIN_REQUIRED 0x00000004UL /* user must login */ +#define CKF_USER_PIN_INITIALIZED 0x00000008UL /* normal user's PIN is set */ + +/* CKF_RESTORE_KEY_NOT_NEEDED. If it is set, + * that means that *every* time the state of cryptographic + * operations of a session is successfully saved, all keys + * needed to continue those operations are stored in the state + */ +#define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL + +/* CKF_CLOCK_ON_TOKEN. If it is set, that means + * that the token has some sort of clock. The time on that + * clock is returned in the token info structure + */ +#define CKF_CLOCK_ON_TOKEN 0x00000040UL + +/* CKF_PROTECTED_AUTHENTICATION_PATH. If it is + * set, that means that there is some way for the user to login + * without sending a PIN through the Cryptoki library itself + */ +#define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL + +/* CKF_DUAL_CRYPTO_OPERATIONS. If it is true, + * that means that a single session with the token can perform + * dual simultaneous cryptographic operations (digest and + * encrypt; decrypt and digest; sign and encrypt; and decrypt + * and sign) + */ +#define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL + +/* CKF_TOKEN_INITIALIZED. If it is true, the + * token has been initialized using C_InitializeToken or an + * equivalent mechanism outside the scope of PKCS #11. + * Calling C_InitializeToken when this flag is set will cause + * the token to be reinitialized. + */ +#define CKF_TOKEN_INITIALIZED 0x00000400UL + +/* CKF_SECONDARY_AUTHENTICATION. If it is + * true, the token supports secondary authentication for + * private key objects. + */ +#define CKF_SECONDARY_AUTHENTICATION 0x00000800UL + +/* CKF_USER_PIN_COUNT_LOW. If it is true, an + * incorrect user login PIN has been entered at least once + * since the last successful authentication. + */ +#define CKF_USER_PIN_COUNT_LOW 0x00010000UL + +/* CKF_USER_PIN_FINAL_TRY. 
If it is true,
+ * supplying an incorrect user PIN will cause it to become locked.
+ */
+#define CKF_USER_PIN_FINAL_TRY 0x00020000UL
+
+/* CKF_USER_PIN_LOCKED. If it is true, the
+ * user PIN has been locked. User login to the token is not
+ * possible.
+ */
+#define CKF_USER_PIN_LOCKED 0x00040000UL
+
+/* CKF_USER_PIN_TO_BE_CHANGED. If it is true,
+ * the user PIN value is the default value set by token
+ * initialization or manufacturing, or the PIN has been
+ * expired by the card.
+ */
+#define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL
+
+/* CKF_SO_PIN_COUNT_LOW. If it is true, an
+ * incorrect SO login PIN has been entered at least once since
+ * the last successful authentication.
+ */
+#define CKF_SO_PIN_COUNT_LOW 0x00100000UL
+
+/* CKF_SO_PIN_FINAL_TRY. If it is true,
+ * supplying an incorrect SO PIN will cause it to become locked.
+ */
+#define CKF_SO_PIN_FINAL_TRY 0x00200000UL
+
+/* CKF_SO_PIN_LOCKED. If it is true, the SO
+ * PIN has been locked. SO login to the token is not possible.
+ */
+#define CKF_SO_PIN_LOCKED 0x00400000UL
+
+/* CKF_SO_PIN_TO_BE_CHANGED. If it is true,
+ * the SO PIN value is the default value set by token
+ * initialization or manufacturing, or the PIN has been
+ * expired by the card.
+ */
+#define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL
+
+#define CKF_ERROR_STATE 0x01000000UL
+
+typedef CK_TOKEN_INFO CK_PTR CK_TOKEN_INFO_PTR;
+
+
+/* CK_SESSION_HANDLE is a Cryptoki-assigned value that
+ * identifies a session
+ */
+typedef CK_ULONG CK_SESSION_HANDLE;
+
+typedef CK_SESSION_HANDLE CK_PTR CK_SESSION_HANDLE_PTR;
+
+
+/* CK_USER_TYPE enumerates the types of Cryptoki users */
+typedef CK_ULONG CK_USER_TYPE;
+/* Security Officer */
+#define CKU_SO 0UL
+/* Normal user */
+#define CKU_USER 1UL
+/* Context specific */
+#define CKU_CONTEXT_SPECIFIC 2UL
+
+/* CK_STATE enumerates the session states */
+typedef CK_ULONG CK_STATE;
+#define CKS_RO_PUBLIC_SESSION 0UL
+#define CKS_RO_USER_FUNCTIONS 1UL
+#define CKS_RW_PUBLIC_SESSION 2UL
+#define CKS_RW_USER_FUNCTIONS 3UL
+#define CKS_RW_SO_FUNCTIONS 4UL
+
+/* CK_SESSION_INFO provides information about a session */
+typedef struct CK_SESSION_INFO {
+ CK_SLOT_ID slotID;
+ CK_STATE state;
+ CK_FLAGS flags; /* see below */
+ CK_ULONG ulDeviceError; /* device-dependent error code */
+} CK_SESSION_INFO;
+
+/* The flags are defined in the following table:
+ * Bit Flag Mask Meaning
+ */
+#define CKF_RW_SESSION 0x00000002UL /* session is r/w */
+#define CKF_SERIAL_SESSION 0x00000004UL /* no parallel */
+
+typedef CK_SESSION_INFO CK_PTR CK_SESSION_INFO_PTR;
+
+
+/* CK_OBJECT_HANDLE is a token-specific identifier for an
+ * object
+ */
+typedef CK_ULONG CK_OBJECT_HANDLE;
+
+typedef CK_OBJECT_HANDLE CK_PTR CK_OBJECT_HANDLE_PTR;
+
+
+/* CK_OBJECT_CLASS is a value that identifies the classes (or
+ * types) of objects that Cryptoki recognizes. It is defined
+ * as follows:
+ */
+typedef CK_ULONG CK_OBJECT_CLASS;
+
+/* The following classes of objects are defined: */
+#define CKO_DATA 0x00000000UL
+#define CKO_CERTIFICATE 0x00000001UL
+#define CKO_PUBLIC_KEY 0x00000002UL
+#define CKO_PRIVATE_KEY 0x00000003UL
+#define CKO_SECRET_KEY 0x00000004UL
+#define CKO_HW_FEATURE 0x00000005UL
+#define CKO_DOMAIN_PARAMETERS 0x00000006UL
+#define CKO_MECHANISM 0x00000007UL
+#define CKO_OTP_KEY 0x00000008UL
+
+#define CKO_VENDOR_DEFINED 0x80000000UL
+
+typedef CK_OBJECT_CLASS CK_PTR CK_OBJECT_CLASS_PTR;
+
+/* CK_HW_FEATURE_TYPE is a value that identifies the hardware feature type
+ * of an object with CK_OBJECT_CLASS equal to CKO_HW_FEATURE. 
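+ *
+ * Usage sketch (illustrative only): hardware-feature objects are
+ * located like any other class, e.g. searching for the token's
+ * clock (CKH_CLOCK and CKA_HW_FEATURE_TYPE are defined below;
+ * hSession is assumed to be an open session):
+ *
+ *   CK_OBJECT_CLASS cls = CKO_HW_FEATURE;
+ *   CK_HW_FEATURE_TYPE type = CKH_CLOCK;
+ *   CK_ATTRIBUTE tmpl[] = {
+ *     {CKA_CLASS, &cls, sizeof(cls)},
+ *     {CKA_HW_FEATURE_TYPE, &type, sizeof(type)}
+ *   };
+ *   C_FindObjectsInit(hSession, tmpl, 2);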
+ */ +typedef CK_ULONG CK_HW_FEATURE_TYPE; + +/* The following hardware feature types are defined */ +#define CKH_MONOTONIC_COUNTER 0x00000001UL +#define CKH_CLOCK 0x00000002UL +#define CKH_USER_INTERFACE 0x00000003UL +#define CKH_VENDOR_DEFINED 0x80000000UL + +/* CK_KEY_TYPE is a value that identifies a key type */ +typedef CK_ULONG CK_KEY_TYPE; + +/* the following key types are defined: */ +#define CKK_RSA 0x00000000UL +#define CKK_DSA 0x00000001UL +#define CKK_DH 0x00000002UL +#define CKK_ECDSA 0x00000003UL /* Deprecated */ +#define CKK_EC 0x00000003UL +#define CKK_X9_42_DH 0x00000004UL +#define CKK_KEA 0x00000005UL +#define CKK_GENERIC_SECRET 0x00000010UL +#define CKK_RC2 0x00000011UL +#define CKK_RC4 0x00000012UL +#define CKK_DES 0x00000013UL +#define CKK_DES2 0x00000014UL +#define CKK_DES3 0x00000015UL +#define CKK_CAST 0x00000016UL +#define CKK_CAST3 0x00000017UL +#define CKK_CAST5 0x00000018UL /* Deprecated */ +#define CKK_CAST128 0x00000018UL +#define CKK_RC5 0x00000019UL +#define CKK_IDEA 0x0000001AUL +#define CKK_SKIPJACK 0x0000001BUL +#define CKK_BATON 0x0000001CUL +#define CKK_JUNIPER 0x0000001DUL +#define CKK_CDMF 0x0000001EUL +#define CKK_AES 0x0000001FUL +#define CKK_BLOWFISH 0x00000020UL +#define CKK_TWOFISH 0x00000021UL +#define CKK_SECURID 0x00000022UL +#define CKK_HOTP 0x00000023UL +#define CKK_ACTI 0x00000024UL +#define CKK_CAMELLIA 0x00000025UL +#define CKK_ARIA 0x00000026UL + +#define CKK_MD5_HMAC 0x00000027UL +#define CKK_SHA_1_HMAC 0x00000028UL +#define CKK_RIPEMD128_HMAC 0x00000029UL +#define CKK_RIPEMD160_HMAC 0x0000002AUL +#define CKK_SHA256_HMAC 0x0000002BUL +#define CKK_SHA384_HMAC 0x0000002CUL +#define CKK_SHA512_HMAC 0x0000002DUL +#define CKK_SHA224_HMAC 0x0000002EUL + +#define CKK_SEED 0x0000002FUL +#define CKK_GOSTR3410 0x00000030UL +#define CKK_GOSTR3411 0x00000031UL +#define CKK_GOST28147 0x00000032UL + +#define CKK_SHA3_224_HMAC 0x00000033UL +#define CKK_SHA3_256_HMAC 0x00000034UL +#define CKK_SHA3_384_HMAC 0x00000035UL +#define CKK_SHA3_512_HMAC 0x00000036UL + + + +#define CKK_VENDOR_DEFINED 0x80000000UL + + +/* CK_CERTIFICATE_TYPE is a value that identifies a certificate + * type + */ +typedef CK_ULONG CK_CERTIFICATE_TYPE; + +#define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL +#define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL +#define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL +#define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL + +#define CK_SECURITY_DOMAIN_UNSPECIFIED 0UL +#define CK_SECURITY_DOMAIN_MANUFACTURER 1UL +#define CK_SECURITY_DOMAIN_OPERATOR 2UL +#define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL + + +/* The following certificate types are defined: */ +#define CKC_X_509 0x00000000UL +#define CKC_X_509_ATTR_CERT 0x00000001UL +#define CKC_WTLS 0x00000002UL +#define CKC_VENDOR_DEFINED 0x80000000UL + + +/* CK_ATTRIBUTE_TYPE is a value that identifies an attribute + * type + */ +typedef CK_ULONG CK_ATTRIBUTE_TYPE; + +/* The CKF_ARRAY_ATTRIBUTE flag identifies an attribute which + * consists of an array of values. 
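+ *
+ * Usage sketch (illustrative only): for an array attribute such as
+ * CKA_WRAP_TEMPLATE (defined below), pValue points at an array of
+ * CK_ATTRIBUTE and ulValueLen is that array's size in bytes:
+ *
+ *   CK_BBOOL yes = CK_TRUE;
+ *   CK_ATTRIBUTE inner[] = { {CKA_EXTRACTABLE, &yes, sizeof(yes)} };
+ *   CK_ATTRIBUTE wrapTmpl = { CKA_WRAP_TEMPLATE, inner, sizeof(inner) };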
+ */ +#define CKF_ARRAY_ATTRIBUTE 0x40000000UL + +/* The following OTP-related defines relate to the CKA_OTP_FORMAT attribute */ +#define CK_OTP_FORMAT_DECIMAL 0UL +#define CK_OTP_FORMAT_HEXADECIMAL 1UL +#define CK_OTP_FORMAT_ALPHANUMERIC 2UL +#define CK_OTP_FORMAT_BINARY 3UL + +/* The following OTP-related defines relate to the CKA_OTP_..._REQUIREMENT + * attributes + */ +#define CK_OTP_PARAM_IGNORED 0UL +#define CK_OTP_PARAM_OPTIONAL 1UL +#define CK_OTP_PARAM_MANDATORY 2UL + +/* The following attribute types are defined: */ +#define CKA_CLASS 0x00000000UL +#define CKA_TOKEN 0x00000001UL +#define CKA_PRIVATE 0x00000002UL +#define CKA_LABEL 0x00000003UL +#define CKA_APPLICATION 0x00000010UL +#define CKA_VALUE 0x00000011UL +#define CKA_OBJECT_ID 0x00000012UL +#define CKA_CERTIFICATE_TYPE 0x00000080UL +#define CKA_ISSUER 0x00000081UL +#define CKA_SERIAL_NUMBER 0x00000082UL +#define CKA_AC_ISSUER 0x00000083UL +#define CKA_OWNER 0x00000084UL +#define CKA_ATTR_TYPES 0x00000085UL +#define CKA_TRUSTED 0x00000086UL +#define CKA_CERTIFICATE_CATEGORY 0x00000087UL +#define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL +#define CKA_URL 0x00000089UL +#define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL +#define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL +#define CKA_NAME_HASH_ALGORITHM 0x0000008CUL +#define CKA_CHECK_VALUE 0x00000090UL + +#define CKA_KEY_TYPE 0x00000100UL +#define CKA_SUBJECT 0x00000101UL +#define CKA_ID 0x00000102UL +#define CKA_SENSITIVE 0x00000103UL +#define CKA_ENCRYPT 0x00000104UL +#define CKA_DECRYPT 0x00000105UL +#define CKA_WRAP 0x00000106UL +#define CKA_UNWRAP 0x00000107UL +#define CKA_SIGN 0x00000108UL +#define CKA_SIGN_RECOVER 0x00000109UL +#define CKA_VERIFY 0x0000010AUL +#define CKA_VERIFY_RECOVER 0x0000010BUL +#define CKA_DERIVE 0x0000010CUL +#define CKA_START_DATE 0x00000110UL +#define CKA_END_DATE 0x00000111UL +#define CKA_MODULUS 0x00000120UL +#define CKA_MODULUS_BITS 0x00000121UL +#define CKA_PUBLIC_EXPONENT 0x00000122UL +#define CKA_PRIVATE_EXPONENT 0x00000123UL +#define CKA_PRIME_1 0x00000124UL +#define CKA_PRIME_2 0x00000125UL +#define CKA_EXPONENT_1 0x00000126UL +#define CKA_EXPONENT_2 0x00000127UL +#define CKA_COEFFICIENT 0x00000128UL +#define CKA_PUBLIC_KEY_INFO 0x00000129UL +#define CKA_PRIME 0x00000130UL +#define CKA_SUBPRIME 0x00000131UL +#define CKA_BASE 0x00000132UL + +#define CKA_PRIME_BITS 0x00000133UL +#define CKA_SUBPRIME_BITS 0x00000134UL +#define CKA_SUB_PRIME_BITS CKA_SUBPRIME_BITS + +#define CKA_VALUE_BITS 0x00000160UL +#define CKA_VALUE_LEN 0x00000161UL +#define CKA_EXTRACTABLE 0x00000162UL +#define CKA_LOCAL 0x00000163UL +#define CKA_NEVER_EXTRACTABLE 0x00000164UL +#define CKA_ALWAYS_SENSITIVE 0x00000165UL +#define CKA_KEY_GEN_MECHANISM 0x00000166UL + +#define CKA_MODIFIABLE 0x00000170UL +#define CKA_COPYABLE 0x00000171UL + +#define CKA_DESTROYABLE 0x00000172UL + +#define CKA_ECDSA_PARAMS 0x00000180UL /* Deprecated */ +#define CKA_EC_PARAMS 0x00000180UL + +#define CKA_EC_POINT 0x00000181UL + +#define CKA_SECONDARY_AUTH 0x00000200UL /* Deprecated */ +#define CKA_AUTH_PIN_FLAGS 0x00000201UL /* Deprecated */ + +#define CKA_ALWAYS_AUTHENTICATE 0x00000202UL + +#define CKA_WRAP_WITH_TRUSTED 0x00000210UL +#define CKA_WRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000211UL) +#define CKA_UNWRAP_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000212UL) +#define CKA_DERIVE_TEMPLATE (CKF_ARRAY_ATTRIBUTE|0x00000213UL) + +#define CKA_OTP_FORMAT 0x00000220UL +#define CKA_OTP_LENGTH 0x00000221UL +#define CKA_OTP_TIME_INTERVAL 0x00000222UL +#define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL 
+#define CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL +#define CKA_OTP_TIME_REQUIREMENT 0x00000225UL +#define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL +#define CKA_OTP_PIN_REQUIREMENT 0x00000227UL +#define CKA_OTP_COUNTER 0x0000022EUL +#define CKA_OTP_TIME 0x0000022FUL +#define CKA_OTP_USER_IDENTIFIER 0x0000022AUL +#define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL +#define CKA_OTP_SERVICE_LOGO 0x0000022CUL +#define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL + +#define CKA_GOSTR3410_PARAMS 0x00000250UL +#define CKA_GOSTR3411_PARAMS 0x00000251UL +#define CKA_GOST28147_PARAMS 0x00000252UL + +#define CKA_HW_FEATURE_TYPE 0x00000300UL +#define CKA_RESET_ON_INIT 0x00000301UL +#define CKA_HAS_RESET 0x00000302UL + +#define CKA_PIXEL_X 0x00000400UL +#define CKA_PIXEL_Y 0x00000401UL +#define CKA_RESOLUTION 0x00000402UL +#define CKA_CHAR_ROWS 0x00000403UL +#define CKA_CHAR_COLUMNS 0x00000404UL +#define CKA_COLOR 0x00000405UL +#define CKA_BITS_PER_PIXEL 0x00000406UL +#define CKA_CHAR_SETS 0x00000480UL +#define CKA_ENCODING_METHODS 0x00000481UL +#define CKA_MIME_TYPES 0x00000482UL +#define CKA_MECHANISM_TYPE 0x00000500UL +#define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL +#define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL +#define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL +#define CKA_ALLOWED_MECHANISMS (CKF_ARRAY_ATTRIBUTE|0x00000600UL) + +#define CKA_VENDOR_DEFINED 0x80000000UL + +/* CK_ATTRIBUTE is a structure that includes the type, length + * and value of an attribute + */ +typedef struct CK_ATTRIBUTE { + CK_ATTRIBUTE_TYPE type; + CK_VOID_PTR pValue; + CK_ULONG ulValueLen; /* in bytes */ +} CK_ATTRIBUTE; + +typedef CK_ATTRIBUTE CK_PTR CK_ATTRIBUTE_PTR; + +/* CK_DATE is a structure that defines a date */ +typedef struct CK_DATE{ + CK_CHAR year[4]; /* the year ("1900" - "9999") */ + CK_CHAR month[2]; /* the month ("01" - "12") */ + CK_CHAR day[2]; /* the day ("01" - "31") */ +} CK_DATE; + + +/* CK_MECHANISM_TYPE is a value that identifies a mechanism + * type + */ +typedef CK_ULONG CK_MECHANISM_TYPE; + +/* the following mechanism types are defined: */ +#define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL +#define CKM_RSA_PKCS 0x00000001UL +#define CKM_RSA_9796 0x00000002UL +#define CKM_RSA_X_509 0x00000003UL + +#define CKM_MD2_RSA_PKCS 0x00000004UL +#define CKM_MD5_RSA_PKCS 0x00000005UL +#define CKM_SHA1_RSA_PKCS 0x00000006UL + +#define CKM_RIPEMD128_RSA_PKCS 0x00000007UL +#define CKM_RIPEMD160_RSA_PKCS 0x00000008UL +#define CKM_RSA_PKCS_OAEP 0x00000009UL + +#define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL +#define CKM_RSA_X9_31 0x0000000BUL +#define CKM_SHA1_RSA_X9_31 0x0000000CUL +#define CKM_RSA_PKCS_PSS 0x0000000DUL +#define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL + +#define CKM_DSA_KEY_PAIR_GEN 0x00000010UL +#define CKM_DSA 0x00000011UL +#define CKM_DSA_SHA1 0x00000012UL +#define CKM_DSA_SHA224 0x00000013UL +#define CKM_DSA_SHA256 0x00000014UL +#define CKM_DSA_SHA384 0x00000015UL +#define CKM_DSA_SHA512 0x00000016UL +#define CKM_DSA_SHA3_224 0x00000018UL +#define CKM_DSA_SHA3_256 0x00000019UL +#define CKM_DSA_SHA3_384 0x0000001AUL +#define CKM_DSA_SHA3_512 0x0000001BUL + +#define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL +#define CKM_DH_PKCS_DERIVE 0x00000021UL + +#define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL +#define CKM_X9_42_DH_DERIVE 0x00000031UL +#define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL +#define CKM_X9_42_MQV_DERIVE 0x00000033UL + +#define CKM_SHA256_RSA_PKCS 0x00000040UL +#define CKM_SHA384_RSA_PKCS 0x00000041UL +#define CKM_SHA512_RSA_PKCS 0x00000042UL +#define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL +#define 
CKM_SHA384_RSA_PKCS_PSS 0x00000044UL +#define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL + +#define CKM_SHA224_RSA_PKCS 0x00000046UL +#define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL + +#define CKM_SHA512_224 0x00000048UL +#define CKM_SHA512_224_HMAC 0x00000049UL +#define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL +#define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL +#define CKM_SHA512_256 0x0000004CUL +#define CKM_SHA512_256_HMAC 0x0000004DUL +#define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL +#define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL + +#define CKM_SHA512_T 0x00000050UL +#define CKM_SHA512_T_HMAC 0x00000051UL +#define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL +#define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL + +#define CKM_SHA3_256_RSA_PKCS 0x00000060UL +#define CKM_SHA3_384_RSA_PKCS 0x00000061UL +#define CKM_SHA3_512_RSA_PKCS 0x00000062UL +#define CKM_SHA3_256_RSA_PKCS_PSS 0x00000063UL +#define CKM_SHA3_384_RSA_PKCS_PSS 0x00000064UL +#define CKM_SHA3_512_RSA_PKCS_PSS 0x00000065UL +#define CKM_SHA3_224_RSA_PKCS 0x00000066UL +#define CKM_SHA3_224_RSA_PKCS_PSS 0x00000067UL + +#define CKM_RC2_KEY_GEN 0x00000100UL +#define CKM_RC2_ECB 0x00000101UL +#define CKM_RC2_CBC 0x00000102UL +#define CKM_RC2_MAC 0x00000103UL + +#define CKM_RC2_MAC_GENERAL 0x00000104UL +#define CKM_RC2_CBC_PAD 0x00000105UL + +#define CKM_RC4_KEY_GEN 0x00000110UL +#define CKM_RC4 0x00000111UL +#define CKM_DES_KEY_GEN 0x00000120UL +#define CKM_DES_ECB 0x00000121UL +#define CKM_DES_CBC 0x00000122UL +#define CKM_DES_MAC 0x00000123UL + +#define CKM_DES_MAC_GENERAL 0x00000124UL +#define CKM_DES_CBC_PAD 0x00000125UL + +#define CKM_DES2_KEY_GEN 0x00000130UL +#define CKM_DES3_KEY_GEN 0x00000131UL +#define CKM_DES3_ECB 0x00000132UL +#define CKM_DES3_CBC 0x00000133UL +#define CKM_DES3_MAC 0x00000134UL + +#define CKM_DES3_MAC_GENERAL 0x00000135UL +#define CKM_DES3_CBC_PAD 0x00000136UL +#define CKM_DES3_CMAC_GENERAL 0x00000137UL +#define CKM_DES3_CMAC 0x00000138UL +#define CKM_CDMF_KEY_GEN 0x00000140UL +#define CKM_CDMF_ECB 0x00000141UL +#define CKM_CDMF_CBC 0x00000142UL +#define CKM_CDMF_MAC 0x00000143UL +#define CKM_CDMF_MAC_GENERAL 0x00000144UL +#define CKM_CDMF_CBC_PAD 0x00000145UL + +#define CKM_DES_OFB64 0x00000150UL +#define CKM_DES_OFB8 0x00000151UL +#define CKM_DES_CFB64 0x00000152UL +#define CKM_DES_CFB8 0x00000153UL + +#define CKM_MD2 0x00000200UL + +#define CKM_MD2_HMAC 0x00000201UL +#define CKM_MD2_HMAC_GENERAL 0x00000202UL + +#define CKM_MD5 0x00000210UL + +#define CKM_MD5_HMAC 0x00000211UL +#define CKM_MD5_HMAC_GENERAL 0x00000212UL + +#define CKM_SHA_1 0x00000220UL + +#define CKM_SHA_1_HMAC 0x00000221UL +#define CKM_SHA_1_HMAC_GENERAL 0x00000222UL + +#define CKM_RIPEMD128 0x00000230UL +#define CKM_RIPEMD128_HMAC 0x00000231UL +#define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL +#define CKM_RIPEMD160 0x00000240UL +#define CKM_RIPEMD160_HMAC 0x00000241UL +#define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL + +#define CKM_SHA256 0x00000250UL +#define CKM_SHA256_HMAC 0x00000251UL +#define CKM_SHA256_HMAC_GENERAL 0x00000252UL +#define CKM_SHA224 0x00000255UL +#define CKM_SHA224_HMAC 0x00000256UL +#define CKM_SHA224_HMAC_GENERAL 0x00000257UL +#define CKM_SHA384 0x00000260UL +#define CKM_SHA384_HMAC 0x00000261UL +#define CKM_SHA384_HMAC_GENERAL 0x00000262UL +#define CKM_SHA512 0x00000270UL +#define CKM_SHA512_HMAC 0x00000271UL +#define CKM_SHA512_HMAC_GENERAL 0x00000272UL +#define CKM_SECURID_KEY_GEN 0x00000280UL +#define CKM_SECURID 0x00000282UL +#define CKM_HOTP_KEY_GEN 0x00000290UL +#define CKM_HOTP 0x00000291UL +#define CKM_ACTI 
0x000002A0UL +#define CKM_ACTI_KEY_GEN 0x000002A1UL + +#define CKM_SHA3_256 0x000002B0UL +#define CKM_SHA3_256_HMAC 0x000002B1UL +#define CKM_SHA3_256_HMAC_GENERAL 0x000002B2UL +#define CKM_SHA3_256_KEY_GEN 0x000002B3UL +#define CKM_SHA3_224 0x000002B5UL +#define CKM_SHA3_224_HMAC 0x000002B6UL +#define CKM_SHA3_224_HMAC_GENERAL 0x000002B7UL +#define CKM_SHA3_224_KEY_GEN 0x000002B8UL +#define CKM_SHA3_384 0x000002C0UL +#define CKM_SHA3_384_HMAC 0x000002C1UL +#define CKM_SHA3_384_HMAC_GENERAL 0x000002C2UL +#define CKM_SHA3_384_KEY_GEN 0x000002C3UL +#define CKM_SHA3_512 0x000002D0UL +#define CKM_SHA3_512_HMAC 0x000002D1UL +#define CKM_SHA3_512_HMAC_GENERAL 0x000002D2UL +#define CKM_SHA3_512_KEY_GEN 0x000002D3UL + +#define CKM_CAST_KEY_GEN 0x00000300UL +#define CKM_CAST_ECB 0x00000301UL +#define CKM_CAST_CBC 0x00000302UL +#define CKM_CAST_MAC 0x00000303UL +#define CKM_CAST_MAC_GENERAL 0x00000304UL +#define CKM_CAST_CBC_PAD 0x00000305UL +#define CKM_CAST3_KEY_GEN 0x00000310UL +#define CKM_CAST3_ECB 0x00000311UL +#define CKM_CAST3_CBC 0x00000312UL +#define CKM_CAST3_MAC 0x00000313UL +#define CKM_CAST3_MAC_GENERAL 0x00000314UL +#define CKM_CAST3_CBC_PAD 0x00000315UL +/* Note that CAST128 and CAST5 are the same algorithm */ +#define CKM_CAST5_KEY_GEN 0x00000320UL +#define CKM_CAST128_KEY_GEN 0x00000320UL +#define CKM_CAST5_ECB 0x00000321UL +#define CKM_CAST128_ECB 0x00000321UL +#define CKM_CAST5_CBC 0x00000322UL /* Deprecated */ +#define CKM_CAST128_CBC 0x00000322UL +#define CKM_CAST5_MAC 0x00000323UL /* Deprecated */ +#define CKM_CAST128_MAC 0x00000323UL +#define CKM_CAST5_MAC_GENERAL 0x00000324UL /* Deprecated */ +#define CKM_CAST128_MAC_GENERAL 0x00000324UL +#define CKM_CAST5_CBC_PAD 0x00000325UL /* Deprecated */ +#define CKM_CAST128_CBC_PAD 0x00000325UL +#define CKM_RC5_KEY_GEN 0x00000330UL +#define CKM_RC5_ECB 0x00000331UL +#define CKM_RC5_CBC 0x00000332UL +#define CKM_RC5_MAC 0x00000333UL +#define CKM_RC5_MAC_GENERAL 0x00000334UL +#define CKM_RC5_CBC_PAD 0x00000335UL +#define CKM_IDEA_KEY_GEN 0x00000340UL +#define CKM_IDEA_ECB 0x00000341UL +#define CKM_IDEA_CBC 0x00000342UL +#define CKM_IDEA_MAC 0x00000343UL +#define CKM_IDEA_MAC_GENERAL 0x00000344UL +#define CKM_IDEA_CBC_PAD 0x00000345UL +#define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL +#define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL +#define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL +#define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL +#define CKM_XOR_BASE_AND_DATA 0x00000364UL +#define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL +#define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL +#define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL +#define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL + +#define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL +#define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL +#define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL +#define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL +#define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL + +#define CKM_TLS_PRF 0x00000378UL + +#define CKM_SSL3_MD5_MAC 0x00000380UL +#define CKM_SSL3_SHA1_MAC 0x00000381UL +#define CKM_MD5_KEY_DERIVATION 0x00000390UL +#define CKM_MD2_KEY_DERIVATION 0x00000391UL +#define CKM_SHA1_KEY_DERIVATION 0x00000392UL + +#define CKM_SHA256_KEY_DERIVATION 0x00000393UL +#define CKM_SHA384_KEY_DERIVATION 0x00000394UL +#define CKM_SHA512_KEY_DERIVATION 0x00000395UL +#define CKM_SHA224_KEY_DERIVATION 0x00000396UL +#define CKM_SHA3_256_KEY_DERIVE 0x00000397UL +#define CKM_SHA3_224_KEY_DERIVE 0x00000398UL +#define CKM_SHA3_384_KEY_DERIVE 0x00000399UL +#define CKM_SHA3_512_KEY_DERIVE 0x0000039AUL 
+#define CKM_SHAKE_128_KEY_DERIVE 0x0000039BUL +#define CKM_SHAKE_256_KEY_DERIVE 0x0000039CUL + +#define CKM_PBE_MD2_DES_CBC 0x000003A0UL +#define CKM_PBE_MD5_DES_CBC 0x000003A1UL +#define CKM_PBE_MD5_CAST_CBC 0x000003A2UL +#define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL +#define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL /* Deprecated */ +#define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL +#define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL /* Deprecated */ +#define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL +#define CKM_PBE_SHA1_RC4_128 0x000003A6UL +#define CKM_PBE_SHA1_RC4_40 0x000003A7UL +#define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL +#define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL +#define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL +#define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL + +#define CKM_PKCS5_PBKD2 0x000003B0UL + +#define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL + +#define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL +#define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL +#define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL +#define CKM_WTLS_PRF 0x000003D3UL +#define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL +#define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL + +#define CKM_TLS10_MAC_SERVER 0x000003D6UL +#define CKM_TLS10_MAC_CLIENT 0x000003D7UL +#define CKM_TLS12_MAC 0x000003D8UL +#define CKM_TLS12_KDF 0x000003D9UL +#define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL +#define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL +#define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL +#define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL +#define CKM_TLS_MAC 0x000003E4UL +#define CKM_TLS_KDF 0x000003E5UL + +#define CKM_KEY_WRAP_LYNKS 0x00000400UL +#define CKM_KEY_WRAP_SET_OAEP 0x00000401UL + +#define CKM_CMS_SIG 0x00000500UL +#define CKM_KIP_DERIVE 0x00000510UL +#define CKM_KIP_WRAP 0x00000511UL +#define CKM_KIP_MAC 0x00000512UL + +#define CKM_CAMELLIA_KEY_GEN 0x00000550UL +#define CKM_CAMELLIA_ECB 0x00000551UL +#define CKM_CAMELLIA_CBC 0x00000552UL +#define CKM_CAMELLIA_MAC 0x00000553UL +#define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL +#define CKM_CAMELLIA_CBC_PAD 0x00000555UL +#define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL +#define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL +#define CKM_CAMELLIA_CTR 0x00000558UL + +#define CKM_ARIA_KEY_GEN 0x00000560UL +#define CKM_ARIA_ECB 0x00000561UL +#define CKM_ARIA_CBC 0x00000562UL +#define CKM_ARIA_MAC 0x00000563UL +#define CKM_ARIA_MAC_GENERAL 0x00000564UL +#define CKM_ARIA_CBC_PAD 0x00000565UL +#define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL +#define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL + +#define CKM_SEED_KEY_GEN 0x00000650UL +#define CKM_SEED_ECB 0x00000651UL +#define CKM_SEED_CBC 0x00000652UL +#define CKM_SEED_MAC 0x00000653UL +#define CKM_SEED_MAC_GENERAL 0x00000654UL +#define CKM_SEED_CBC_PAD 0x00000655UL +#define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL +#define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL + +#define CKM_SKIPJACK_KEY_GEN 0x00001000UL +#define CKM_SKIPJACK_ECB64 0x00001001UL +#define CKM_SKIPJACK_CBC64 0x00001002UL +#define CKM_SKIPJACK_OFB64 0x00001003UL +#define CKM_SKIPJACK_CFB64 0x00001004UL +#define CKM_SKIPJACK_CFB32 0x00001005UL +#define CKM_SKIPJACK_CFB16 0x00001006UL +#define CKM_SKIPJACK_CFB8 0x00001007UL +#define CKM_SKIPJACK_WRAP 0x00001008UL +#define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL +#define CKM_SKIPJACK_RELAYX 0x0000100aUL +#define CKM_KEA_KEY_PAIR_GEN 0x00001010UL +#define CKM_KEA_KEY_DERIVE 0x00001011UL +#define CKM_KEA_DERIVE 0x00001012UL +#define CKM_FORTEZZA_TIMESTAMP 0x00001020UL +#define CKM_BATON_KEY_GEN 0x00001030UL +#define CKM_BATON_ECB128 
0x00001031UL +#define CKM_BATON_ECB96 0x00001032UL +#define CKM_BATON_CBC128 0x00001033UL +#define CKM_BATON_COUNTER 0x00001034UL +#define CKM_BATON_SHUFFLE 0x00001035UL +#define CKM_BATON_WRAP 0x00001036UL + +#define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL /* Deprecated */ +#define CKM_EC_KEY_PAIR_GEN 0x00001040UL + +#define CKM_ECDSA 0x00001041UL +#define CKM_ECDSA_SHA1 0x00001042UL +#define CKM_ECDSA_SHA224 0x00001043UL +#define CKM_ECDSA_SHA256 0x00001044UL +#define CKM_ECDSA_SHA384 0x00001045UL +#define CKM_ECDSA_SHA512 0x00001046UL + +#define CKM_ECDH1_DERIVE 0x00001050UL +#define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL +#define CKM_ECMQV_DERIVE 0x00001052UL + +#define CKM_ECDH_AES_KEY_WRAP 0x00001053UL +#define CKM_RSA_AES_KEY_WRAP 0x00001054UL + +#define CKM_JUNIPER_KEY_GEN 0x00001060UL +#define CKM_JUNIPER_ECB128 0x00001061UL +#define CKM_JUNIPER_CBC128 0x00001062UL +#define CKM_JUNIPER_COUNTER 0x00001063UL +#define CKM_JUNIPER_SHUFFLE 0x00001064UL +#define CKM_JUNIPER_WRAP 0x00001065UL +#define CKM_FASTHASH 0x00001070UL + +#define CKM_AES_KEY_GEN 0x00001080UL +#define CKM_AES_ECB 0x00001081UL +#define CKM_AES_CBC 0x00001082UL +#define CKM_AES_MAC 0x00001083UL +#define CKM_AES_MAC_GENERAL 0x00001084UL +#define CKM_AES_CBC_PAD 0x00001085UL +#define CKM_AES_CTR 0x00001086UL +#define CKM_AES_GCM 0x00001087UL +#define CKM_AES_CCM 0x00001088UL +#define CKM_AES_CTS 0x00001089UL +#define CKM_AES_CMAC 0x0000108AUL +#define CKM_AES_CMAC_GENERAL 0x0000108BUL + +#define CKM_AES_XCBC_MAC 0x0000108CUL +#define CKM_AES_XCBC_MAC_96 0x0000108DUL +#define CKM_AES_GMAC 0x0000108EUL + +#define CKM_BLOWFISH_KEY_GEN 0x00001090UL +#define CKM_BLOWFISH_CBC 0x00001091UL +#define CKM_TWOFISH_KEY_GEN 0x00001092UL +#define CKM_TWOFISH_CBC 0x00001093UL +#define CKM_BLOWFISH_CBC_PAD 0x00001094UL +#define CKM_TWOFISH_CBC_PAD 0x00001095UL + +#define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL +#define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL +#define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL +#define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL +#define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL +#define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL + +#define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL +#define CKM_GOSTR3410 0x00001201UL +#define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL +#define CKM_GOSTR3410_KEY_WRAP 0x00001203UL +#define CKM_GOSTR3410_DERIVE 0x00001204UL +#define CKM_GOSTR3411 0x00001210UL +#define CKM_GOSTR3411_HMAC 0x00001211UL +#define CKM_GOST28147_KEY_GEN 0x00001220UL +#define CKM_GOST28147_ECB 0x00001221UL +#define CKM_GOST28147 0x00001222UL +#define CKM_GOST28147_MAC 0x00001223UL +#define CKM_GOST28147_KEY_WRAP 0x00001224UL + +#define CKM_DSA_PARAMETER_GEN 0x00002000UL +#define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL +#define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL +#define CKM_DSA_PROBABLISTIC_PARAMETER_GEN 0x00002003UL +#define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL + +#define CKM_AES_OFB 0x00002104UL +#define CKM_AES_CFB64 0x00002105UL +#define CKM_AES_CFB8 0x00002106UL +#define CKM_AES_CFB128 0x00002107UL + +#define CKM_AES_CFB1 0x00002108UL +#define CKM_AES_KEY_WRAP 0x00002109UL /* WAS: 0x00001090 */ +#define CKM_AES_KEY_WRAP_PAD 0x0000210AUL /* WAS: 0x00001091 */ + +#define CKM_RSA_PKCS_TPM_1_1 0x00004001UL +#define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL + +#define CKM_VENDOR_DEFINED 0x80000000UL + +typedef CK_MECHANISM_TYPE CK_PTR CK_MECHANISM_TYPE_PTR; + + +/* CK_MECHANISM is a structure that specifies a particular + * mechanism + */ +typedef struct CK_MECHANISM { + CK_MECHANISM_TYPE mechanism; + 
CK_VOID_PTR pParameter; + CK_ULONG ulParameterLen; /* in bytes */ +} CK_MECHANISM; + +typedef CK_MECHANISM CK_PTR CK_MECHANISM_PTR; + + +/* CK_MECHANISM_INFO provides information about a particular + * mechanism + */ +typedef struct CK_MECHANISM_INFO { + CK_ULONG ulMinKeySize; + CK_ULONG ulMaxKeySize; + CK_FLAGS flags; +} CK_MECHANISM_INFO; + +/* The flags are defined as follows: + * Bit Flag Mask Meaning */ +#define CKF_HW 0x00000001UL /* performed by HW */ + +/* Specify whether or not a mechanism can be used for a particular task */ +#define CKF_ENCRYPT 0x00000100UL +#define CKF_DECRYPT 0x00000200UL +#define CKF_DIGEST 0x00000400UL +#define CKF_SIGN 0x00000800UL +#define CKF_SIGN_RECOVER 0x00001000UL +#define CKF_VERIFY 0x00002000UL +#define CKF_VERIFY_RECOVER 0x00004000UL +#define CKF_GENERATE 0x00008000UL +#define CKF_GENERATE_KEY_PAIR 0x00010000UL +#define CKF_WRAP 0x00020000UL +#define CKF_UNWRAP 0x00040000UL +#define CKF_DERIVE 0x00080000UL + +/* Describe a token's EC capabilities not available in mechanism + * information. + */ +#define CKF_EC_F_P 0x00100000UL +#define CKF_EC_F_2M 0x00200000UL +#define CKF_EC_ECPARAMETERS 0x00400000UL +#define CKF_EC_NAMEDCURVE 0x00800000UL +#define CKF_EC_UNCOMPRESS 0x01000000UL +#define CKF_EC_COMPRESS 0x02000000UL + +#define CKF_EXTENSION 0x80000000UL + +typedef CK_MECHANISM_INFO CK_PTR CK_MECHANISM_INFO_PTR; + +/* CK_RV is a value that identifies the return value of a + * Cryptoki function + */ +typedef CK_ULONG CK_RV; + +#define CKR_OK 0x00000000UL +#define CKR_CANCEL 0x00000001UL +#define CKR_HOST_MEMORY 0x00000002UL +#define CKR_SLOT_ID_INVALID 0x00000003UL + +#define CKR_GENERAL_ERROR 0x00000005UL +#define CKR_FUNCTION_FAILED 0x00000006UL + +#define CKR_ARGUMENTS_BAD 0x00000007UL +#define CKR_NO_EVENT 0x00000008UL +#define CKR_NEED_TO_CREATE_THREADS 0x00000009UL +#define CKR_CANT_LOCK 0x0000000AUL + +#define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL +#define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL +#define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL +#define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL + +#define CKR_ACTION_PROHIBITED 0x0000001BUL + +#define CKR_DATA_INVALID 0x00000020UL +#define CKR_DATA_LEN_RANGE 0x00000021UL +#define CKR_DEVICE_ERROR 0x00000030UL +#define CKR_DEVICE_MEMORY 0x00000031UL +#define CKR_DEVICE_REMOVED 0x00000032UL +#define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL +#define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL +#define CKR_FUNCTION_CANCELED 0x00000050UL +#define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL + +#define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL + +#define CKR_KEY_HANDLE_INVALID 0x00000060UL + +#define CKR_KEY_SIZE_RANGE 0x00000062UL +#define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL + +#define CKR_KEY_NOT_NEEDED 0x00000064UL +#define CKR_KEY_CHANGED 0x00000065UL +#define CKR_KEY_NEEDED 0x00000066UL +#define CKR_KEY_INDIGESTIBLE 0x00000067UL +#define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL +#define CKR_KEY_NOT_WRAPPABLE 0x00000069UL +#define CKR_KEY_UNEXTRACTABLE 0x0000006AUL + +#define CKR_MECHANISM_INVALID 0x00000070UL +#define CKR_MECHANISM_PARAM_INVALID 0x00000071UL + +#define CKR_OBJECT_HANDLE_INVALID 0x00000082UL +#define CKR_OPERATION_ACTIVE 0x00000090UL +#define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL +#define CKR_PIN_INCORRECT 0x000000A0UL +#define CKR_PIN_INVALID 0x000000A1UL +#define CKR_PIN_LEN_RANGE 0x000000A2UL + +#define CKR_PIN_EXPIRED 0x000000A3UL +#define CKR_PIN_LOCKED 0x000000A4UL + +#define CKR_SESSION_CLOSED 0x000000B0UL +#define CKR_SESSION_COUNT 0x000000B1UL +#define 
CKR_SESSION_HANDLE_INVALID 0x000000B3UL +#define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL +#define CKR_SESSION_READ_ONLY 0x000000B5UL +#define CKR_SESSION_EXISTS 0x000000B6UL + +#define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL +#define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL + +#define CKR_SIGNATURE_INVALID 0x000000C0UL +#define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL +#define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL +#define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL +#define CKR_TOKEN_NOT_PRESENT 0x000000E0UL +#define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL +#define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL +#define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL +#define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL +#define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL +#define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL +#define CKR_USER_NOT_LOGGED_IN 0x00000101UL +#define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL +#define CKR_USER_TYPE_INVALID 0x00000103UL + +#define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL +#define CKR_USER_TOO_MANY_TYPES 0x00000105UL + +#define CKR_WRAPPED_KEY_INVALID 0x00000110UL +#define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL +#define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL +#define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL +#define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL +#define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL + +#define CKR_RANDOM_NO_RNG 0x00000121UL + +#define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL + +#define CKR_CURVE_NOT_SUPPORTED 0x00000140UL + +#define CKR_BUFFER_TOO_SMALL 0x00000150UL +#define CKR_SAVED_STATE_INVALID 0x00000160UL +#define CKR_INFORMATION_SENSITIVE 0x00000170UL +#define CKR_STATE_UNSAVEABLE 0x00000180UL + +#define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL +#define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL +#define CKR_MUTEX_BAD 0x000001A0UL +#define CKR_MUTEX_NOT_LOCKED 0x000001A1UL + +#define CKR_NEW_PIN_MODE 0x000001B0UL +#define CKR_NEXT_OTP 0x000001B1UL + +#define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL +#define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL +#define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL +#define CKR_PIN_TOO_WEAK 0x000001B8UL +#define CKR_PUBLIC_KEY_INVALID 0x000001B9UL + +#define CKR_FUNCTION_REJECTED 0x00000200UL + +#define CKR_VENDOR_DEFINED 0x80000000UL + + +/* CK_NOTIFY is an application callback that processes events */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_NOTIFY)( + CK_SESSION_HANDLE hSession, /* the session's handle */ + CK_NOTIFICATION event, + CK_VOID_PTR pApplication /* passed to C_OpenSession */ +); + + +/* CK_FUNCTION_LIST is a structure holding a Cryptoki spec + * version and pointers of appropriate types to all the + * Cryptoki functions + */ +typedef struct CK_FUNCTION_LIST CK_FUNCTION_LIST; + +typedef CK_FUNCTION_LIST CK_PTR CK_FUNCTION_LIST_PTR; + +typedef CK_FUNCTION_LIST_PTR CK_PTR CK_FUNCTION_LIST_PTR_PTR; + + +/* CK_CREATEMUTEX is an application callback for creating a + * mutex object + */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_CREATEMUTEX)( + CK_VOID_PTR_PTR ppMutex /* location to receive ptr to mutex */ +); + + +/* CK_DESTROYMUTEX is an application callback for destroying a + * mutex object + */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_DESTROYMUTEX)( + CK_VOID_PTR pMutex /* pointer to mutex */ +); + + +/* CK_LOCKMUTEX is an application callback for locking a mutex */ +typedef CK_CALLBACK_FUNCTION(CK_RV, CK_LOCKMUTEX)( + CK_VOID_PTR pMutex /* pointer to mutex */ +); + + +/* CK_UNLOCKMUTEX is an application callback for unlocking a + * mutex + */ +typedef 
CK_CALLBACK_FUNCTION(CK_RV, CK_UNLOCKMUTEX)( + CK_VOID_PTR pMutex /* pointer to mutex */ +); + + +/* CK_C_INITIALIZE_ARGS provides the optional arguments to + * C_Initialize + */ +typedef struct CK_C_INITIALIZE_ARGS { + CK_CREATEMUTEX CreateMutex; + CK_DESTROYMUTEX DestroyMutex; + CK_LOCKMUTEX LockMutex; + CK_UNLOCKMUTEX UnlockMutex; + CK_FLAGS flags; + CK_VOID_PTR pReserved; +} CK_C_INITIALIZE_ARGS; + +/* flags: bit flags that provide capabilities of the slot + * Bit Flag Mask Meaning + */ +#define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL +#define CKF_OS_LOCKING_OK 0x00000002UL + +typedef CK_C_INITIALIZE_ARGS CK_PTR CK_C_INITIALIZE_ARGS_PTR; + + +/* additional flags for parameters to functions */ + +/* CKF_DONT_BLOCK is for the function C_WaitForSlotEvent */ +#define CKF_DONT_BLOCK 1 + +/* CK_RSA_PKCS_MGF_TYPE is used to indicate the Message + * Generation Function (MGF) applied to a message block when + * formatting a message block for the PKCS #1 OAEP encryption + * scheme. + */ +typedef CK_ULONG CK_RSA_PKCS_MGF_TYPE; + +typedef CK_RSA_PKCS_MGF_TYPE CK_PTR CK_RSA_PKCS_MGF_TYPE_PTR; + +/* The following MGFs are defined */ +#define CKG_MGF1_SHA1 0x00000001UL +#define CKG_MGF1_SHA256 0x00000002UL +#define CKG_MGF1_SHA384 0x00000003UL +#define CKG_MGF1_SHA512 0x00000004UL +#define CKG_MGF1_SHA224 0x00000005UL + +/* CK_RSA_PKCS_OAEP_SOURCE_TYPE is used to indicate the source + * of the encoding parameter when formatting a message block + * for the PKCS #1 OAEP encryption scheme. + */ +typedef CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE; + +typedef CK_RSA_PKCS_OAEP_SOURCE_TYPE CK_PTR CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR; + +/* The following encoding parameter sources are defined */ +#define CKZ_DATA_SPECIFIED 0x00000001UL + +/* CK_RSA_PKCS_OAEP_PARAMS provides the parameters to the + * CKM_RSA_PKCS_OAEP mechanism. + */ +typedef struct CK_RSA_PKCS_OAEP_PARAMS { + CK_MECHANISM_TYPE hashAlg; + CK_RSA_PKCS_MGF_TYPE mgf; + CK_RSA_PKCS_OAEP_SOURCE_TYPE source; + CK_VOID_PTR pSourceData; + CK_ULONG ulSourceDataLen; +} CK_RSA_PKCS_OAEP_PARAMS; + +typedef CK_RSA_PKCS_OAEP_PARAMS CK_PTR CK_RSA_PKCS_OAEP_PARAMS_PTR; + +/* CK_RSA_PKCS_PSS_PARAMS provides the parameters to the + * CKM_RSA_PKCS_PSS mechanism(s). + */ +typedef struct CK_RSA_PKCS_PSS_PARAMS { + CK_MECHANISM_TYPE hashAlg; + CK_RSA_PKCS_MGF_TYPE mgf; + CK_ULONG sLen; +} CK_RSA_PKCS_PSS_PARAMS; + +typedef CK_RSA_PKCS_PSS_PARAMS CK_PTR CK_RSA_PKCS_PSS_PARAMS_PTR; + +typedef CK_ULONG CK_EC_KDF_TYPE; + +/* The following EC Key Derivation Functions are defined */ +#define CKD_NULL 0x00000001UL +#define CKD_SHA1_KDF 0x00000002UL + +/* The following X9.42 DH key derivation functions are defined */ +#define CKD_SHA1_KDF_ASN1 0x00000003UL +#define CKD_SHA1_KDF_CONCATENATE 0x00000004UL +#define CKD_SHA224_KDF 0x00000005UL +#define CKD_SHA256_KDF 0x00000006UL +#define CKD_SHA384_KDF 0x00000007UL +#define CKD_SHA512_KDF 0x00000008UL +#define CKD_CPDIVERSIFY_KDF 0x00000009UL +#define CKD_SHA3_224_KDF 0x0000000AUL +#define CKD_SHA3_256_KDF 0x0000000BUL +#define CKD_SHA3_384_KDF 0x0000000CUL +#define CKD_SHA3_512_KDF 0x0000000DUL + +/* CK_ECDH1_DERIVE_PARAMS provides the parameters to the + * CKM_ECDH1_DERIVE and CKM_ECDH1_COFACTOR_DERIVE mechanisms, + * where each party contributes one key pair. 
+ */ +typedef struct CK_ECDH1_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; +} CK_ECDH1_DERIVE_PARAMS; + +typedef CK_ECDH1_DERIVE_PARAMS CK_PTR CK_ECDH1_DERIVE_PARAMS_PTR; + +/* + * CK_ECDH2_DERIVE_PARAMS provides the parameters to the + * CKM_ECMQV_DERIVE mechanism, where each party contributes two key pairs. + */ +typedef struct CK_ECDH2_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; +} CK_ECDH2_DERIVE_PARAMS; + +typedef CK_ECDH2_DERIVE_PARAMS CK_PTR CK_ECDH2_DERIVE_PARAMS_PTR; + +typedef struct CK_ECMQV_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; + CK_OBJECT_HANDLE publicKey; +} CK_ECMQV_DERIVE_PARAMS; + +typedef CK_ECMQV_DERIVE_PARAMS CK_PTR CK_ECMQV_DERIVE_PARAMS_PTR; + +/* Typedefs and defines for the CKM_X9_42_DH_KEY_PAIR_GEN and the + * CKM_X9_42_DH_PARAMETER_GEN mechanisms + */ +typedef CK_ULONG CK_X9_42_DH_KDF_TYPE; +typedef CK_X9_42_DH_KDF_TYPE CK_PTR CK_X9_42_DH_KDF_TYPE_PTR; + +/* CK_X9_42_DH1_DERIVE_PARAMS provides the parameters to the + * CKM_X9_42_DH_DERIVE key derivation mechanism, where each party + * contributes one key pair + */ +typedef struct CK_X9_42_DH1_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE_PTR pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; +} CK_X9_42_DH1_DERIVE_PARAMS; + +typedef struct CK_X9_42_DH1_DERIVE_PARAMS CK_PTR CK_X9_42_DH1_DERIVE_PARAMS_PTR; + +/* CK_X9_42_DH2_DERIVE_PARAMS provides the parameters to the + * CKM_X9_42_DH_HYBRID_DERIVE and CKM_X9_42_MQV_DERIVE key derivation + * mechanisms, where each party contributes two key pairs + */ +typedef struct CK_X9_42_DH2_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE_PTR pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; +} CK_X9_42_DH2_DERIVE_PARAMS; + +typedef CK_X9_42_DH2_DERIVE_PARAMS CK_PTR CK_X9_42_DH2_DERIVE_PARAMS_PTR; + +typedef struct CK_X9_42_MQV_DERIVE_PARAMS { + CK_X9_42_DH_KDF_TYPE kdf; + CK_ULONG ulOtherInfoLen; + CK_BYTE_PTR pOtherInfo; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPrivateDataLen; + CK_OBJECT_HANDLE hPrivateData; + CK_ULONG ulPublicDataLen2; + CK_BYTE_PTR pPublicData2; + CK_OBJECT_HANDLE publicKey; +} CK_X9_42_MQV_DERIVE_PARAMS; + +typedef CK_X9_42_MQV_DERIVE_PARAMS CK_PTR CK_X9_42_MQV_DERIVE_PARAMS_PTR; + +/* CK_KEA_DERIVE_PARAMS provides the parameters to the + * CKM_KEA_DERIVE mechanism + */ +typedef struct CK_KEA_DERIVE_PARAMS { + CK_BBOOL isSender; + CK_ULONG ulRandomLen; + CK_BYTE_PTR pRandomA; + CK_BYTE_PTR pRandomB; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; +} CK_KEA_DERIVE_PARAMS; + +typedef CK_KEA_DERIVE_PARAMS CK_PTR CK_KEA_DERIVE_PARAMS_PTR; + + +/* CK_RC2_PARAMS provides the parameters to the CKM_RC2_ECB and + * CKM_RC2_MAC mechanisms. 
An instance of CK_RC2_PARAMS just + * holds the effective keysize + */ +typedef CK_ULONG CK_RC2_PARAMS; + +typedef CK_RC2_PARAMS CK_PTR CK_RC2_PARAMS_PTR; + + +/* CK_RC2_CBC_PARAMS provides the parameters to the CKM_RC2_CBC + * mechanism + */ +typedef struct CK_RC2_CBC_PARAMS { + CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ + CK_BYTE iv[8]; /* IV for CBC mode */ +} CK_RC2_CBC_PARAMS; + +typedef CK_RC2_CBC_PARAMS CK_PTR CK_RC2_CBC_PARAMS_PTR; + + +/* CK_RC2_MAC_GENERAL_PARAMS provides the parameters for the + * CKM_RC2_MAC_GENERAL mechanism + */ +typedef struct CK_RC2_MAC_GENERAL_PARAMS { + CK_ULONG ulEffectiveBits; /* effective bits (1-1024) */ + CK_ULONG ulMacLength; /* Length of MAC in bytes */ +} CK_RC2_MAC_GENERAL_PARAMS; + +typedef CK_RC2_MAC_GENERAL_PARAMS CK_PTR \ + CK_RC2_MAC_GENERAL_PARAMS_PTR; + + +/* CK_RC5_PARAMS provides the parameters to the CKM_RC5_ECB and + * CKM_RC5_MAC mechanisms + */ +typedef struct CK_RC5_PARAMS { + CK_ULONG ulWordsize; /* wordsize in bits */ + CK_ULONG ulRounds; /* number of rounds */ +} CK_RC5_PARAMS; + +typedef CK_RC5_PARAMS CK_PTR CK_RC5_PARAMS_PTR; + + +/* CK_RC5_CBC_PARAMS provides the parameters to the CKM_RC5_CBC + * mechanism + */ +typedef struct CK_RC5_CBC_PARAMS { + CK_ULONG ulWordsize; /* wordsize in bits */ + CK_ULONG ulRounds; /* number of rounds */ + CK_BYTE_PTR pIv; /* pointer to IV */ + CK_ULONG ulIvLen; /* length of IV in bytes */ +} CK_RC5_CBC_PARAMS; + +typedef CK_RC5_CBC_PARAMS CK_PTR CK_RC5_CBC_PARAMS_PTR; + + +/* CK_RC5_MAC_GENERAL_PARAMS provides the parameters for the + * CKM_RC5_MAC_GENERAL mechanism + */ +typedef struct CK_RC5_MAC_GENERAL_PARAMS { + CK_ULONG ulWordsize; /* wordsize in bits */ + CK_ULONG ulRounds; /* number of rounds */ + CK_ULONG ulMacLength; /* Length of MAC in bytes */ +} CK_RC5_MAC_GENERAL_PARAMS; + +typedef CK_RC5_MAC_GENERAL_PARAMS CK_PTR \ + CK_RC5_MAC_GENERAL_PARAMS_PTR; + +/* CK_MAC_GENERAL_PARAMS provides the parameters to most block + * ciphers' MAC_GENERAL mechanisms. 
Its value is the length of + * the MAC + */ +typedef CK_ULONG CK_MAC_GENERAL_PARAMS; + +typedef CK_MAC_GENERAL_PARAMS CK_PTR CK_MAC_GENERAL_PARAMS_PTR; + +typedef struct CK_DES_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[8]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_DES_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_DES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR; + +typedef struct CK_AES_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_AES_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_AES_CBC_ENCRYPT_DATA_PARAMS CK_PTR CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR; + +/* CK_SKIPJACK_PRIVATE_WRAP_PARAMS provides the parameters to the + * CKM_SKIPJACK_PRIVATE_WRAP mechanism + */ +typedef struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS { + CK_ULONG ulPasswordLen; + CK_BYTE_PTR pPassword; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPAndGLen; + CK_ULONG ulQLen; + CK_ULONG ulRandomLen; + CK_BYTE_PTR pRandomA; + CK_BYTE_PTR pPrimeP; + CK_BYTE_PTR pBaseG; + CK_BYTE_PTR pSubprimeQ; +} CK_SKIPJACK_PRIVATE_WRAP_PARAMS; + +typedef CK_SKIPJACK_PRIVATE_WRAP_PARAMS CK_PTR \ + CK_SKIPJACK_PRIVATE_WRAP_PARAMS_PTR; + + +/* CK_SKIPJACK_RELAYX_PARAMS provides the parameters to the + * CKM_SKIPJACK_RELAYX mechanism + */ +typedef struct CK_SKIPJACK_RELAYX_PARAMS { + CK_ULONG ulOldWrappedXLen; + CK_BYTE_PTR pOldWrappedX; + CK_ULONG ulOldPasswordLen; + CK_BYTE_PTR pOldPassword; + CK_ULONG ulOldPublicDataLen; + CK_BYTE_PTR pOldPublicData; + CK_ULONG ulOldRandomLen; + CK_BYTE_PTR pOldRandomA; + CK_ULONG ulNewPasswordLen; + CK_BYTE_PTR pNewPassword; + CK_ULONG ulNewPublicDataLen; + CK_BYTE_PTR pNewPublicData; + CK_ULONG ulNewRandomLen; + CK_BYTE_PTR pNewRandomA; +} CK_SKIPJACK_RELAYX_PARAMS; + +typedef CK_SKIPJACK_RELAYX_PARAMS CK_PTR \ + CK_SKIPJACK_RELAYX_PARAMS_PTR; + + +typedef struct CK_PBE_PARAMS { + CK_BYTE_PTR pInitVector; + CK_UTF8CHAR_PTR pPassword; + CK_ULONG ulPasswordLen; + CK_BYTE_PTR pSalt; + CK_ULONG ulSaltLen; + CK_ULONG ulIteration; +} CK_PBE_PARAMS; + +typedef CK_PBE_PARAMS CK_PTR CK_PBE_PARAMS_PTR; + + +/* CK_KEY_WRAP_SET_OAEP_PARAMS provides the parameters to the + * CKM_KEY_WRAP_SET_OAEP mechanism + */ +typedef struct CK_KEY_WRAP_SET_OAEP_PARAMS { + CK_BYTE bBC; /* block contents byte */ + CK_BYTE_PTR pX; /* extra data */ + CK_ULONG ulXLen; /* length of extra data in bytes */ +} CK_KEY_WRAP_SET_OAEP_PARAMS; + +typedef CK_KEY_WRAP_SET_OAEP_PARAMS CK_PTR CK_KEY_WRAP_SET_OAEP_PARAMS_PTR; + +typedef struct CK_SSL3_RANDOM_DATA { + CK_BYTE_PTR pClientRandom; + CK_ULONG ulClientRandomLen; + CK_BYTE_PTR pServerRandom; + CK_ULONG ulServerRandomLen; +} CK_SSL3_RANDOM_DATA; + + +typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS { + CK_SSL3_RANDOM_DATA RandomInfo; + CK_VERSION_PTR pVersion; +} CK_SSL3_MASTER_KEY_DERIVE_PARAMS; + +typedef struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS CK_PTR \ + CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR; + +typedef struct CK_SSL3_KEY_MAT_OUT { + CK_OBJECT_HANDLE hClientMacSecret; + CK_OBJECT_HANDLE hServerMacSecret; + CK_OBJECT_HANDLE hClientKey; + CK_OBJECT_HANDLE hServerKey; + CK_BYTE_PTR pIVClient; + CK_BYTE_PTR pIVServer; +} CK_SSL3_KEY_MAT_OUT; + +typedef CK_SSL3_KEY_MAT_OUT CK_PTR CK_SSL3_KEY_MAT_OUT_PTR; + + +typedef struct CK_SSL3_KEY_MAT_PARAMS { + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_BBOOL bIsExport; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; +} CK_SSL3_KEY_MAT_PARAMS; + +typedef CK_SSL3_KEY_MAT_PARAMS CK_PTR CK_SSL3_KEY_MAT_PARAMS_PTR; + 
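The mechanism types, CKF_* capability flags, and parameter structures defined above are surfaced as plain `uint` constants and helper types by the miekg/pkcs11 Go binding vendored further down in this diff. A minimal sketch of how a consumer might query them — assuming an already-initialized `*pkcs11.Ctx` and a valid slot ID, and using the binding's `GetMechanismList` and `GetMechanismInfo` wrappers — listing every mechanism on a slot that advertises `CKF_ENCRYPT`:

```
// mechinfo.go - illustrative sketch only; not part of the vendored sources.
package mechinfo

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

// ListEncryptMechanisms prints every mechanism on slotID whose
// CK_MECHANISM_INFO flags include CKF_ENCRYPT, along with the
// supported key-size range reported by the token.
func ListEncryptMechanisms(p *pkcs11.Ctx, slotID uint) error {
	mechs, err := p.GetMechanismList(slotID)
	if err != nil {
		return err
	}
	for _, m := range mechs {
		info, err := p.GetMechanismInfo(slotID, []*pkcs11.Mechanism{m})
		if err != nil {
			return err
		}
		if info.Flags&pkcs11.CKF_ENCRYPT != 0 {
			fmt.Printf("mechanism 0x%X: keys %d..%d bits\n",
				m.Mechanism, info.MinKeySize, info.MaxKeySize)
		}
	}
	return nil
}
```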
+typedef struct CK_TLS_PRF_PARAMS { + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; + CK_BYTE_PTR pLabel; + CK_ULONG ulLabelLen; + CK_BYTE_PTR pOutput; + CK_ULONG_PTR pulOutputLen; +} CK_TLS_PRF_PARAMS; + +typedef CK_TLS_PRF_PARAMS CK_PTR CK_TLS_PRF_PARAMS_PTR; + +typedef struct CK_WTLS_RANDOM_DATA { + CK_BYTE_PTR pClientRandom; + CK_ULONG ulClientRandomLen; + CK_BYTE_PTR pServerRandom; + CK_ULONG ulServerRandomLen; +} CK_WTLS_RANDOM_DATA; + +typedef CK_WTLS_RANDOM_DATA CK_PTR CK_WTLS_RANDOM_DATA_PTR; + +typedef struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_WTLS_RANDOM_DATA RandomInfo; + CK_BYTE_PTR pVersion; +} CK_WTLS_MASTER_KEY_DERIVE_PARAMS; + +typedef CK_WTLS_MASTER_KEY_DERIVE_PARAMS CK_PTR \ + CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR; + +typedef struct CK_WTLS_PRF_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; + CK_BYTE_PTR pLabel; + CK_ULONG ulLabelLen; + CK_BYTE_PTR pOutput; + CK_ULONG_PTR pulOutputLen; +} CK_WTLS_PRF_PARAMS; + +typedef CK_WTLS_PRF_PARAMS CK_PTR CK_WTLS_PRF_PARAMS_PTR; + +typedef struct CK_WTLS_KEY_MAT_OUT { + CK_OBJECT_HANDLE hMacSecret; + CK_OBJECT_HANDLE hKey; + CK_BYTE_PTR pIV; +} CK_WTLS_KEY_MAT_OUT; + +typedef CK_WTLS_KEY_MAT_OUT CK_PTR CK_WTLS_KEY_MAT_OUT_PTR; + +typedef struct CK_WTLS_KEY_MAT_PARAMS { + CK_MECHANISM_TYPE DigestMechanism; + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_ULONG ulSequenceNumber; + CK_BBOOL bIsExport; + CK_WTLS_RANDOM_DATA RandomInfo; + CK_WTLS_KEY_MAT_OUT_PTR pReturnedKeyMaterial; +} CK_WTLS_KEY_MAT_PARAMS; + +typedef CK_WTLS_KEY_MAT_PARAMS CK_PTR CK_WTLS_KEY_MAT_PARAMS_PTR; + +typedef struct CK_CMS_SIG_PARAMS { + CK_OBJECT_HANDLE certificateHandle; + CK_MECHANISM_PTR pSigningMechanism; + CK_MECHANISM_PTR pDigestMechanism; + CK_UTF8CHAR_PTR pContentType; + CK_BYTE_PTR pRequestedAttributes; + CK_ULONG ulRequestedAttributesLen; + CK_BYTE_PTR pRequiredAttributes; + CK_ULONG ulRequiredAttributesLen; +} CK_CMS_SIG_PARAMS; + +typedef CK_CMS_SIG_PARAMS CK_PTR CK_CMS_SIG_PARAMS_PTR; + +typedef struct CK_KEY_DERIVATION_STRING_DATA { + CK_BYTE_PTR pData; + CK_ULONG ulLen; +} CK_KEY_DERIVATION_STRING_DATA; + +typedef CK_KEY_DERIVATION_STRING_DATA CK_PTR \ + CK_KEY_DERIVATION_STRING_DATA_PTR; + + +/* The CK_EXTRACT_PARAMS is used for the + * CKM_EXTRACT_KEY_FROM_KEY mechanism. It specifies which bit + * of the base key should be used as the first bit of the + * derived key + */ +typedef CK_ULONG CK_EXTRACT_PARAMS; + +typedef CK_EXTRACT_PARAMS CK_PTR CK_EXTRACT_PARAMS_PTR; + +/* CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE is used to + * indicate the Pseudo-Random Function (PRF) used to generate + * key bits using PKCS #5 PBKDF2. + */ +typedef CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE; + +typedef CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE CK_PTR \ + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR; + +#define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL +#define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL +#define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL +#define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL +#define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL +#define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL + +/* CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE is used to indicate the + * source of the salt value when deriving a key using PKCS #5 + * PBKDF2. 
+ */ +typedef CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE; + +typedef CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE CK_PTR \ + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR; + +/* The following salt value sources are defined in PKCS #5 v2.0. */ +#define CKZ_SALT_SPECIFIED 0x00000001UL + +/* CK_PKCS5_PBKD2_PARAMS is a structure that provides the + * parameters to the CKM_PKCS5_PBKD2 mechanism. + */ +typedef struct CK_PKCS5_PBKD2_PARAMS { + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; + CK_VOID_PTR pSaltSourceData; + CK_ULONG ulSaltSourceDataLen; + CK_ULONG iterations; + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; + CK_VOID_PTR pPrfData; + CK_ULONG ulPrfDataLen; + CK_UTF8CHAR_PTR pPassword; + CK_ULONG_PTR ulPasswordLen; +} CK_PKCS5_PBKD2_PARAMS; + +typedef CK_PKCS5_PBKD2_PARAMS CK_PTR CK_PKCS5_PBKD2_PARAMS_PTR; + +/* CK_PKCS5_PBKD2_PARAMS2 is a corrected version of the CK_PKCS5_PBKD2_PARAMS + * structure that provides the parameters to the CKM_PKCS5_PBKD2 mechanism + * noting that the ulPasswordLen field is a CK_ULONG and not a CK_ULONG_PTR. + */ +typedef struct CK_PKCS5_PBKD2_PARAMS2 { + CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; + CK_VOID_PTR pSaltSourceData; + CK_ULONG ulSaltSourceDataLen; + CK_ULONG iterations; + CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; + CK_VOID_PTR pPrfData; + CK_ULONG ulPrfDataLen; + CK_UTF8CHAR_PTR pPassword; + CK_ULONG ulPasswordLen; +} CK_PKCS5_PBKD2_PARAMS2; + +typedef CK_PKCS5_PBKD2_PARAMS2 CK_PTR CK_PKCS5_PBKD2_PARAMS2_PTR; + +typedef CK_ULONG CK_OTP_PARAM_TYPE; +typedef CK_OTP_PARAM_TYPE CK_PARAM_TYPE; /* backward compatibility */ + +typedef struct CK_OTP_PARAM { + CK_OTP_PARAM_TYPE type; + CK_VOID_PTR pValue; + CK_ULONG ulValueLen; +} CK_OTP_PARAM; + +typedef CK_OTP_PARAM CK_PTR CK_OTP_PARAM_PTR; + +typedef struct CK_OTP_PARAMS { + CK_OTP_PARAM_PTR pParams; + CK_ULONG ulCount; +} CK_OTP_PARAMS; + +typedef CK_OTP_PARAMS CK_PTR CK_OTP_PARAMS_PTR; + +typedef struct CK_OTP_SIGNATURE_INFO { + CK_OTP_PARAM_PTR pParams; + CK_ULONG ulCount; +} CK_OTP_SIGNATURE_INFO; + +typedef CK_OTP_SIGNATURE_INFO CK_PTR CK_OTP_SIGNATURE_INFO_PTR; + +#define CK_OTP_VALUE 0UL +#define CK_OTP_PIN 1UL +#define CK_OTP_CHALLENGE 2UL +#define CK_OTP_TIME 3UL +#define CK_OTP_COUNTER 4UL +#define CK_OTP_FLAGS 5UL +#define CK_OTP_OUTPUT_LENGTH 6UL +#define CK_OTP_OUTPUT_FORMAT 7UL + +#define CKF_NEXT_OTP 0x00000001UL +#define CKF_EXCLUDE_TIME 0x00000002UL +#define CKF_EXCLUDE_COUNTER 0x00000004UL +#define CKF_EXCLUDE_CHALLENGE 0x00000008UL +#define CKF_EXCLUDE_PIN 0x00000010UL +#define CKF_USER_FRIENDLY_OTP 0x00000020UL + +typedef struct CK_KIP_PARAMS { + CK_MECHANISM_PTR pMechanism; + CK_OBJECT_HANDLE hKey; + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; +} CK_KIP_PARAMS; + +typedef CK_KIP_PARAMS CK_PTR CK_KIP_PARAMS_PTR; + +typedef struct CK_AES_CTR_PARAMS { + CK_ULONG ulCounterBits; + CK_BYTE cb[16]; +} CK_AES_CTR_PARAMS; + +typedef CK_AES_CTR_PARAMS CK_PTR CK_AES_CTR_PARAMS_PTR; + +typedef struct CK_GCM_PARAMS { + CK_BYTE_PTR pIv; + CK_ULONG ulIvLen; + CK_ULONG ulIvBits; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulTagBits; +} CK_GCM_PARAMS; + +typedef CK_GCM_PARAMS CK_PTR CK_GCM_PARAMS_PTR; + +typedef struct CK_CCM_PARAMS { + CK_ULONG ulDataLen; + CK_BYTE_PTR pNonce; + CK_ULONG ulNonceLen; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulMACLen; +} CK_CCM_PARAMS; + +typedef CK_CCM_PARAMS CK_PTR CK_CCM_PARAMS_PTR; + +/* Deprecated. 
Use CK_GCM_PARAMS */ +typedef struct CK_AES_GCM_PARAMS { + CK_BYTE_PTR pIv; + CK_ULONG ulIvLen; + CK_ULONG ulIvBits; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulTagBits; +} CK_AES_GCM_PARAMS; + +typedef CK_AES_GCM_PARAMS CK_PTR CK_AES_GCM_PARAMS_PTR; + +/* Deprecated. Use CK_CCM_PARAMS */ +typedef struct CK_AES_CCM_PARAMS { + CK_ULONG ulDataLen; + CK_BYTE_PTR pNonce; + CK_ULONG ulNonceLen; + CK_BYTE_PTR pAAD; + CK_ULONG ulAADLen; + CK_ULONG ulMACLen; +} CK_AES_CCM_PARAMS; + +typedef CK_AES_CCM_PARAMS CK_PTR CK_AES_CCM_PARAMS_PTR; + +typedef struct CK_CAMELLIA_CTR_PARAMS { + CK_ULONG ulCounterBits; + CK_BYTE cb[16]; +} CK_CAMELLIA_CTR_PARAMS; + +typedef CK_CAMELLIA_CTR_PARAMS CK_PTR CK_CAMELLIA_CTR_PARAMS_PTR; + +typedef struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ + CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR; + +typedef struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_ARIA_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_ARIA_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ + CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR; + +typedef struct CK_DSA_PARAMETER_GEN_PARAM { + CK_MECHANISM_TYPE hash; + CK_BYTE_PTR pSeed; + CK_ULONG ulSeedLen; + CK_ULONG ulIndex; +} CK_DSA_PARAMETER_GEN_PARAM; + +typedef CK_DSA_PARAMETER_GEN_PARAM CK_PTR CK_DSA_PARAMETER_GEN_PARAM_PTR; + +typedef struct CK_ECDH_AES_KEY_WRAP_PARAMS { + CK_ULONG ulAESKeyBits; + CK_EC_KDF_TYPE kdf; + CK_ULONG ulSharedDataLen; + CK_BYTE_PTR pSharedData; +} CK_ECDH_AES_KEY_WRAP_PARAMS; + +typedef CK_ECDH_AES_KEY_WRAP_PARAMS CK_PTR CK_ECDH_AES_KEY_WRAP_PARAMS_PTR; + +typedef CK_ULONG CK_JAVA_MIDP_SECURITY_DOMAIN; + +typedef CK_ULONG CK_CERTIFICATE_CATEGORY; + +typedef struct CK_RSA_AES_KEY_WRAP_PARAMS { + CK_ULONG ulAESKeyBits; + CK_RSA_PKCS_OAEP_PARAMS_PTR pOAEPParams; +} CK_RSA_AES_KEY_WRAP_PARAMS; + +typedef CK_RSA_AES_KEY_WRAP_PARAMS CK_PTR CK_RSA_AES_KEY_WRAP_PARAMS_PTR; + +typedef struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { + CK_SSL3_RANDOM_DATA RandomInfo; + CK_VERSION_PTR pVersion; + CK_MECHANISM_TYPE prfHashMechanism; +} CK_TLS12_MASTER_KEY_DERIVE_PARAMS; + +typedef CK_TLS12_MASTER_KEY_DERIVE_PARAMS CK_PTR \ + CK_TLS12_MASTER_KEY_DERIVE_PARAMS_PTR; + +typedef struct CK_TLS12_KEY_MAT_PARAMS { + CK_ULONG ulMacSizeInBits; + CK_ULONG ulKeySizeInBits; + CK_ULONG ulIVSizeInBits; + CK_BBOOL bIsExport; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_SSL3_KEY_MAT_OUT_PTR pReturnedKeyMaterial; + CK_MECHANISM_TYPE prfHashMechanism; +} CK_TLS12_KEY_MAT_PARAMS; + +typedef CK_TLS12_KEY_MAT_PARAMS CK_PTR CK_TLS12_KEY_MAT_PARAMS_PTR; + +typedef struct CK_TLS_KDF_PARAMS { + CK_MECHANISM_TYPE prfMechanism; + CK_BYTE_PTR pLabel; + CK_ULONG ulLabelLength; + CK_SSL3_RANDOM_DATA RandomInfo; + CK_BYTE_PTR pContextData; + CK_ULONG ulContextDataLength; +} CK_TLS_KDF_PARAMS; + +typedef CK_TLS_KDF_PARAMS CK_PTR CK_TLS_KDF_PARAMS_PTR; + +typedef struct CK_TLS_MAC_PARAMS { + CK_MECHANISM_TYPE prfHashMechanism; + CK_ULONG ulMacLength; + CK_ULONG ulServerOrClient; +} CK_TLS_MAC_PARAMS; + +typedef CK_TLS_MAC_PARAMS CK_PTR CK_TLS_MAC_PARAMS_PTR; + +typedef struct CK_GOSTR3410_DERIVE_PARAMS { + CK_EC_KDF_TYPE kdf; + CK_BYTE_PTR pPublicData; + CK_ULONG ulPublicDataLen; + CK_BYTE_PTR pUKM; + CK_ULONG ulUKMLen; +} CK_GOSTR3410_DERIVE_PARAMS; + +typedef CK_GOSTR3410_DERIVE_PARAMS CK_PTR CK_GOSTR3410_DERIVE_PARAMS_PTR; + +typedef struct CK_GOSTR3410_KEY_WRAP_PARAMS { + CK_BYTE_PTR pWrapOID; + 
CK_ULONG ulWrapOIDLen; + CK_BYTE_PTR pUKM; + CK_ULONG ulUKMLen; + CK_OBJECT_HANDLE hKey; +} CK_GOSTR3410_KEY_WRAP_PARAMS; + +typedef CK_GOSTR3410_KEY_WRAP_PARAMS CK_PTR CK_GOSTR3410_KEY_WRAP_PARAMS_PTR; + +typedef struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS { + CK_BYTE iv[16]; + CK_BYTE_PTR pData; + CK_ULONG length; +} CK_SEED_CBC_ENCRYPT_DATA_PARAMS; + +typedef CK_SEED_CBC_ENCRYPT_DATA_PARAMS CK_PTR \ + CK_SEED_CBC_ENCRYPT_DATA_PARAMS_PTR; + +#endif /* _PKCS11T_H_ */ + diff --git a/vendor/github.com/miekg/pkcs11/release.go b/vendor/github.com/miekg/pkcs11/release.go new file mode 100644 index 000000000..4380f374d --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/release.go @@ -0,0 +1,17 @@ +// +build release + +package pkcs11 + +import "fmt" + +// Release is the current version of the pkcs11 library. +var Release = R{1, 0, 3} + +// R holds the version of this library. +type R struct { + Major, Minor, Patch int +} + +func (r R) String() string { + return fmt.Sprintf("%d.%d.%d", r.Major, r.Minor, r.Patch) +} diff --git a/vendor/github.com/miekg/pkcs11/softhsm.conf b/vendor/github.com/miekg/pkcs11/softhsm.conf new file mode 100644 index 000000000..f95862b10 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/softhsm.conf @@ -0,0 +1 @@ +0:hsm.db diff --git a/vendor/github.com/miekg/pkcs11/softhsm2.conf b/vendor/github.com/miekg/pkcs11/softhsm2.conf new file mode 100644 index 000000000..876990cdd --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/softhsm2.conf @@ -0,0 +1,4 @@ +log.level = INFO +objectstore.backend = file +directories.tokendir = test_data +slots.removable = false diff --git a/vendor/github.com/miekg/pkcs11/types.go b/vendor/github.com/miekg/pkcs11/types.go new file mode 100644 index 000000000..970db9061 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/types.go @@ -0,0 +1,303 @@ +// Copyright 2013 Miek Gieben. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs11 + +/* +#include <stdlib.h> +#include <string.h> +#include "pkcs11go.h" + +CK_ULONG Index(CK_ULONG_PTR array, CK_ULONG i) +{ + return array[i]; +} + +static inline void putAttributePval(CK_ATTRIBUTE_PTR a, CK_VOID_PTR pValue) +{ + a->pValue = pValue; +} + +static inline void putMechanismParam(CK_MECHANISM_PTR m, CK_VOID_PTR pParameter) +{ + m->pParameter = pParameter; +} +*/ +import "C" + +import ( + "fmt" + "time" + "unsafe" +) + +type arena []unsafe.Pointer + +func (a *arena) Allocate(obj []byte) (C.CK_VOID_PTR, C.CK_ULONG) { + cobj := C.calloc(C.size_t(len(obj)), 1) + *a = append(*a, cobj) + C.memmove(cobj, unsafe.Pointer(&obj[0]), C.size_t(len(obj))) + return C.CK_VOID_PTR(cobj), C.CK_ULONG(len(obj)) +} + +func (a arena) Free() { + for _, p := range a { + C.free(p) + } +} + +// toList converts from a C style array to a []uint. +func toList(clist C.CK_ULONG_PTR, size C.CK_ULONG) []uint { + l := make([]uint, int(size)) + for i := 0; i < len(l); i++ { + l[i] = uint(C.Index(clist, C.CK_ULONG(i))) + } + defer C.free(unsafe.Pointer(clist)) + return l +} + +// cBBool converts a bool to a CK_BBOOL. +func cBBool(x bool) C.CK_BBOOL { + if x { + return C.CK_BBOOL(C.CK_TRUE) + } + return C.CK_BBOOL(C.CK_FALSE) +} + +func uintToBytes(x uint64) []byte { + ul := C.CK_ULONG(x) + return C.GoBytes(unsafe.Pointer(&ul), C.int(unsafe.Sizeof(ul))) +} + +// Error represents a PKCS#11 error.
+type Error uint + +func (e Error) Error() string { + return fmt.Sprintf("pkcs11: 0x%X: %s", uint(e), strerror[uint(e)]) +} + +func toError(e C.CK_RV) error { + if e == C.CKR_OK { + return nil + } + return Error(e) +} + +// SessionHandle is a Cryptoki-assigned value that identifies a session. +type SessionHandle uint + +// ObjectHandle is a token-specific identifier for an object. +type ObjectHandle uint + +// Version represents any version information from the library. +type Version struct { + Major byte + Minor byte +} + +func toVersion(version C.CK_VERSION) Version { + return Version{byte(version.major), byte(version.minor)} +} + +// SlotEvent holds the SlotID for which a slot event (token insertion, +// removal, etc.) occurred. +type SlotEvent struct { + SlotID uint +} + +// Info provides information about the library and hardware used. +type Info struct { + CryptokiVersion Version + ManufacturerID string + Flags uint + LibraryDescription string + LibraryVersion Version +} + +// SlotInfo provides information about a slot. +type SlotInfo struct { + SlotDescription string // 64 bytes. + ManufacturerID string // 32 bytes. + Flags uint + HardwareVersion Version + FirmwareVersion Version +} + +// TokenInfo provides information about a token. +type TokenInfo struct { + Label string + ManufacturerID string + Model string + SerialNumber string + Flags uint + MaxSessionCount uint + SessionCount uint + MaxRwSessionCount uint + RwSessionCount uint + MaxPinLen uint + MinPinLen uint + TotalPublicMemory uint + FreePublicMemory uint + TotalPrivateMemory uint + FreePrivateMemory uint + HardwareVersion Version + FirmwareVersion Version + UTCTime string +} + +// SessionInfo provides information about a session. +type SessionInfo struct { + SlotID uint + State uint + Flags uint + DeviceError uint +} + +// Attribute holds an attribute type/value combination. +type Attribute struct { + Type uint + Value []byte +} + +// NewAttribute allocates an Attribute and returns a pointer to it. +// Note that this is merely a convenience function, as values returned +// from the HSM are not converted back to Go values; those are just raw +// byte slices. +func NewAttribute(typ uint, x interface{}) *Attribute { + // This function nicely transforms *to* an attribute, but there is + // no corresponding function that transforms back *from* an attribute, + // which in PKCS#11 is just a byte array. + a := new(Attribute) + a.Type = typ + if x == nil { + return a + } + switch v := x.(type) { + case bool: + if v { + a.Value = []byte{1} + } else { + a.Value = []byte{0} + } + case int: + a.Value = uintToBytes(uint64(v)) + case uint: + a.Value = uintToBytes(uint64(v)) + case string: + a.Value = []byte(v) + case []byte: + a.Value = v + case time.Time: // for CKA_DATE + a.Value = cDate(v) + default: + panic("pkcs11: unhandled attribute type") + } + return a +}
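NewAttribute above is the piece application code touches most: it packs Go values into the raw byte slices that CK_ATTRIBUTE templates carry. A hedged sketch of a typical template-driven object lookup (`FindKeyByLabel` is an illustrative helper, not part of the binding; it assumes an open session on an initialized `*pkcs11.Ctx`):

```
// findkey.go - illustrative sketch only; not part of the vendored sources.
package findkey

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

// FindKeyByLabel returns the first secret-key object whose CKA_LABEL matches
// label, building the search template with NewAttribute.
func FindKeyByLabel(p *pkcs11.Ctx, session pkcs11.SessionHandle, label string) (pkcs11.ObjectHandle, error) {
	template := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),
		pkcs11.NewAttribute(pkcs11.CKA_LABEL, label),
	}
	if err := p.FindObjectsInit(session, template); err != nil {
		return 0, err
	}
	defer p.FindObjectsFinal(session)
	objs, _, err := p.FindObjects(session, 1)
	if err != nil {
		return 0, err
	}
	if len(objs) == 0 {
		return 0, fmt.Errorf("no object with label %q", label)
	}
	return objs[0], nil
}
```

Any non-CKR_OK return surfaces through toError as the Error type above, so callers can type-assert to pkcs11.Error and compare against the CKR_* constants defined earlier in this header.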
+// cAttributeList returns the start address and the length of an attribute list. +func cAttributeList(a []*Attribute) (arena, C.CK_ATTRIBUTE_PTR, C.CK_ULONG) { + var arena arena + if len(a) == 0 { + return nil, nil, 0 + } + pa := make([]C.CK_ATTRIBUTE, len(a)) + for i, attr := range a { + pa[i]._type = C.CK_ATTRIBUTE_TYPE(attr.Type) + if len(attr.Value) != 0 { + buf, len := arena.Allocate(attr.Value) + // field is unaligned on windows so this has to call into C + C.putAttributePval(&pa[i], buf) + pa[i].ulValueLen = len + } + } + return arena, &pa[0], C.CK_ULONG(len(a)) +} + +func cDate(t time.Time) []byte { + b := make([]byte, 8) + year, month, day := t.Date() + y := fmt.Sprintf("%4d", year) + m := fmt.Sprintf("%02d", month) + d1 := fmt.Sprintf("%02d", day) + b[0], b[1], b[2], b[3] = y[0], y[1], y[2], y[3] + b[4], b[5] = m[0], m[1] + b[6], b[7] = d1[0], d1[1] + return b +} + +// Mechanism holds a mechanism type/value combination. +type Mechanism struct { + Mechanism uint + Parameter []byte + generator interface{} +} + +// NewMechanism returns a pointer to an initialized Mechanism. +func NewMechanism(mech uint, x interface{}) *Mechanism { + m := new(Mechanism) + m.Mechanism = mech + if x == nil { + return m + } + + switch p := x.(type) { + case *GCMParams, *OAEPParams, *ECDH1DeriveParams: + // contains pointers; defer serialization until cMechanism + m.generator = p + case []byte: + m.Parameter = p + default: + panic("parameter must be one of type: []byte, *GCMParams, *OAEPParams, *ECDH1DeriveParams") + } + + return m +} + +func cMechanism(mechList []*Mechanism) (arena, *C.CK_MECHANISM) { + if len(mechList) != 1 { + panic("expected exactly one mechanism") + } + mech := mechList[0] + cmech := &C.CK_MECHANISM{mechanism: C.CK_MECHANISM_TYPE(mech.Mechanism)} + // params that contain pointers are allocated here + param := mech.Parameter + var arena arena + switch p := mech.generator.(type) { + case *GCMParams: + // uses its own arena because it has to outlive this function call (yuck) + param = cGCMParams(p) + case *OAEPParams: + param, arena = cOAEPParams(p, arena) + case *ECDH1DeriveParams: + param, arena = cECDH1DeriveParams(p, arena) + } + if len(param) != 0 { + buf, len := arena.Allocate(param) + // field is unaligned on windows so this has to call into C + C.putMechanismParam(cmech, buf) + cmech.ulParameterLen = len + } + return arena, cmech +} + +// MechanismInfo provides information about a particular mechanism. +type MechanismInfo struct { + MinKeySize uint + MaxKeySize uint + Flags uint +} + +// stubData is a persistent nonempty byte array used by cMessage. +var stubData = []byte{0} + +// cMessage returns the pointer/length pair corresponding to data. +func cMessage(data []byte) (dataPtr C.CK_BYTE_PTR) { + l := len(data) + if l == 0 { + // &data[0] is forbidden in this case, so use a nontrivial array instead. + data = stubData + } + return C.CK_BYTE_PTR(unsafe.Pointer(&data[0])) +} diff --git a/vendor/github.com/miekg/pkcs11/vendor.go b/vendor/github.com/miekg/pkcs11/vendor.go new file mode 100644 index 000000000..83188e500 --- /dev/null +++ b/vendor/github.com/miekg/pkcs11/vendor.go @@ -0,0 +1,127 @@ +package pkcs11 + +// Vendor specific range for Ncipher network HSM. +const ( + NFCK_VENDOR_NCIPHER = 0xde436972 + CKA_NCIPHER = NFCK_VENDOR_NCIPHER + CKM_NCIPHER = NFCK_VENDOR_NCIPHER + CKK_NCIPHER = NFCK_VENDOR_NCIPHER +) + +// Vendor specific mechanisms for HMAC on Ncipher HSMs where Ncipher does not allow use of generic_secret keys.
+const ( + CKM_NC_SHA_1_HMAC_KEY_GEN = CKM_NCIPHER + 0x3 /* no params */ + CKM_NC_MD5_HMAC_KEY_GEN = CKM_NCIPHER + 0x6 /* no params */ + CKM_NC_SHA224_HMAC_KEY_GEN = CKM_NCIPHER + 0x24 /* no params */ + CKM_NC_SHA256_HMAC_KEY_GEN = CKM_NCIPHER + 0x25 /* no params */ + CKM_NC_SHA384_HMAC_KEY_GEN = CKM_NCIPHER + 0x26 /* no params */ + CKM_NC_SHA512_HMAC_KEY_GEN = CKM_NCIPHER + 0x27 /* no params */ +) + +// Vendor specific range for Mozilla NSS. +const ( + NSSCK_VENDOR_NSS = 0x4E534350 + CKO_NSS = CKO_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKK_NSS = CKK_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKC_NSS = CKC_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKA_NSS = CKA_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKA_TRUST = CKA_NSS + 0x2000 + CKM_NSS = CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKR_NSS = CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS + CKT_VENDOR_DEFINED = 0x80000000 + CKT_NSS = CKT_VENDOR_DEFINED | NSSCK_VENDOR_NSS +) + +// Vendor specific values for Mozilla NSS. +const ( + CKO_NSS_CRL = CKO_NSS + 1 + CKO_NSS_SMIME = CKO_NSS + 2 + CKO_NSS_TRUST = CKO_NSS + 3 + CKO_NSS_BUILTIN_ROOT_LIST = CKO_NSS + 4 + CKO_NSS_NEWSLOT = CKO_NSS + 5 + CKO_NSS_DELSLOT = CKO_NSS + 6 + CKK_NSS_PKCS8 = CKK_NSS + 1 + CKK_NSS_JPAKE_ROUND1 = CKK_NSS + 2 + CKK_NSS_JPAKE_ROUND2 = CKK_NSS + 3 + CKK_NSS_CHACHA20 = CKK_NSS + 4 + CKA_NSS_URL = CKA_NSS + 1 + CKA_NSS_EMAIL = CKA_NSS + 2 + CKA_NSS_SMIME_INFO = CKA_NSS + 3 + CKA_NSS_SMIME_TIMESTAMP = CKA_NSS + 4 + CKA_NSS_PKCS8_SALT = CKA_NSS + 5 + CKA_NSS_PASSWORD_CHECK = CKA_NSS + 6 + CKA_NSS_EXPIRES = CKA_NSS + 7 + CKA_NSS_KRL = CKA_NSS + 8 + CKA_NSS_PQG_COUNTER = CKA_NSS + 20 + CKA_NSS_PQG_SEED = CKA_NSS + 21 + CKA_NSS_PQG_H = CKA_NSS + 22 + CKA_NSS_PQG_SEED_BITS = CKA_NSS + 23 + CKA_NSS_MODULE_SPEC = CKA_NSS + 24 + CKA_NSS_OVERRIDE_EXTENSIONS = CKA_NSS + 25 + CKA_NSS_JPAKE_SIGNERID = CKA_NSS + 26 + CKA_NSS_JPAKE_PEERID = CKA_NSS + 27 + CKA_NSS_JPAKE_GX1 = CKA_NSS + 28 + CKA_NSS_JPAKE_GX2 = CKA_NSS + 29 + CKA_NSS_JPAKE_GX3 = CKA_NSS + 30 + CKA_NSS_JPAKE_GX4 = CKA_NSS + 31 + CKA_NSS_JPAKE_X2 = CKA_NSS + 32 + CKA_NSS_JPAKE_X2S = CKA_NSS + 33 + CKA_NSS_MOZILLA_CA_POLICY = CKA_NSS + 34 + CKA_TRUST_DIGITAL_SIGNATURE = CKA_TRUST + 1 + CKA_TRUST_NON_REPUDIATION = CKA_TRUST + 2 + CKA_TRUST_KEY_ENCIPHERMENT = CKA_TRUST + 3 + CKA_TRUST_DATA_ENCIPHERMENT = CKA_TRUST + 4 + CKA_TRUST_KEY_AGREEMENT = CKA_TRUST + 5 + CKA_TRUST_KEY_CERT_SIGN = CKA_TRUST + 6 + CKA_TRUST_CRL_SIGN = CKA_TRUST + 7 + CKA_TRUST_SERVER_AUTH = CKA_TRUST + 8 + CKA_TRUST_CLIENT_AUTH = CKA_TRUST + 9 + CKA_TRUST_CODE_SIGNING = CKA_TRUST + 10 + CKA_TRUST_EMAIL_PROTECTION = CKA_TRUST + 11 + CKA_TRUST_IPSEC_END_SYSTEM = CKA_TRUST + 12 + CKA_TRUST_IPSEC_TUNNEL = CKA_TRUST + 13 + CKA_TRUST_IPSEC_USER = CKA_TRUST + 14 + CKA_TRUST_TIME_STAMPING = CKA_TRUST + 15 + CKA_TRUST_STEP_UP_APPROVED = CKA_TRUST + 16 + CKA_CERT_SHA1_HASH = CKA_TRUST + 100 + CKA_CERT_MD5_HASH = CKA_TRUST + 101 + CKM_NSS_AES_KEY_WRAP = CKM_NSS + 1 + CKM_NSS_AES_KEY_WRAP_PAD = CKM_NSS + 2 + CKM_NSS_HKDF_SHA1 = CKM_NSS + 3 + CKM_NSS_HKDF_SHA256 = CKM_NSS + 4 + CKM_NSS_HKDF_SHA384 = CKM_NSS + 5 + CKM_NSS_HKDF_SHA512 = CKM_NSS + 6 + CKM_NSS_JPAKE_ROUND1_SHA1 = CKM_NSS + 7 + CKM_NSS_JPAKE_ROUND1_SHA256 = CKM_NSS + 8 + CKM_NSS_JPAKE_ROUND1_SHA384 = CKM_NSS + 9 + CKM_NSS_JPAKE_ROUND1_SHA512 = CKM_NSS + 10 + CKM_NSS_JPAKE_ROUND2_SHA1 = CKM_NSS + 11 + CKM_NSS_JPAKE_ROUND2_SHA256 = CKM_NSS + 12 + CKM_NSS_JPAKE_ROUND2_SHA384 = CKM_NSS + 13 + CKM_NSS_JPAKE_ROUND2_SHA512 = CKM_NSS + 14 + CKM_NSS_JPAKE_FINAL_SHA1 = CKM_NSS + 15 + CKM_NSS_JPAKE_FINAL_SHA256 = CKM_NSS + 16 + 
CKM_NSS_JPAKE_FINAL_SHA384 = CKM_NSS + 17 + CKM_NSS_JPAKE_FINAL_SHA512 = CKM_NSS + 18 + CKM_NSS_HMAC_CONSTANT_TIME = CKM_NSS + 19 + CKM_NSS_SSL3_MAC_CONSTANT_TIME = CKM_NSS + 20 + CKM_NSS_TLS_PRF_GENERAL_SHA256 = CKM_NSS + 21 + CKM_NSS_TLS_MASTER_KEY_DERIVE_SHA256 = CKM_NSS + 22 + CKM_NSS_TLS_KEY_AND_MAC_DERIVE_SHA256 = CKM_NSS + 23 + CKM_NSS_TLS_MASTER_KEY_DERIVE_DH_SHA256 = CKM_NSS + 24 + CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE = CKM_NSS + 25 + CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_DH = CKM_NSS + 26 + CKM_NSS_CHACHA20_KEY_GEN = CKM_NSS + 27 + CKM_NSS_CHACHA20_POLY1305 = CKM_NSS + 28 + CKM_NSS_PKCS12_PBE_SHA224_HMAC_KEY_GEN = CKM_NSS + 29 + CKM_NSS_PKCS12_PBE_SHA256_HMAC_KEY_GEN = CKM_NSS + 30 + CKM_NSS_PKCS12_PBE_SHA384_HMAC_KEY_GEN = CKM_NSS + 31 + CKM_NSS_PKCS12_PBE_SHA512_HMAC_KEY_GEN = CKM_NSS + 32 + CKR_NSS_CERTDB_FAILED = CKR_NSS + 1 + CKR_NSS_KEYDB_FAILED = CKR_NSS + 2 + CKT_NSS_TRUSTED = CKT_NSS + 1 + CKT_NSS_TRUSTED_DELEGATOR = CKT_NSS + 2 + CKT_NSS_MUST_VERIFY_TRUST = CKT_NSS + 3 + CKT_NSS_NOT_TRUSTED = CKT_NSS + 10 + CKT_NSS_TRUST_UNKNOWN = CKT_NSS + 5 +) diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore b/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore new file mode 100644 index 000000000..1823f5d48 --- /dev/null +++ b/vendor/github.com/stefanberger/go-pkcs11uri/.gitignore @@ -0,0 +1,2 @@ +*~ +pkcs11uri diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml b/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml new file mode 100644 index 000000000..f5f274f96 --- /dev/null +++ b/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml @@ -0,0 +1,25 @@ +dist: bionic +language: go + +os: +- linux + +go: + - "1.13.x" + +matrix: + include: + - os: linux + +addons: + apt: + packages: + - softhsm2 + +install: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 + +script: + - make + - make check + - make test diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/LICENSE b/vendor/github.com/stefanberger/go-pkcs11uri/LICENSE new file mode 100644 index 000000000..49cc83d2e --- /dev/null +++ b/vendor/github.com/stefanberger/go-pkcs11uri/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/Makefile b/vendor/github.com/stefanberger/go-pkcs11uri/Makefile new file mode 100644 index 000000000..1a1051524 --- /dev/null +++ b/vendor/github.com/stefanberger/go-pkcs11uri/Makefile @@ -0,0 +1,28 @@ +# Copyright IBM Corporation, 2020 + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: check build test + +all: build + +FORCE: + +check: + golangci-lint run + +build: + go build ./... + +test: + go test ./... -test.v diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/README.md b/vendor/github.com/stefanberger/go-pkcs11uri/README.md new file mode 100644 index 000000000..c1fc6e911 --- /dev/null +++ b/vendor/github.com/stefanberger/go-pkcs11uri/README.md @@ -0,0 +1,102 @@ +# go-pkcs11uri + +Welcome to the go-pkcs11uri library. The implementation follows [RFC 7512](https://tools.ietf.org/html/rfc7512) and this [errata](https://www.rfc-editor.org/errata/rfc7512). + +# Example usage: + +The following example builds on the library found [here](https://github.com/miekg/pkcs11) and uses softhsm2 on Fedora.
+ +## Example + +This example program extends the one found [here](https://github.com/miekg/pkcs11/blob/master/README.md#examples): + +``` +package main + +import ( + "fmt" + "os" + "strconv" + + "github.com/miekg/pkcs11" + pkcs11uri "github.com/stefanberger/go-pkcs11uri" +) + +func main() { + if len(os.Args) < 2 { + panic("Missing pkcs11 URI argument") + } + uristr := os.Args[1] + + uri := pkcs11uri.New() + err := uri.Parse(uristr) + if err != nil { + panic(err) + } + + module, err := uri.GetModule() + if err != nil { + panic(err) + } + + slot, ok := uri.GetPathAttribute("slot-id", false) + if !ok { + panic("No slot-id in pkcs11 URI") + } + slotid, err := strconv.Atoi(slot) + if err != nil { + panic(err) + } + + pin, err := uri.GetPIN() + if err != nil { + panic(err) + } + + p := pkcs11.New(module) + err = p.Initialize() + if err != nil { + panic(err) + } + + defer p.Destroy() + defer p.Finalize() + + session, err := p.OpenSession(uint(slotid), pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) + if err != nil { + panic(err) + } + defer p.CloseSession(session) + + err = p.Login(session, pkcs11.CKU_USER, pin) + if err != nil { + panic(err) + } + defer p.Logout(session) + + p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)}) + hash, err := p.Digest(session, []byte("this is a string")) + if err != nil { + panic(err) + } + + for _, d := range hash { + fmt.Printf("%x", d) + } + fmt.Println() +} +``` + +## Example Usage + +``` +$ sudo softhsm2-util --init-token --slot 1 --label test --pin 1234 --so-pin 1234 +The token has been initialized and is reassigned to slot 2053753261 +$ go build ./... +$ sudo ./pkcs11-example 'pkcs11:slot-id=2053753261?module-path=/usr/lib64/pkcs11/libsofthsm2.so&pin-value=1234' +517592df8fec3ad146a79a9af153db2a4d784ec5 +``` + diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go b/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go new file mode 100644 index 000000000..39b06548e --- /dev/null +++ b/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go @@ -0,0 +1,453 @@ +/* + (c) Copyright IBM Corporation, 2020 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pkcs11uri + +import ( + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +// Pkcs11URI holds a pkcs11 URI object +type Pkcs11URI struct { + // path and query attributes may have custom attributes that either + // have to be in the query or in the path part, so we use two maps + pathAttributes map[string]string + queryAttributes map[string]string + // directories to search for pkcs11 modules + moduleDirectories []string + // file paths of allowed pkcs11 modules + allowedModulePaths []string + // whether any module is allowed to be loaded + allowAnyModule bool + // A map of environment variables needed by the pkcs11 module using this URI. + // This map is not needed by this implementation but is there for convenience.
+	env map[string]string
+}
+
+// upper character hex digits needed for pct-encoding
+const hex = "0123456789ABCDEF"
+
+// escapeAll pct-escapes all characters in the string
+func escapeAll(s string) string {
+	res := make([]byte, len(s)*3)
+	j := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		res[j] = '%'
+		res[j+1] = hex[c>>4]
+		res[j+2] = hex[c&0xf]
+		j += 3
+	}
+	return string(res)
+}
+
+// escape pct-escapes the path and query part of the pkcs11 URI following the different rules of the
+// path and query part as described in RFC 7512 sec. 2.3
+func escape(s string, isPath bool) string {
+	res := make([]byte, len(s)*3)
+	j := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		// unreserved per RFC 3986 sec. 2.3
+		if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') {
+			res[j] = c
+		} else if isPath && c == '&' {
+			res[j] = c
+		} else if !isPath && (c == '/' || c == '?' || c == '|') {
+			res[j] = c
+		} else {
+			switch c {
+			case '-', '.', '_', '~': // unreserved per RFC 3986 sec. 2.3
+				res[j] = c
+			case ':', '[', ']', '@', '!', '$', '\'', '(', ')', '*', '+', ',', '=':
+				res[j] = c
+			default:
+				res[j] = '%'
+				res[j+1] = hex[c>>4]
+				res[j+2] = hex[c&0xf]
+				j += 2
+			}
+		}
+		j++
+	}
+	return string(res[:j])
+}
+
+// New creates a new Pkcs11URI object
+func New() *Pkcs11URI {
+	return &Pkcs11URI{
+		pathAttributes:  make(map[string]string),
+		queryAttributes: make(map[string]string),
+		env:             make(map[string]string),
+	}
+}
+
+func (uri *Pkcs11URI) setAttribute(attrMap map[string]string, name, value string) error {
+	v, err := url.PathUnescape(value)
+	if err != nil {
+		return err
+	}
+	attrMap[name] = v
+	return nil
+}
+
+// GetPathAttribute returns the value of a path attribute in unescaped form or
+// pct-encoded form
+func (uri *Pkcs11URI) GetPathAttribute(name string, pctencode bool) (string, bool) {
+	v, ok := uri.pathAttributes[name]
+	if ok && pctencode {
+		v = escape(v, true)
+	}
+	return v, ok
+}
+
+// SetPathAttribute sets the value for a path attribute; this function may return an error
+// if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) SetPathAttribute(name, value string) error {
+	return uri.setAttribute(uri.pathAttributes, name, value)
+}
+
+// AddPathAttribute adds a path attribute; it returns an error if an attribute with the same
+// name already existed or if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) AddPathAttribute(name, value string) error {
+	if _, ok := uri.pathAttributes[name]; ok {
+		return errors.New("duplicate path attribute")
+	}
+	return uri.SetPathAttribute(name, value)
+}
+
+// RemovePathAttribute removes a path attribute
+func (uri *Pkcs11URI) RemovePathAttribute(name string) {
+	delete(uri.pathAttributes, name)
+}
+
+// AddEnv adds an environment variable for the pkcs11 module
+func (uri *Pkcs11URI) AddEnv(name, value string) {
+	uri.env[name] = value
+}
+
+// SetEnvMap sets the environment variables for the pkcs11 module
+func (uri *Pkcs11URI) SetEnvMap(env map[string]string) {
+	uri.env = env
+}
+
+// GetEnvMap returns the map of environment variables
+func (uri *Pkcs11URI) GetEnvMap() map[string]string {
+	return uri.env
+}
+
+// GetQueryAttribute returns the value of a query attribute in unescaped or
+// pct-encoded form
+func (uri *Pkcs11URI) GetQueryAttribute(name string, pctencode bool) (string, bool) {
+	v, ok := uri.queryAttributes[name]
+	if ok && pctencode {
+		v = escape(v, false)
+	}
+	return v, ok
+}
+
+// SetQueryAttribute sets the value for a query attribute; this function may return an
error
+// if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) SetQueryAttribute(name, value string) error {
+	return uri.setAttribute(uri.queryAttributes, name, value)
+}
+
+// AddQueryAttribute adds a query attribute; it returns an error if an attribute with the same
+// name already existed or if the given value cannot be pct-unescaped
+func (uri *Pkcs11URI) AddQueryAttribute(name, value string) error {
+	if _, ok := uri.queryAttributes[name]; ok {
+		return errors.New("duplicate query attribute")
+	}
+	return uri.SetQueryAttribute(name, value)
+}
+
+// RemoveQueryAttribute removes a query attribute
+func (uri *Pkcs11URI) RemoveQueryAttribute(name string) {
+	delete(uri.queryAttributes, name)
+}
+
+// Validate validates a Pkcs11URI object's attributes following RFC 7512 rules and proper formatting of
+// their values
+func (uri *Pkcs11URI) Validate() error {
+	/* RFC 7512: 2.3 */
+	/* slot-id should be a single DIGIT per the ABNF, but we accept any number */
+	if v, ok := uri.pathAttributes["slot-id"]; ok {
+		if _, err := strconv.Atoi(v); err != nil {
+			return fmt.Errorf("slot-id must be a number: %s", v)
+		}
+	}
+
+	/* library-version should be 1*DIGIT [ "." 1*DIGIT ] */
+	if v, ok := uri.pathAttributes["library-version"]; ok {
+		m, err := regexp.Match("^[0-9]+(\\.[0-9]+)?$", []byte(v))
+		if err != nil || !m {
+			return fmt.Errorf("Invalid format for library-version '%s'", v)
+		}
+	}
+
+	if v, ok := uri.pathAttributes["type"]; ok {
+		m, err := regexp.Match("^(public|private|cert|secret-key|data)?$", []byte(v))
+		if err != nil || !m {
+			return fmt.Errorf("Invalid type '%s'", v)
+		}
+	}
+
+	/* RFC 7512: 2.4 */
+	_, ok1 := uri.queryAttributes["pin-source"]
+	_, ok2 := uri.queryAttributes["pin-value"]
+	if ok1 && ok2 {
+		return errors.New("URI must not contain pin-source and pin-value")
+	}
+
+	if v, ok := uri.queryAttributes["module-path"]; ok {
+		if !filepath.IsAbs(v) {
+			return fmt.Errorf("path %s of module-path attribute must be absolute", v)
+		}
+	}
+
+	return nil
+}
+
+// HasPIN allows the user to check whether a PIN has been provided either by the pin-value or the pin-source
+// attributes. It should be called before GetPIN(), which may still fail getting the PIN from a file for example.
+func (uri *Pkcs11URI) HasPIN() bool {
+	_, ok := uri.queryAttributes["pin-value"]
+	if ok {
+		return true
+	}
+	_, ok = uri.queryAttributes["pin-source"]
+	return ok
+}
+
+// GetPIN gets the PIN from either the pin-value or pin-source attribute; a user may want to call HasPIN()
+// before calling this function to determine whether a PIN has been provided at all, so that an error
+// returned by this function indicates that the PIN value could not be retrieved.
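+//
+// A usage sketch (illustrative only):
+//
+//	if uri.HasPIN() {
+//		pin, err := uri.GetPIN()
+//		if err != nil {
+//			// PIN was configured but could not be read,
+//			// e.g. a missing pin-source file
+//		}
+//		_ = pin
+//	}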
+func (uri *Pkcs11URI) GetPIN() (string, error) {
+	if v, ok := uri.queryAttributes["pin-value"]; ok {
+		return v, nil
+	}
+	if v, ok := uri.queryAttributes["pin-source"]; ok {
+		pinuri, err := url.ParseRequestURI(v)
+		if err != nil {
+			return "", fmt.Errorf("Could not parse pin-source: %s", err)
+		}
+		switch pinuri.Scheme {
+		case "", "file":
+			if !filepath.IsAbs(pinuri.Path) {
+				return "", fmt.Errorf("PIN URI path '%s' is not absolute", pinuri.Path)
+			}
+			pin, err := ioutil.ReadFile(pinuri.Path)
+			if err != nil {
+				return "", fmt.Errorf("Could not open PIN file: %s", err)
+			}
+			return string(pin), nil
+		default:
+			return "", fmt.Errorf("PIN URI scheme %s is not supported", pinuri.Scheme)
+		}
+	}
+	return "", fmt.Errorf("Neither pin-source nor pin-value are available")
+}
+
+// Parse parses a pkcs11: URI string
+func (uri *Pkcs11URI) Parse(uristring string) error {
+	if !strings.HasPrefix(uristring, "pkcs11:") {
+		return errors.New("Malformed pkcs11 URI: missing pkcs11: prefix")
+	}
+
+	parts := strings.SplitN(uristring[7:], "?", 2)
+
+	uri.pathAttributes = make(map[string]string)
+	uri.queryAttributes = make(map[string]string)
+
+	if len(parts[0]) > 0 {
+		/* parse path part */
+		for _, part := range strings.Split(parts[0], ";") {
+			p := strings.SplitN(part, "=", 2)
+			if len(p) != 2 {
+				return errors.New("Malformed pkcs11 URI: malformed path attribute")
+			}
+			if err := uri.AddPathAttribute(p[0], p[1]); err != nil {
+				return fmt.Errorf("Malformed pkcs11 URI: %s", err)
+			}
+		}
+	}
+
+	if len(parts) == 2 {
+		/* parse query part */
+		for _, part := range strings.Split(parts[1], "&") {
+			p := strings.SplitN(part, "=", 2)
+			if len(p) != 2 {
+				return errors.New("Malformed pkcs11 URI: malformed query attribute")
+			}
+			if err := uri.AddQueryAttribute(p[0], p[1]); err != nil {
+				return fmt.Errorf("Malformed pkcs11 URI: %s", err)
+			}
+		}
+	}
+	return uri.Validate()
+}
+
+// formatAttributes formats attributes and escapes their values as needed
+func formatAttributes(attrMap map[string]string, ispath bool) string {
+	res := ""
+	for key, value := range attrMap {
+		switch key {
+		case "id":
+			/* id is always pct-encoded */
+			value = escapeAll(value)
+		default:
+			if ispath {
+				value = escape(value, true)
+			} else {
+				value = escape(value, false)
+			}
+		}
+		if len(res) > 0 {
+			if ispath {
+				res += ";"
+			} else {
+				res += "&"
+			}
+		}
+		res += key + "=" + value
+	}
+	return res
+}
+
+// Format formats a Pkcs11URI to its string representation
+func (uri *Pkcs11URI) Format() (string, error) {
+	if err := uri.Validate(); err != nil {
+		return "", err
+	}
+	result := "pkcs11:" + formatAttributes(uri.pathAttributes, true)
+	if len(uri.queryAttributes) > 0 {
+		result += "?" + formatAttributes(uri.queryAttributes, false)
+	}
+	return result, nil
+}
+
+// SetModuleDirectories sets the search directories for pkcs11 modules
+func (uri *Pkcs11URI) SetModuleDirectories(moduleDirectories []string) {
+	uri.moduleDirectories = moduleDirectories
+}
+
+// GetModuleDirectories gets the search directories for pkcs11 modules
+func (uri *Pkcs11URI) GetModuleDirectories() []string {
+	return uri.moduleDirectories
+}
+
+// SetAllowedModulePaths sets allowed module paths to restrict access to modules.
+// Directory entries must end with a '/'; all other entries are assumed to be file entries.
+// Allowed modules are filtered by string matching.
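+//
+// For example (illustrative values), the following allows one specific module
+// file plus any module directly under /usr/lib64/pkcs11/:
+//
+//	uri.SetAllowedModulePaths([]string{
+//		"/usr/lib/libmypkcs11.so",
+//		"/usr/lib64/pkcs11/",
+//	})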
+func (uri *Pkcs11URI) SetAllowedModulePaths(allowedModulePaths []string) {
+	uri.allowedModulePaths = allowedModulePaths
+}
+
+// SetAllowAnyModule allows any module to be loaded; by default this is not allowed
+func (uri *Pkcs11URI) SetAllowAnyModule(allowAnyModule bool) {
+	uri.allowAnyModule = allowAnyModule
+}
+
+func (uri *Pkcs11URI) isAllowedPath(path string, allowedPaths []string) bool {
+	if uri.allowAnyModule {
+		return true
+	}
+	for _, allowedPath := range allowedPaths {
+		if allowedPath == path {
+			// exact filename match
+			return true
+		}
+		if allowedPath[len(allowedPath)-1] == '/' && strings.HasPrefix(path, allowedPath) {
+			// allowedPath is a directory; modules directly inside it are
+			// allowed, but modules in subdirectories are not
+			idx := strings.IndexRune(path[len(allowedPath):], os.PathSeparator)
+			if idx < 0 {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// GetModule returns the module to use or an error in case no module could be found.
+// First the module-path attribute is checked for whether it holds an absolute path that
+// can be read by the current user. If this is the case the module is returned. Otherwise
+// the directory given by module-path, or the user-provided module directories, are
+// searched for a file whose name matches the module-name attribute.
+func (uri *Pkcs11URI) GetModule() (string, error) {
+	var searchdirs []string
+	v, ok := uri.queryAttributes["module-path"]
+
+	if ok {
+		info, err := os.Stat(v)
+		if err != nil {
+			return "", fmt.Errorf("module-path '%s' is not accessible", v)
+		}
+		if info.Mode().IsRegular() {
+			// it's a file
+			if uri.isAllowedPath(v, uri.allowedModulePaths) {
+				return v, nil
+			}
+			return "", fmt.Errorf("module-path '%s' is not allowed by policy", v)
+		}
+		if !info.IsDir() {
+			return "", fmt.Errorf("module-path '%s' points to an invalid file type", v)
+		}
+		// v is a directory
+		searchdirs = []string{v}
+	} else {
+		searchdirs = uri.GetModuleDirectories()
+	}
+
+	moduleName, ok := uri.queryAttributes["module-name"]
+	if !ok {
+		return "", fmt.Errorf("module-name attribute is not set")
+	}
+	moduleName = strings.ToLower(moduleName)
+
+	for _, dir := range searchdirs {
+		files, err := ioutil.ReadDir(dir)
+		if err != nil {
+			continue
+		}
+		for _, file := range files {
+			fileLower := strings.ToLower(file.Name())
+
+			i := strings.Index(fileLower, moduleName)
+			if i < 0 {
+				continue
+			}
+			// we require that fileLower ends with moduleName or that a '.'
+			// follows, so that module-name 'softhsm' will not match
+			// libsofthsm2.so but only libsofthsm.so
+			if len(fileLower) == i+len(moduleName) || fileLower[i+len(moduleName)] == '.' {
+				f := filepath.Join(dir, file.Name())
+				if uri.isAllowedPath(f, uri.allowedModulePaths) {
+					return f, nil
+				}
+				return "", fmt.Errorf("module '%s' is not allowed by policy", f)
+			}
+		}
+	}
+	return "", fmt.Errorf("No module could be found")
+}
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 000000000..dc5225b6d
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
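+//
+// A minimal usage sketch:
+//
+//	ts := timeseries.NewTimeSeries(timeseries.NewFloat)
+//	f := timeseries.Float(1)
+//	ts.Add(&f)
+//	recent := ts.Recent(time.Minute)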
+package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
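+//
+// For example, with timeSeriesNumBuckets = 64, the 1*time.Second level spans
+// the most recent 64 seconds and the 1*time.Minute level spans the most
+// recent 64 minutes.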
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
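+		// ("Sufficiently far" means t is at least one full window of
+		// numBuckets*size beyond level.end, so no existing bucket can still
+		// be valid; level.end is then recomputed by rounding t down to a
+		// multiple of the bucket size via the UnixNano arithmetic below.)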
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
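+//
+// For example (sketch), splitting the last hour into six ten-minute sums:
+//
+//	now := ts.clock.Time()
+//	sums := ts.ComputeRange(now.Add(-time.Hour), now, 6)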
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
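+// For example:
+//
+//	ts := NewTimeSeries(NewFloat)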
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
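+// They implement sort.Interface: Less orders by Start time, newest first,
+// so sort.Sort(els) yields that reverse chronological order.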
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` +<html> + <head> + <title>events</title> + </head> + <style type="text/css"> + body { + font-family: sans-serif; + } + table#req-status td.family { + padding-right: 2em; + } + table#req-status td.active { + padding-right: 1em; + } + table#req-status td.empty { + color: #aaa; + } + table#reqs { + margin-top: 1em; + } + table#reqs tr.first { + {{if $.Expanded}}font-weight: bold;{{end}} + } + table#reqs td { + font-family: monospace; + } + table#reqs td.when { + text-align: right; + white-space: nowrap; + } + table#reqs td.elapsed { + padding: 0 0.5em; + text-align: right; + white-space: pre; + width: 10em; + } + address { + font-size: smaller; + margin-top: 5em; + } + </style> + <body> + +<h1>/debug/events</h1> + +<table id="req-status"> + {{range $i, $fam := .Families}} + <tr> + <td class="family">{{$fam}}</td> + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + <td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}"> + {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}</a>{{end}} + </td> + {{end}} + + </tr>{{end}} +</table> + +{{if $.EventLogs}} +<hr /> +<h3>Family: {{$.Family}}</h3> + +{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}} +[Summary]{{if $.Expanded}}</a>{{end}} + +{{if not 
$.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}} +[Expanded]{{if not $.Expanded}}</a>{{end}} + +<table id="reqs"> + <tr><th>When</th><th>Elapsed</th></tr> + {{range $el := $.EventLogs}} + <tr class="first"> + <td class="when">{{$el.When}}</td> + <td class="elapsed">{{$el.ElapsedTime}}</td> + <td>{{$el.Title}} + </tr> + {{if $.Expanded}} + <tr> + <td class="when"></td> + <td class="elapsed"></td> + <td><pre>{{$el.Stack|trimSpace}}</pre></td> + </tr> + {{range $el.Events}} + <tr> + <td class="when">{{.WhenString}}</td> + <td class="elapsed">{{elapsed .Elapsed}}</td> + <td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td> + </tr> + {{end}} + {{end}} + {{end}} +</table> +{{end}} + </body> +</html> +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. 
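+// It is derived from the running totals kept by addMeasurement as
+// sqrt(sumOfSquares/n - (sum/n)^2), i.e. sqrt(E[x^2] - E[x]^2).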
+func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. 
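+// The receiver is used only for method dispatch; the result is always empty.
+// Callers that need a provider function can use an equivalent closure, e.g.
+// func() timeseries.Observable { return new(histogram) }.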
+func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. +const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` +<table> +<tr> + <td style="padding:0.25em">Count: {{.Count}}</td> + <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td> + <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td> + <td style="padding:0.25em">Median: {{.Median}}</td> +</tr> +</table> +<hr> +<table> +{{range $b := .Buckets}} +{{if $b}} + <tr> + <td style="padding:0 0 0 0.25em">[</td> + <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td> + <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td> + <td style="text-align:right;padding:0 0.25em">{{.N}}</td> + <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td> + <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td> + <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td> + </tr> 
+{{end}} +{{end}} +</table> +`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..3ebf6f2da --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
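	//
	// A usage sketch (bufPool here is a hypothetical sync.Pool of buffers):
	//
	//	tr.SetRecycler(func(x interface{}) {
	//		if buf, ok := x.(*bytes.Buffer); ok {
	//			bufPool.Put(buf)
	//		}
	//	})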
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
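Aside: LazyLog and SetRecycler (specified in the Trace interface above) are designed to work together so callers can pool event payloads. A sketch under those assumptions — the payload type and pool are invented for illustration; only the exported API above is assumed:

```go
package tracepool

import (
	"sync"

	"golang.org/x/net/trace"
)

// payload is a hypothetical reusable event value; LazyLog requires a fmt.Stringer.
type payload struct{ msg string }

func (p *payload) String() string { return p.msg }

var payloadPool = sync.Pool{New: func() interface{} { return new(payload) }}

func newPooledTrace(family, title string) trace.Trace {
	tr := trace.New(family, title)
	// The recycler is handed each LazyLog value once the trace no longer
	// needs it, so it can go back to the pool instead of being discarded.
	tr.SetRecycler(func(x interface{}) { payloadPool.Put(x) })
	return tr
}

func logEvent(tr trace.Trace, msg string) {
	p := payloadPool.Get().(*payload)
	p.msg = msg
	tr.LazyLog(p, false) // false: this event is not sensitive
}
```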
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} +<html> + <head> + <title>/debug/requests</title> + <style type="text/css"> + body { + font-family: sans-serif; + } + table#tr-status td.family { + padding-right: 2em; + } + table#tr-status td.active { + padding-right: 1em; + } + table#tr-status td.latency-first { + padding-left: 1em; + } + table#tr-status td.empty { + color: #aaa; + } + table#reqs { + margin-top: 1em; + } + table#reqs tr.first { + {{if $.Expanded}}font-weight: bold;{{end}} + } + table#reqs td { + font-family: monospace; + } + table#reqs td.when { + text-align: right; + white-space: nowrap; + } + table#reqs td.elapsed { + padding: 0 0.5em; + text-align: right; + white-space: pre; + width: 10em; + } + address { + font-size: smaller; + margin-top: 5em; + } + </style> + </head> + <body> + +<h1>/debug/requests</h1> +{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} +<table id="tr-status"> + {{range $fam := .Families}} + <tr> + <td class="family">{{$fam}}</td> + + {{$n := index $.ActiveTraceCount $fam}} + <td class="active {{if not $n}}empty{{end}}"> + {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}} + [{{$n}} active] + {{if $n}}</a>{{end}} + </td> + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + 
{{$empty := $b.Empty}} + <td {{if $empty}}class="empty"{{end}}> + {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}} + [{{.Cond}}] + {{if not $empty}}</a>{{end}} + </td> + {{end}} + + {{$nb := len $f.Buckets}} + <td class="latency-first"> + <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a> + </td> + <td> + <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a> + </td> + <td> + <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a> + </td> + + </tr> + {{end}} +</table> +{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +<hr /> +<h3>Family: {{$.Family}}</h3> + +{{if or $.Expanded $.Traced}} + <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a> +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a> +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a> + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a> + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p> +{{end}} + +<table id="reqs"> + <caption> + {{if $.Active}}Active{{else}}Completed{{end}} Requests + </caption> + <tr><th>When</th><th>Elapsed (s)</th></tr> + {{range $tr := $.Traces}} + <tr class="first"> + <td class="when">{{$tr.When}}</td> + <td class="elapsed">{{$tr.ElapsedTime}}</td> + <td>{{$tr.Title}}</td> + {{/* TODO: include traceID/spanID */}} + </tr> + {{if $.Expanded}} + {{range $tr.Events}} + <tr> + <td class="when">{{.WhenString}}</td> + <td class="elapsed">{{elapsed .Elapsed}}</td> + <td>{{if or $.ShowSensitive (not .Sensitive)}}... 
{{.What}}{{else}}<em>[redacted]</em>{{end}}</td> + </tr> + {{end}} + {{end}} + {{end}} +</table> +{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +<h4>Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}</h4> +{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + </body> +</html> +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 000000000..3e495fa23 --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,42 @@ +language: go + +matrix: + include: + - go: 1.14.x + env: VET=1 GO111MODULE=on + - go: 1.14.x + env: RACE=1 GO111MODULE=on + - go: 1.14.x + env: RUN386=1 + - go: 1.14.x + env: GRPC_GO_RETRY=on + - go: 1.14.x + env: TESTEXTRAS=1 + - go: 1.13.x + env: GO111MODULE=on + - go: 1.12.x + env: GO111MODULE=on + - go: 1.11.x # Keep until interop tests no longer require Go1.11 + env: GO111MODULE=on + +go_import_path: google.golang.org/grpc + +before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi + - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi + - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi + +install: + - try3() { eval "$*" || eval "$*" || eval "$*"; } + - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' + - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi + - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi + +script: + - set -e + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi + - if [[ -n "${VET}" ]]; then ./vet.sh; fi + - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi + - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi + - make test diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..9d4213ebc --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..cd03f8c76 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. 
We often receive PRs that try to fix several things at
+  a time; if only one of the fixes is acceptable, nothing gets merged, and
+  both the author's and the reviewers' time is wasted. Create more PRs to
+  address different concerns and everyone will be happy.
+
+- The grpc package should only depend on standard Go packages and a small number
+  of exceptions. If your contribution introduces new dependencies which are NOT
+  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
+  discussion with gRPC-Go authors and consultants.
+
+- For speculative changes, consider opening an issue and discussing it first. If
+  you are suggesting a behavioral or API change, consider starting with a [gRFC
+  proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made
+  and **why** it was made. Link to a GitHub issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line
+  to address an issue. PRs with irrelevant changes won't be merged. If you do
+  want to fix formatting or style, do that in a separate PR.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments
+  that you'll need to address before merging. We expect you to be reasonably
+  responsive to those comments; otherwise, the PR will be closed after 2-3 weeks
+  of inactivity.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**. PRs
+  with a messy commit history are difficult to review and won't be merged. Use
+  `rebase -i upstream/master` to curate your commit history and/or to bring in
+  the latest changes from master (but avoid rebasing in the middle of a code
+  review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we
+  can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We
+  recommend you **run tests locally** before creating your PR to catch breakages
+  early on.
+  - `make all` to test everything, OR
+  - `make vet` to catch vet errors
+  - `make test` to run the tests
+  - `make testrace` to run tests in race mode
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
new file mode 100644
index 000000000..d6ff26747
--- /dev/null
+++ b/vendor/google.golang.org/grpc/GOVERNANCE.md
@@ -0,0 +1 @@
+This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
new file mode 100644
index 000000000..093c82b3a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -0,0 +1,27 @@
+This page lists all active maintainers of this repository. If you were a
+maintainer and would like to add your name to the Emeritus list, please send us a
+PR.
+
+See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
+for governance guidelines and how to become a maintainer.
+See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
+for general contribution guidelines.
+ +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..3f661a787 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,64 @@ +all: vet test testrace + +build: deps + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: testdeps + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: testdeps + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testappengine: testappenginedeps + goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testappenginedeps: + goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + deps \ + proto \ + test \ + testappengine \ + testappenginedeps \ + testdeps \ + testrace \ + updatedeps \ + updatetestdeps \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..3949a683f --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,141 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. 
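Aside: tying the Makefile above back to the contribution guidelines earlier in this diff, a typical local check before sending a PR might look like the following (illustrative session):

```console
$ make vet       # runs ./vet.sh -install and then ./vet.sh
$ make test      # go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
$ make testrace  # the same tests under the race detector
```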
+ +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" +``` + +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console +$ go get -u google.golang.org/grpc +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ```sh + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +#### If you are using Go modules: + +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +```go +module <your module name> + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. *.proto +``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). 
+ If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..3220d87be --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. +func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. To +// remove an existing key, use a nil value. +func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if a == nil { + return New(kvs...) 
+ } + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. +func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } + return a.m[key] +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..542594f5c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..0787d0b50 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+	// BaseDelay is the amount of time to backoff after the first failure.
+	BaseDelay time.Duration
+	// Multiplier is the factor with which to multiply backoffs after a
+	// failed retry. Should ideally be greater than 1.
+	Multiplier float64
+	// Jitter is the factor with which backoffs are randomized.
+	Jitter float64
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
+var DefaultConfig = Config{
+	BaseDelay:  1.0 * time.Second,
+	Multiplier: 1.6,
+	Jitter:     0.2,
+	MaxDelay:   120 * time.Second,
+}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
new file mode 100644
index 000000000..8bf359dbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -0,0 +1,374 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package balancer defines APIs for load balancing in gRPC.
+// All APIs in this package are experimental.
+package balancer
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from name to balancer builder.
+	m = make(map[string]Builder)
+)
+
+// Register registers the balancer builder to the balancer map. b.Name
+// (lowercased) will be used as the name registered with this builder. If the
+// Builder implements ConfigParser, ParseConfig will be called when new service
+// configs are received by the resolver, and the result will be provided to the
+// Balancer in UpdateClientConnState.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.
If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. 
+ // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. 
It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. 
If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..32d782f1c --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,237 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range s.ResolverState.Addresses { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. 
+ // The entry will be deleted in UpdateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as a resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Collect all ready SCs from the full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of the balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns.
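The zero-address handling above is part of the balancer API contract: returning ErrBadResolverState makes the ClientConn call ResolveNow with exponential backoff until a later UpdateClientConnState succeeds. A minimal sketch of the same contract in a hypothetical custom balancer (the type and its no-op methods are invented for illustration):

```go
package sketch

import (
	"errors"

	"google.golang.org/grpc/balancer"
)

// zeroAddrBalancer is a made-up minimal balancer showing only the
// zero-address contract implemented by baseBalancer above.
type zeroAddrBalancer struct{}

func (b *zeroAddrBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	if len(s.ResolverState.Addresses) == 0 {
		// Surface the error, then ask the ClientConn to re-resolve with
		// exponential backoff, exactly as baseBalancer does.
		b.ResolverError(errors.New("produced zero addresses"))
		return balancer.ErrBadResolverState
	}
	// ... create or refresh SubConns here ...
	return nil
}

func (b *zeroAddrBalancer) ResolverError(err error)                                       {}
func (b *zeroAddrBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {}
func (b *zeroAddrBalancer) Close()                                                        {}
```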
+func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..e31d76e33 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of the round_robin balancer; its purpose is to be used +// to build round_robin-like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..a24264a34 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors.
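NewBalancerBuilder is the intended extension point: supply a PickerBuilder and the base balancer manages SubConn lifecycle for you. A sketch of a toy policy built through this hook, assuming the vendored base package above; the "first_ready" name and picker logic are invented for illustration:

```go
package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// firstPickerBuilder always picks one arbitrary ready SubConn; it exists
// only to show the PickerBuilder contract.
type firstPickerBuilder struct{}

func (*firstPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	var sc balancer.SubConn
	for candidate := range info.ReadySCs {
		sc = candidate
		break // map iteration order is random, so "first" is arbitrary here
	}
	return &firstPicker{sc: sc}
}

type firstPicker struct{ sc balancer.SubConn }

func (p *firstPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	// Register under a made-up policy name; clients could then select it
	// via service config just like round_robin.
	balancer.Register(base.NewBalancerBuilder("first_ready", &firstPickerBuilder{}, base.Config{HealthCheck: true}))
}
```

The round_robin balancer later in this diff is built through exactly the same hook, only with a rotating picker.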
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValues(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..43c2a1537 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder. 
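The Set/Get pair above is the entire contract between a resolver and grpclb: the resolver stashes balancer addresses in the State's Attributes, and grpclb retrieves them. A small usage sketch; the addresses are placeholders:

```go
package main

import (
	"fmt"

	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

func main() {
	// A resolver that wants to hand remote-balancer addresses to grpclb
	// attaches them to the resolver.State it reports.
	s := resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}, // backend
	}
	s = grpclbstate.Set(s, &grpclbstate.State{
		BalancerAddresses: []resolver.Address{{Addr: "10.0.0.9:8080"}}, // balancer
	})

	// grpclb later reads the same attribute back out.
	if st := grpclbstate.Get(s); st != nil {
		fmt.Println("balancer addrs:", st.BalancerAddresses)
	}
}
```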
+func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Pick() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 000000000..11e592aab --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,246 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements the balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + scBuffer *buffer.Unbounded + done *grpcsync.Event + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + scBuffer: buffer.NewUnbounded(), + done: grpcsync.NewEvent(), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher calls balancer functions sequentially, so the balancer can be implemented +// lock-free.
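Because the round_robin builder registers itself in init, a client only needs to select the policy, for example through the default service config. A sketch with a placeholder target; note that WithDefaultServiceConfig is experimental in this gRPC version:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "dns:///..." is a placeholder target; when the name resolves to
	// multiple addresses, round_robin spreads RPCs across all ready ones.
	conn, err := grpc.Dial(
		"dns:///greeter.example.com:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	// ... issue RPCs with conn ...
}
```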
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.scBuffer.Get(): + ccb.scBuffer.Load() + if ccb.done.HasFired() { + break + } + ccb.balancerMu.Lock() + su := t.(*scStateUpdate) + ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) + ccb.balancerMu.Unlock() + case <-ccb.done.Done(): + } + + if ccb.done.HasFired() { + ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + return + } + } +} + +func (ccb *ccBalancerWrapper) close() { + ccb.done.Fire() +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.scBuffer.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + return ccb.balancer.UpdateClientConnState(*ccs) +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.balancerMu.Lock() + ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. 
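The watcher goroutine above is a single-consumer event loop: every SubConn state update flows through scBuffer, so the balancer itself never needs a lock on that path. A standalone sketch of the same pattern, with all names invented for illustration (a bounded channel stands in for the unbounded buffer):

```go
package main

import "fmt"

// event stands in for scStateUpdate: work handed to a single consumer
// goroutine so the consumer's state needs no locking.
type event struct{ sc, state string }

func main() {
	updates := make(chan event, 16) // the real wrapper uses an unbounded buffer
	done := make(chan struct{})

	go func() { // the "watcher": the only goroutine that touches `latest`
		latest := make(map[string]string) // lock-free by construction
		for u := range updates {
			latest[u.sc] = u.state
			fmt.Printf("handled %s -> %s (known SubConns: %d)\n", u.sc, u.state, len(latest))
		}
		close(done)
	}()

	updates <- event{"sc-1", "CONNECTING"}
	updates <- event{"sc-1", "READY"}
	close(updates)
	<-done
}
```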
+ ccb.cc.blockingpicker.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 000000000..da0472967 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1187 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.3.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. 
+func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. 
+ // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. + SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} +} + +func (x *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *GrpcLogEntry) GetCallId() uint64 { + if x != nil { + return x.CallId + } + return 0 +} + +func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if x != nil { + return x.SequenceIdWithinCall + } + return 0 +} + +func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if x != nil { + return x.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if x != nil { + return x.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (x *GrpcLogEntry) GetMessage() *Message { + if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (x *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (x *GrpcLogEntry) GetPayloadTruncated() bool { + if x != nil { + return x.PayloadTruncated + } + return false +} + +func (x *GrpcLogEntry) GetPeer() *Address { + if x != nil { + return x.Peer + } + return nil +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +type ClientHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The name of the RPC method, which looks something like: + // /<service>/<method> + // Note the leading "/" character. + MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // A single process may be used to run multiple virtual + // servers with different identities. + // The authority is the name of such a server identity. + // It is typically a portion of the URI in the form of + // <host> or <host>:<port>.
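Putting the pieces together, a log producer or test might construct and serialize an entry like this. A sketch assuming the vendored binarylog package and the legacy golang/protobuf helpers that this diff already vendors; the method and authority values are invented:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	entry := &binlogpb.GrpcLogEntry{
		Timestamp:            ptypes.TimestampNow(),
		CallId:               1, // must be non-zero, per the field comment
		SequenceIdWithinCall: 1, // first entry for this call
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               binlogpb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{
				MethodName: "/helloworld.Greeter/SayHello",
				Authority:  "greeter.example.com",
			},
		},
	}
	wire, err := proto.Marshal(entry)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}
	fmt.Printf("event=%v encoded=%d bytes\n", entry.GetType(), len(wire))
}
```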
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *duration.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
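Consumers that want the rich error details can unmarshal StatusDetails into google.rpc.Status, which the field comment above guarantees is the encoding used. A sketch, assuming the genproto status package that gRPC already depends on; the sample trailer is invented and carries no details:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

// statusFromTrailer recovers the full google.rpc.Status carried in
// 'grpc-status-details-bin', per the StatusDetails field comment.
func statusFromTrailer(t *binlogpb.Trailer) (*spb.Status, error) {
	if len(t.GetStatusDetails()) == 0 {
		return nil, nil // no details were attached
	}
	st := &spb.Status{}
	if err := proto.Unmarshal(t.GetStatusDetails(), st); err != nil {
		return nil, err
	}
	return st, nil
}

func main() {
	st, err := statusFromTrailer(&binlogpb.Trailer{StatusCode: 5, StatusMessage: "not found"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(st) // <nil>: this made-up trailer carries no details
}
```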
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
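The omission rules documented above reduce to a small predicate over header keys. The following sketch approximates them and is illustrative only, since the real filter lives inside gRPC's binarylog implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// loggable approximates the documented omission rules; it is not gRPC's
// actual filter.
func loggable(key string) bool {
	switch {
	case key == "grpc-trace-bin":
		return true // must always be logged when present
	case strings.HasPrefix(key, "grpc-"), key == "lb-token":
		return false // handled by gRPC, not user visible
	case strings.HasPrefix(key, ":"), key == "content-encoding",
		key == "user-agent", key == "te":
		return false // transport-specific entries
	default:
		return true
	}
}

func main() {
	for _, k := range []string{"authorization", "grpc-timeout", "grpc-trace-bin", ":path"} {
		fmt.Println(k, loggable(k))
	}
}
```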
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamp.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*duration.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..9e20e4d38 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
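An aside on the generated accessor above: file_grpc_binlog_v1_binarylog_proto_rawDescGZIP compresses the raw descriptor exactly once and caches the result. A minimal standalone sketch of the same sync.Once memoization idiom (all names and the placeholder payload here are illustrative, not part of the generated file):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	descOnce sync.Once
	descData = []byte("raw, uncompressed descriptor bytes") // placeholder payload
)

// descGZIP compresses descData on first use and returns the cached gzip
// bytes on every subsequent call, mirroring the generated accessor above.
func descGZIP() []byte {
	descOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		_, _ = zw.Write(descData)
		_ = zw.Close()
		descData = buf.Bytes()
	})
	return descData
}

func main() {
	fmt.Println(len(descGZIP()), len(descGZIP())) // second call is a cache hit
}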
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..cbd671a85 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1571 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
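Invoke above hands control to cc.dopts.unaryInt when one is configured, so every unary RPC can be wrapped. A hedged sketch of wiring such an interceptor at dial time (the target address and use of WithInsecure are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timingInterceptor logs the latency of every unary RPC before
// delegating to the real invoker.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(timingInterceptor),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}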
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. 
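The defaults above (4 MiB receive, MaxInt32 send) apply per call and can be raised for a whole connection with a default call option. A brief sketch; the 16 MiB figure is an arbitrary example:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Allow messages up to 16 MiB instead of the 4 MiB receive default;
	// the send limit is adjusted the same way.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16*1024*1024),
			grpc.MaxCallSendMsgSize(16*1024*1024),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}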
+ defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtInfo, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.Info(logger, cc.channelzID, "Channel Created") + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return 
(&net.Dialer{}).DialContext(ctx, network, addr) + } + if cc.dopts.withProxy { + cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) + } + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + cc.parsedTarget = grpcutil.ParseTarget(cc.target) + unixScheme := strings.HasPrefix(cc.target, "unix:") + channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) + } + } + + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else if unixScheme { + cc.authority = "localhost" + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. 
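As the comment above notes, DialContext only blocks until the connection is ready when WithBlock is set; otherwise it returns immediately and connects in the background. A sketch of a bounded blocking dial (the address and timeout are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// With WithBlock, ctx bounds the whole connection setup; without it,
	// DialContext returns at once and ctx only governs the setup steps.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "dns:///localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}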
+ if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. 
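chainUnaryClientInterceptors above prepends dopts.unaryInt and then runs each chained interceptor in registration order. A sketch demonstrating that ordering with WithChainUnaryInterceptor (the log tags are illustrative):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// tag returns an interceptor that logs its name before delegating,
// making the chaining order visible.
func tag(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		log.Printf("-> %s", name) // "first" logs before "second"
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(tag("first"), tag("second")),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}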
+// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
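GetState and WaitForStateChange, documented above, compose into a simple state-watch loop. A hedged sketch as a small helper package (assumes an already-dialed *grpc.ClientConn; both APIs are experimental, as their doc comments note):

package connwatch

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// Watch logs each connectivity transition until ctx is done. GetState
// reads the current state; WaitForStateChange blocks until it changes
// or ctx expires, returning false in the latter case.
func Watch(ctx context.Context, conn *grpc.ClientConn) {
	for {
		s := conn.GetState()
		log.Printf("connectivity: %v", s)
		if !conn.WaitForStateChange(ctx, s) {
			return // ctx expired or was canceled
		}
	}
}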
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) + } + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? 
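maybeApplyDefaultServiceConfig above falls back to the dial-time default (or an empty config) when the resolver supplies none. A sketch of providing that default as JSON; the round_robin policy is just an example:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Used only when the resolver does not deliver a service config or
	// service configs are disabled; invalid JSON makes Dial fail.
	conn, err := grpc.Dial("dns:///localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}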
+ } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + cc.applyServiceConfigAndBalancer(sc, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) + } + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. 
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtInfo, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. 
+// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return MethodConfig{} + } + if m, ok := cc.sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := cc.sc.Methods[method[:i+1]]; ok { + return m + } + return cc.sc.Methods[""] +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. 
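GetMethodConfig above falls back from the exact method key to the service default and finally the global default. A standalone sketch of that lookup order over a plain map (the keys and stand-in config type are illustrative):

package main

import (
	"fmt"
	"strings"
)

// methodConfig stands in for grpc's MethodConfig in this sketch.
type methodConfig struct{ desc string }

// lookup mirrors the fallback order: "/service/method", then
// "/service/", then the "" (all-services) default.
func lookup(methods map[string]methodConfig, method string) methodConfig {
	if m, ok := methods[method]; ok {
		return m
	}
	i := strings.LastIndex(method, "/")
	if m, ok := methods[method[:i+1]]; ok {
		return m
	}
	return methods[""]
}

func main() {
	methods := map[string]methodConfig{
		"/echo.Echo/Ping": {"exact match"},
		"/echo.Echo/":     {"service default"},
		"":                {"global default"},
	}
	fmt.Println(lookup(methods, "/echo.Echo/Ping")) // exact match
	fmt.Println(lookup(methods, "/echo.Echo/Pong")) // service default
	fmt.Println(lookup(methods, "/other.Svc/Do"))   // global default
}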
+ var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. 
after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + ac.state = s + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.transport = nil + ac.mu.Unlock() + + newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + if err != nil { + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return + } + continue + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close() + return + } + ac.curAddr = addr + ac.transport = newTr + ac.backoffIdx = 0 + + hctx, hcancel := context.WithCancel(ac.ctx) + ac.startHealthCheck(hctx) + ac.mu.Unlock() + + // Block until the created transport is down. And when this happens, + // we restart from the top of the addr list. + <-reconnect.Done() + hcancel() + // restart connecting - the top of the loop will set state to + // CONNECTING. 
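resetTransport above gives each connection attempt max(minConnectTimeout, backoff) to finish, so slow retries get a proportionally longer dial budget. A hedged sketch of that deadline computation with an exponential backoff (the 1.6 multiplier and 120s cap follow backoff.DefaultExponential; jitter is omitted for brevity):

package main

import (
	"fmt"
	"math"
	"time"
)

const minConnectTimeout = 20 * time.Second // matches the constant above

// backoffFor grows 1s * 1.6^retries, capped at 120s, per the
// connection-backoff proposal referenced above (jitter omitted).
func backoffFor(retries int) time.Duration {
	d := float64(time.Second) * math.Pow(1.6, float64(retries))
	return time.Duration(math.Min(d, float64(120*time.Second)))
}

// dialDuration is how long one attempt may take: never less than
// minConnectTimeout, but stretched to match the backoff as it grows.
func dialDuration(retries int) time.Duration {
	if b := backoffFor(retries); b > minConnectTimeout {
		return b
	}
	return minConnectTimeout
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Printf("retry %d: backoff %v, attempt budget %v\n",
			i, backoffFor(i), dialDuration(i))
	}
}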
This is against the current connectivity semantics doc, + // however it allows for graceful behavior for RPCs not yet dispatched + // - unfortunate timing would otherwise lead to the RPC failing even + // though the TRANSIENT_FAILURE state (called for by the doc) would be + // instantaneous. + // + // Ideally we should transition to Idle here and block until there is + // RPC activity that leads to the balancer requesting a reconnect of + // the associated SubConn. + } +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at the +// first successful one. It returns the transport, the address and a Event in +// the successful case. The Event fires when the returned transport disconnects. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + var firstConnErr error + for _, addr := range addrs { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return nil, resolver.Address{}, nil, errConnClosing + } + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + + newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + if err == nil { + return newTr, addr, reconnect, nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) + } + + // Couldn't connect to any address. + return nil, resolver.Address{}, nil, firstConnErr +} + +// createTransport creates a connection to addr. It returns the transport and a +// Event in the successful case. The Event fires when the returned transport +// disconnects. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + reconnect := grpcsync.NewEvent() + + // addr.ServerName takes precedent over ClientConn authority, if present. + if addr.ServerName == "" { + addr.ServerName = ac.cc.authority + } + + once := sync.Once{} + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + reconnect.Fire() + } + + onClose := func() { + ac.mu.Lock() + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + close(onCloseCalled) + reconnect.Fire() + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. 
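createTransport above races the server preface against the connect deadline and transport closure. A generic sketch of that three-way select, decoupled from any transport (channel names are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// awaitHandshake mirrors the select in createTransport: succeed on the
// preface, fail on close, and time out at the connect deadline.
func awaitHandshake(preface, closed <-chan struct{}, deadline time.Time) error {
	select {
	case <-time.After(time.Until(deadline)):
		return errors.New("timed out waiting for server handshake")
	case <-preface:
		return nil // preface received; the transport is usable
	case <-closed:
		return errors.New("connection closed")
	}
}

func main() {
	preface := make(chan struct{})
	closed := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(preface) }()
	fmt.Println(awaitHandshake(preface, closed, time.Now().Add(time.Second)))
}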
+ channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) + return nil, nil, err + } + + select { + case <-time.After(time.Until(connectDeadline)): + // We didn't get the preface in time. + newTr.Close() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + } + return newTr, reconnect, nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. 
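Per the four requirements listed above, client-side health checking only runs when the health package is linked in and the service config names a service. A hedged sketch of enabling it (an empty serviceName asks about the server's overall health rather than one service):

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // sets internal.HealthCheckFunc (requirement 2)
)

func main() {
	conn, err := grpc.Dial("dns:///localhost:50051",
		grpc.WithInsecure(),
		// A non-empty healthCheckConfig satisfies requirement 3; the
		// balancer must still opt in per requirement 4.
		grpc.WithDefaultServiceConfig(`{"healthCheckConfig":{"serviceName":""}}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}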
+ go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready && ac.transport != nil { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop. +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. + ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } + if channelz.IsOn() { + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel Deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. 
+ channelz.RemoveEntry(ac.channelzID) + } + ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..129776547 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
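The retryThrottler above implements the token-bucket policy carried in the service config's retryThrottling block: each failure costs a token, each success refunds tokenRatio, and retries stop once the bucket falls to half of maxTokens. A sketch of supplying that configuration (the token values are arbitrary examples; retries themselves additionally require retryPolicy entries):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// maxTokens seeds the bucket; thresh is maxTokens/2, per the code above.
	conn, err := grpc.Dial("dns:///localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(
			`{"retryThrottling":{"maxTokens":10,"tokenRatio":0.1}}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}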
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/encoding"
+	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+)
+
+// baseCodec contains the functionality of both Codec and encoding.Codec, but
+// omits the name/string, which vary between the two and are not needed for
+// anything besides the registry in the encoding package.
+type baseCodec interface {
+	Marshal(v interface{}) ([]byte, error)
+	Unmarshal(data []byte, v interface{}) error
+}
+
+var _ baseCodec = Codec(nil)
+var _ baseCodec = encoding.Codec(nil)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+// Note that implementations of this interface must be thread safe;
+// a Codec's methods can be called from concurrent goroutines.
+//
+// Deprecated: use encoding.Codec instead.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// String returns the name of the Codec implementation. This is unused by
+	// gRPC.
+	String() string
+}
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
new file mode 100644
index 000000000..4cdc6ba7c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from a .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not, please install them first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes.
+#
+proto=$1
+protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 02738839d..11b106182 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -33,6 +33,9 @@ const (
 	OK Code = 0
 
 	// Canceled indicates the operation was canceled (typically by the caller).
+	//
+	// The gRPC framework will generate this error code when cancellation
+	// is requested.
 	Canceled Code = 1
 
 	// Unknown error. An example of where this error may be returned is
@@ -40,12 +43,17 @@ const (
 	// an error-space that is not known in this address space. Also
 	// errors raised by APIs that do not return enough error information
 	// may be converted to this error.
+	//
+	// The gRPC framework will generate this error code in the two cases
+	// mentioned above.
 	Unknown Code = 2
 
 	// InvalidArgument indicates client specified an invalid argument.
 	// Note that this differs from FailedPrecondition. It indicates arguments
 	// that are problematic regardless of the state of the system
 	// (e.g., a malformed file name).
+	//
+	// This error code will not be generated by the gRPC framework.
 	InvalidArgument Code = 3
 
 	// DeadlineExceeded means operation expired before completion.
@@ -53,14 +61,21 @@ const (
 	// returned even if the operation has completed successfully. For
 	// example, a successful response from a server could have been delayed
 	// long enough for the deadline to expire.
+	//
+	// The gRPC framework will generate this error code when the deadline is
+	// exceeded.
DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. + // + // This error code will not be generated by the gRPC framework. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. + // + // This error code will not be generated by the gRPC framework. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to @@ -69,10 +84,17 @@ const ( // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the @@ -94,6 +116,8 @@ const ( // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a @@ -102,6 +126,8 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. @@ -119,15 +145,26 @@ const ( // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. + // + // This error code will not be generated by the gRPC framework. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. Internal Code = 13 // Unavailable indicates the service is currently unavailable. @@ -137,13 +174,22 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. 
+ // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. Unauthenticated Code = 16 _maxCode = 17 diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..010156261 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..02766443a --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,289 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. 
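+//
+// As a hedged sketch of typical client-side use (the target, ServerName, and
+// use of grpc.Dial are illustrative, not part of this package):
+//
+//	creds := credentials.NewTLS(&tls.Config{ServerName: "example.com"})
+//	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))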
+package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/internal" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // Invalid indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + Invalid SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. 
+// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. 
(For proto methods, this has the format "/some.Service/Method")
+	Method string
+	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake).
+	AuthInfo AuthInfo
+}
+
+// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+type requestInfoKey struct{}
+
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
+	return
+}
+
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+	// Attributes contains the attributes for the address. It could be provided
+	// by the gRPC, resolver, balancer etc.
+	Attributes *attributes.Attributes
+}
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo)
+	return chi
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the GetCommonAuthInfo() method,
+// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), this is for the purpose of backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() *CommonAuthInfo
+	}
+	ri, _ := RequestInfoFromContext(ctx)
+	if ri.AuthInfo == nil {
+		return errors.New("unable to obtain SecurityLevel from context")
+	}
+	if ci, ok := ri.AuthInfo.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == Invalid {
+			return nil
+		}
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+		}
+	}
+	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+	return nil
+}
+
+func init() {
+	internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
+		return context.WithValue(ctx, requestInfoKey{}, ri)
+	}
+	internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context {
+		return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+	}
+}
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy.
This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that non-TLS protocols should return
+// from GetSecurityValue(), which contains protocol-specific security info. Note that
+// the Value field will be sent to users of channelz requesting channel info, so
+// sensitive info should be avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	Name  string
+	Value proto.Message
+}
diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go
new file mode 100644
index 000000000..ccbf35b33
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/go12.go
@@ -0,0 +1,30 @@
+// +build go1.12
+
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import "crypto/tls"
+
+// This init function adds cipher suite constants only defined in Go 1.12.
+func init() {
+	cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
+	cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
+	cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
+}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 000000000..8ee7124f2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,233 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+
+	credinternal "google.golang.org/grpc/internal/credentials"
+)
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+	CommonAuthInfo
+	// This API is experimental.
+	SPIFFEID *url.URL
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// GetSecurityValue returns security info requested by channelz.
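+// Channelz normally invokes this itself. A hedged sketch of manual use, where
+// authInfo is an illustrative AuthInfo value taken from an established
+// connection:
+//
+//	if si, ok := authInfo.(ChannelzSecurityInfo); ok {
+//		_ = si.GetSecurityValue() // *TLSChannelzSecurityValue for TLS connections
+//	}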
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. 
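+//
+// A hedged usage sketch (caPEM is an illustrative PEM-encoded CA bundle):
+//
+//	cp := x509.NewCertPool()
+//	if !cp.AppendCertsFromPEM(caPEM) {
+//		// handle the parse failure
+//	}
+//	creds := NewClientTLSFromCert(cp, "")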
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..a93fcab8f --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,638 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. 
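+//
+// Options are supplied to Dial as a variadic list; a hedged sketch of how a
+// dialOptions value ends up populated (target and option choice are
+// illustrative):
+//
+//	conn, err := grpc.Dial("localhost:50051",
+//		grpc.WithInsecure(),
+//		grpc.WithBlock(),
+//	)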
+type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. + balancerBuilder balancer.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + // This is used by ccResolverWrapper to backoff between successive calls to + // resolver.ResolveNow(). The user will have no need to configure this, but + // we need to be able to configure this in tests. + resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder + withProxy bool +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. 
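+//
+// A hedged sketch combining the stream-level and connection-level window
+// options (the 1 MiB values are illustrative):
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithInitialWindowSize(1<<20),
+//		grpc.WithInitialConnWindowSize(1<<20),
+//	)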
+func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the dialer to use the provided ConnectParams. 
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithConnectParams(p ConnectParams) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = internalbackoff.Exponential{Config: p.Backoff}
+		o.minConnectTimeout = func() time.Duration {
+			return p.MinConnectTimeout
+		}
+	})
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+	bc := backoff.DefaultConfig
+	bc.MaxDelay = b.MaxDelay
+	return withBackoff(internalbackoff.Exponential{Config: bc})
+}
+
+// withBackoff sets the backoff strategy used for connectRetryNum after a failed
+// connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs internalbackoff.Strategy) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = bs
+	})
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+	})
+}
+
+// WithReturnConnectionError returns a DialOption which makes the client connection
+// return a string containing both the last connection error that occurred and
+// the context.DeadlineExceeded error.
+// Implies WithBlock().
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithReturnConnectionError() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+		o.returnLastError = true
+	})
+}
+
+// WithInsecure returns a DialOption which disables transport security for this
+// ClientConn. Note that transport security is required unless WithInsecure is
+// set.
+func WithInsecure() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.insecure = true
+	})
+}
+
+// WithNoProxy returns a DialOption which disables the use of proxies for this
+// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithNoProxy() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.withProxy = false
+	})
+}
+
+// WithTransportCredentials returns a DialOption which configures connection-level
+// security credentials (e.g., TLS/SSL). This should not be used together
+// with WithCredentialsBundle.
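+//
+// A hedged usage sketch (the CA file path and target are illustrative):
+//
+//	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
+//	if err != nil {
+//		// handle the error
+//	}
+//	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))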
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	})
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	})
+}
+
+// WithCredentialsBundle returns a DialOption to set a credentials bundle for
+// the ClientConn. This should not be used together with
+// WithTransportCredentials.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithCredentialsBundle(b credentials.Bundle) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.CredsBundle = b
+	})
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: use DialContext and context.WithTimeout instead. Will be
+// supported throughout 1.x.
+func WithTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.timeout = d
+	})
+}
+
+// WithContextDialer returns a DialOption that sets a dialer to create
+// connections. If FailOnNonTempDialError() is set to true, and an error is
+// returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.Dialer = f
+	})
+}
+
+func init() {
+	internal.WithHealthCheckFunc = withHealthCheckFunc
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses. If FailOnNonTempDialError() is set to true, and an error
+// is returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Deprecated: use WithContextDialer instead. Will be supported throughout
+// 1.x.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+	return WithContextDialer(
+		func(ctx context.Context, addr string) (net.Conn, error) {
+			if deadline, ok := ctx.Deadline(); ok {
+				return f(addr, time.Until(deadline))
+			}
+			return f(addr, 0)
+		})
+}
+
+// WithStatsHandler returns a DialOption that specifies the stats handler for
+// all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.StatsHandler = h
+	})
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
+// non-temporary dial errors. If f is true, and the dialer returns a non-temporary
+// error, gRPC will fail the connection to the network address and won't try to
+// reconnect. The default value of FailOnNonTempDialError is false.
+//
+// FailOnNonTempDialError only affects the initial dial, and does not do
+// anything useful unless you are also using WithBlock().
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
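+//
+// A hedged sketch; per the note above this is only meaningful together with
+// WithBlock() (the target is illustrative):
+//
+//	conn, err := grpc.Dial("10.0.0.1:50051",
+//		grpc.WithInsecure(),
+//		grpc.WithBlock(),
+//		grpc.FailOnNonTempDialError(true),
+//	)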
+func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header. This value only works with WithInsecure and has no +// effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. 
+//
+// Note that this dial option only disables service config from the resolver. If
+// a default service config is provided, gRPC will use the default service config.
+func WithDisableServiceConfig() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableServiceConfig = true
+	})
+}
+
+// WithDefaultServiceConfig returns a DialOption that configures the default
+// service config, which will be used in cases where:
+//
+// 1. WithDisableServiceConfig is also used.
+// 2. The resolver does not return a service config or returns an invalid
+//    service config.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithDefaultServiceConfig(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultServiceConfigRawJSON = &s
+	})
+}
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them. This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+//
+// Retry support is currently disabled by default, but will be enabled by
+// default in the future. Until then, it may be enabled by setting the
+// environment variable "GRPC_GO_RETRY" to "on".
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithDisableRetry() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableRetry = true
+	})
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.MaxHeaderListSize = &s
+	})
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check function.
+//
+// For testing purposes only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		disableRetry:    !envconfig.Retry,
+		healthCheckFunc: internal.HealthCheckFunc,
+		copts: transport.ConnectOptions{
+			WriteBufferSize: defaultWriteBufSize,
+			ReadBufferSize:  defaultReadBufSize,
+		},
+		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
+		withProxy:         true,
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purposes only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
+
+// withResolveNowBackoff specifies the function that clientconn uses to backoff
+// between successive calls to resolver.ResolveNow().
+//
+// For testing purposes only.
+func withResolveNowBackoff(f func(int) time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolveNowBackoff = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..0022859ad --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..6d84f74c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. 
If an error occurs while
+	// initializing the decompressor, that error is returned instead.
+	Decompress(r io.Reader) (io.Reader, error)
+	// Name is the name of the compression codec and is used to set the content
+	// coding header. The result must be static; the result cannot change
+	// between calls.
+	Name() string
+	// If a Compressor implements
+	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
+	// to determine the size of the buffer allocated for the result of decompression.
+	// Return -1 to indicate unknown size.
+	//
+	// Experimental
+	//
+	// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+	// later release.
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name. It can
+// be activated when sending an RPC via grpc.UseCompressor(). It will be
+// automatically accessed when receiving a message based on the content coding
+// header. Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+	registeredCompressor[c.Name()] = c
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+	return registeredCompressor[name]
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission. The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+var registeredCodecs = make(map[string]Codec)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodec(codec Codec) {
+	if codec == nil {
+		panic("cannot register a nil Codec")
+	}
+	if codec.Name() == "" {
+		panic("cannot register Codec with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
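Aside (not part of the diff): a custom Codec following the contract above. A JSON codec is the usual illustration; the package and codec name here are hypothetical. A client would select it per call with grpc.CallContentSubtype("json").

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec satisfies encoding.Codec; registering it in init() follows the
// "initialization time only" rule stated above.
type codec struct{}

func init() { encoding.RegisterCodec(codec{}) }

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json" }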
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodec(contentSubtype string) Codec {
+	return registeredCodecs[contentSubtype]
+}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 000000000..66b97a6f6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,110 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"math"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodec(codec{})
+}
+
+// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type codec struct{}
+
+type cachedProtoBuffer struct {
+	lastMarshaledSize uint32
+	proto.Buffer
+}
+
+func capToMaxInt32(val int) uint32 {
+	if val > math.MaxInt32 {
+		return uint32(math.MaxInt32)
+	}
+	return uint32(val)
+}
+
+func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
+	protoMsg := v.(proto.Message)
+	newSlice := make([]byte, 0, cb.lastMarshaledSize)
+
+	cb.SetBuf(newSlice)
+	cb.Reset()
+	if err := cb.Marshal(protoMsg); err != nil {
+		return nil, err
+	}
+	out := cb.Bytes()
+	cb.lastMarshaledSize = capToMaxInt32(len(out))
+	return out, nil
+}
+
+func (codec) Marshal(v interface{}) ([]byte, error) {
+	if pm, ok := v.(proto.Marshaler); ok {
+		// object can marshal itself, no need for buffer
+		return pm.Marshal()
+	}
+
+	cb := protoBufferPool.Get().(*cachedProtoBuffer)
+	out, err := marshal(v, cb)
+
+	// put back buffer and lose the ref to the slice
+	cb.SetBuf(nil)
+	protoBufferPool.Put(cb)
+	return out, err
+}
+
+func (codec) Unmarshal(data []byte, v interface{}) error {
+	protoMsg := v.(proto.Message)
+	protoMsg.Reset()
+
+	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
+		// object can unmarshal itself, no need for buffer
+		return pu.Unmarshal(data)
+	}
+
+	cb := protoBufferPool.Get().(*cachedProtoBuffer)
+	cb.SetBuf(data)
+	err := cb.Unmarshal(protoMsg)
+	cb.SetBuf(nil)
+	protoBufferPool.Put(cb)
+	return err
+}
+
+func (codec) Name() string {
+	return Name
+}
+
+var protoBufferPool = &sync.Pool{
+	New: func() interface{} {
+		return &cachedProtoBuffer{
+			Buffer:            proto.Buffer{},
+			lastMarshaledSize: 16,
+		}
+	},
+}
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
new file mode 100644
index 000000000..d97276556
--- /dev/null
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -0,0 +1,17 @@
+module google.golang.org/grpc
+
+go 1.11
+
+require (
+	github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
+	github.com/envoyproxy/go-control-plane v0.9.4
+	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
+	github.com/golang/protobuf v1.4.1
+	github.com/google/go-cmp v0.5.0
+	github.com/google/uuid v1.1.2
+	golang.org/x/net
v0.0.0-20190311183353-d8887717615a + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 + google.golang.org/protobuf v1.25.0 +) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum new file mode 100644 index 000000000..356a82e3d --- /dev/null +++ b/vendor/google.golang.org/grpc/go.sum @@ -0,0 +1,84 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 
h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..8358dd6e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) 
+ grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..c8bb2be34 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import ( + "os" + + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return grpclog.Logger.V(l) +} + +// Info logs to the INFO log. 
+func Info(args ...interface{}) {
+	grpclog.Logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...interface{}) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+	grpclog.Logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+	grpclog.Logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+	grpclog.Logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+	grpclog.Logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	grpclog.Logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	grpclog.Logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	grpclog.Logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	grpclog.Logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	grpclog.Logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	grpclog.Logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+	grpclog.Logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+	grpclog.Logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 000000000..ef06a4822
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
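Aside (not part of the diff): how a package might use the component logger added in component.go above, alongside the package-level helpers. The component name "mypkg" is hypothetical.

package mypkg

import "google.golang.org/grpc/grpclog"

// logger prefixes every entry with "[mypkg]"; Component returns the cached
// instance if the same name was requested before.
var logger = grpclog.Component("mypkg")

func doWork() {
	if logger.V(2) { // gate chatty output on GRPC_GO_LOG_VERBOSITY_LEVEL >= 2
		logger.Infoln("starting work")
	}
	logger.Warningf("retrying in %dms", 100)
}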
+
+package grpclog
+
+import "google.golang.org/grpc/internal/grpclog"
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	grpclog.Logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+	Logger
+}
+
+func (g *loggerWrapper) Info(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Warningln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Error(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Errorln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 000000000..4ee33171e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,221 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strconv"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...interface{})
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...interface{})
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...interface{})
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...interface{})
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...interface{})
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...interface{})
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. 
+		errorW = os.Stderr
+	case "WARNING", "warning":
+		warningW = os.Stderr
+	case "INFO", "info":
+		infoW = os.Stderr
+	}
+
+	var v int
+	vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
+	if vl, err := strconv.Atoi(vLevel); err == nil {
+		v = vl
+	}
+	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+}
+
+func (g *loggerT) Info(args ...interface{}) {
+	g.m[infoLog].Print(args...)
+}
+
+func (g *loggerT) Infoln(args ...interface{}) {
+	g.m[infoLog].Println(args...)
+}
+
+func (g *loggerT) Infof(format string, args ...interface{}) {
+	g.m[infoLog].Printf(format, args...)
+}
+
+func (g *loggerT) Warning(args ...interface{}) {
+	g.m[warningLog].Print(args...)
+}
+
+func (g *loggerT) Warningln(args ...interface{}) {
+	g.m[warningLog].Println(args...)
+}
+
+func (g *loggerT) Warningf(format string, args ...interface{}) {
+	g.m[warningLog].Printf(format, args...)
+}
+
+func (g *loggerT) Error(args ...interface{}) {
+	g.m[errorLog].Print(args...)
+}
+
+func (g *loggerT) Errorln(args ...interface{}) {
+	g.m[errorLog].Println(args...)
+}
+
+func (g *loggerT) Errorf(format string, args ...interface{}) {
+	g.m[errorLog].Printf(format, args...)
+}
+
+func (g *loggerT) Fatal(args ...interface{}) {
+	g.m[fatalLog].Fatal(args...)
+	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) Fatalln(args ...interface{}) {
+	g.m[fatalLog].Fatalln(args...)
+	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) Fatalf(format string, args ...interface{}) {
+	g.m[fatalLog].Fatalf(format, args...)
+	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) V(l int) bool {
+	return l <= g.v
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	LoggerV2
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	FatalDepth(depth int, args ...interface{})
+}
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
new file mode 100644
index 000000000..15ff9facd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/install_gae.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+TMP=$(mktemp -d /tmp/sdk.XXX) \
+&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
+&& unzip -q $TMP.zip -d $TMP \
+&& export PATH="$PATH:$TMP/go_appengine"
\ No newline at end of file diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..d1a609e48 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. 
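Aside (not part of the diff): a minimal unary server interceptor matching the UnaryServerInterceptor contract described above; the logging behavior is illustrative only.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logUnary logs each unary call's full method name, then delegates to the
// real handler, as the interceptor contract requires.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary RPC: %s", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	// Install the interceptor when constructing the server.
	srv := grpc.NewServer(grpc.UnaryInterceptor(logUnary))
	_ = srv // service registration and Serve() omitted in this sketch
}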
+type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+
+// StreamServerInfo consists of various information about a streaming RPC on
+// server side. All per-rpc information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 000000000..5fc0ee3da
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
+
+// Exponential implements exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
+}
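Aside (not part of the diff): this internal package is not importable from outside grpc, so the standalone sketch below restates the jittered exponential algorithm implemented by the Backoff method that follows. The knob values are assumptions taken from the gRPC connection-backoff spec defaults (base 1s, multiplier 1.6, jitter 0.2, max 120s).

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors Exponential.Backoff: grow the delay geometrically, cap it,
// then spread delays with +/-20% jitter so retries don't synchronize.
func backoff(retries int) time.Duration {
	const (
		base       = float64(time.Second)       // BaseDelay
		multiplier = 1.6                        // Multiplier
		jitter     = 0.2                        // Jitter
		maxDelay   = float64(120 * time.Second) // MaxDelay
	)
	if retries == 0 {
		return time.Duration(base)
	}
	d := base
	for d < maxDelay && retries > 0 {
		d *= multiplier
		retries--
	}
	if d > maxDelay {
		d = maxDelay
	}
	d *= 1 + jitter*(rand.Float64()*2-1)
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Printf("retry %d -> %v\n", i, backoff(i))
	}
}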
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return bc.Config.BaseDelay
+	}
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.Config.Multiplier
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 000000000..3a905d966
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) interface{}
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
+func Parse(md metadata.MD) interface{} {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 000000000..5cc3aeddb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,170 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+// Logger is the global binary logger. It can be used to get a binary logger
+// for each method.
+type Logger interface {
+	getMethodLogger(methodName string) *MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. It should be built
+// at init time from the configuration (environment variable or flags).
+//
+// It is used to get a methodLogger for each individual method.
+var binLogger Logger
+
+var grpclogLogger = grpclog.Component("binarylog")
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each methodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func GetMethodLogger(methodName string) *MethodLogger {
+	if binLogger == nil {
+		return nil
+	}
+	return binLogger.getMethodLogger(methodName)
+}
+
+func init() {
+	const envStr = "GRPC_BINARY_LOG_FILTER"
+	configStr := os.Getenv(envStr)
+	binLogger = NewLoggerFromConfigString(configStr)
+}
+
+type methodLoggerConfig struct {
+	// Max length of header and message.
+	hdr, msg uint64
+}
+
+type logger struct {
+	all      *methodLoggerConfig
+	services map[string]*methodLoggerConfig
+	methods  map[string]*methodLoggerConfig
+
+	blacklist map[string]struct{}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+	return &logger{}
+}
+
+// Set method logger for "*".
+func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
+	if l.all != nil {
+		return fmt.Errorf("conflicting global rules found")
+	}
+	l.all = ml
+	return nil
+}
+
+// Set method logger for "service/*".
+//
+// New methodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
+	if _, ok := l.services[service]; ok {
+		return fmt.Errorf("conflicting service rules for service %v found", service)
+	}
+	if l.services == nil {
+		l.services = make(map[string]*methodLoggerConfig)
+	}
+	l.services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// New methodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
+	if _, ok := l.blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.methods == nil {
+		l.methods = make(map[string]*methodLoggerConfig)
+	}
+	l.methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.blacklist == nil {
+		l.blacklist = make(map[string]struct{})
+	}
+	l.blacklist[method] = struct{}{}
+	return nil
+}
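Aside (not part of the diff): GRPC_BINARY_LOG_FILTER, read once in init() above, uses the config grammar parsed by env_config.go below. This standalone sketch mirrors the "{h:N;m:N}" suffix regexp (copied from headerMessageConfigRegexpStr) so the accepted forms are easy to see; the sample strings are hypothetical.

package main

import (
	"fmt"
	"regexp"
)

// re mirrors headerMessageConfigRegexpStr: "{h[:N];m[:N]}" with both length
// limits optional, as in a filter entry such as "Foo/Bar{h:64;m:1024}".
var re = regexp.MustCompile(`^{h(?::(\d+))?;m(?::(\d+))?}$`)

func main() {
	for _, s := range []string{"{h;m}", "{h:64;m:1024}"} {
		m := re.FindStringSubmatch(s)
		fmt.Printf("%q -> header limit %q, message limit %q\n", s, m[1], m[2])
	}
}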
+
+// getMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each methodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func (l *logger) getMethodLogger(methodName string) *MethodLogger {
+	s, m, err := grpcutil.ParseMethod(methodName)
+	if err != nil {
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		return nil
+	}
+	if ml, ok := l.methods[s+"/"+m]; ok {
+		return newMethodLogger(ml.hdr, ml.msg)
+	}
+	if _, ok := l.blacklist[s+"/"+m]; ok {
+		return nil
+	}
+	if ml, ok := l.services[s]; ok {
+		return newMethodLogger(ml.hdr, ml.msg)
+	}
+	if l.all == nil {
+		return nil
+	}
+	return newMethodLogger(l.all.hdr, l.all.msg)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 000000000..1ee00a39a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains variables/functions that are exported for testing
+// only.
+//
+// An ideal way would be to put those in a *_test.go file in the binarylog
+// package. But this doesn't work with staticcheck under Go modules. Error was:
+// "MdToMetadataProto not declared by package binarylog". This could be caused
+// by the way staticcheck looks for files for a certain package, which doesn't
+// support *_test.go files.
+//
+// Move those to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
+	// for testing only.
+	AllLogger = NewLoggerFromConfigString("*")
+	// MdToMetadataProto converts metadata to a binary logging proto message.
+	// It's for testing only.
+	MdToMetadataProto = mdToMetadataProto
+	// AddrToProto converts an address to a binary logging proto message. It's
+	// for testing only.
+	AddrToProto = addrToProto
+)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 000000000..d8f4e7602
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger.
It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclogLogger.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. + if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". 
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 000000000..0cdb41831 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,422 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package binarylog
+
+import (
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+type callIDGenerator struct {
+	id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+	id := atomic.AddUint64(&g.id, 1)
+	return id
+}
+
+// reset is for testing only, and doesn't need to be thread safe.
+func (g *callIDGenerator) reset() {
+	g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+type MethodLogger struct {
+	headerMaxLen, messageMaxLen uint64
+
+	callID          uint64
+	idWithinCallGen *callIDGenerator
+
+	sink Sink // TODO(blog): make this pluggable.
+}
+
+func newMethodLogger(h, m uint64) *MethodLogger {
+	return &MethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: DefaultSink, // TODO(blog): make it pluggable.
+	}
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *MethodLogger) Log(c LogEntryConfig) {
+	m := c.toProto()
+	timestamp, _ := ptypes.TimestampProto(time.Now())
+	m.Timestamp = timestamp
+	m.CallId = ml.callID
+	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+	switch pay := m.Payload.(type) {
+	case *pb.GrpcLogEntry_ClientHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+	case *pb.GrpcLogEntry_ServerHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+	case *pb.GrpcLogEntry_Message:
+		m.PayloadTruncated = ml.truncateMessage(pay.Message)
+	}
+
+	ml.sink.Write(m)
+}
+
+func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+	if ml.headerMaxLen == maxUInt {
+		return false
+	}
+	var (
+		bytesLimit = ml.headerMaxLen
+		index      int
+	)
+	// At the end of the loop, index will be the first entry where the total
+	// size is greater than the limit:
+	//
+	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
+	for ; index < len(mdPb.Entry); index++ {
+		entry := mdPb.Entry[index]
+		if entry.Key == "grpc-trace-bin" {
+			// "grpc-trace-bin" is a special key. It's kept in the log entry,
+			// but not counted towards the size limit.
+			continue
+		}
+		currentEntryLen := uint64(len(entry.Value))
+		if currentEntryLen > bytesLimit {
+			break
+		}
+		bytesLimit -= currentEntryLen
+	}
+	truncated = index < len(mdPb.Entry)
+	mdPb.Entry = mdPb.Entry[:index]
+	return truncated
+}
+
+func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+	if ml.messageMaxLen == maxUInt {
+		return false
+	}
+	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+		return false
+	}
+	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+	return true
+}
+
+// LogEntryConfig represents the configuration for a binary log entry.
+type LogEntryConfig interface {
+	toProto() *pb.GrpcLogEntry
+}
+
+// ClientHeader configures the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	MethodName   string
+	Authority    string
+	Timeout      time.Duration
+	// PeerAddr is required only when it's on server side.
+	PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
+	// This function doesn't need to set all the fields (e.g. seq ID). The Log
+	// function will set the fields when necessary.
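+	// Timeout is attached below only when it is positive; a zero value means
+	// the RPC carried no deadline, so the field is simply left unset.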
+	clientHeader := &pb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &pb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configures the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &pb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &pb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configures the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configures the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configures the binary log entry to be a ClientHalfClose entry.
+type ClientHalfClose struct {
+	OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+		Payload: nil, // No payload here.
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerTrailer configures the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+	OnClientSide bool
+	Trailer      metadata.MD
+	// Err is the status error.
+	Err error
+	// PeerAddr is required only when it's on client side and the RPC is trailer
+	// only.
+	PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
+	st, ok := status.FromError(c.Err)
+	if !ok {
+		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
+	}
+	var (
+		detailsBytes []byte
+		err          error
+	)
+	stProto := st.Proto()
+	if stProto != nil && len(stProto.Details) != 0 {
+		detailsBytes, err = proto.Marshal(stProto)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
+		}
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &pb.GrpcLogEntry_Trailer{
+			Trailer: &pb.Trailer{
+				Metadata:      mdToMetadataProto(c.Trailer),
+				StatusCode:    uint32(st.Code()),
+				StatusMessage: st.Message(),
+				StatusDetails: detailsBytes,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// Cancel configures the binary log entry to be a Cancel entry.
+type Cancel struct {
+	OnClientSide bool
+}
+
+func (c *Cancel) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+		Payload: nil,
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+	switch key {
+	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+		return true
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
+		return false
+	}
+	return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *pb.Metadata {
+	ret := &pb.Metadata{}
+	for k, vv := range md {
+		if metadataKeyOmit(k) {
+			continue
+		}
+		for _, v := range vv {
+			ret.Entry = append(ret.Entry,
+				&pb.MetadataEntry{
+					Key:   k,
+					Value: []byte(v),
+				},
+			)
+		}
+	}
+	return ret
+}
+
+func addrToProto(addr net.Addr) *pb.Address {
+	ret := &pb.Address{}
+	switch a := addr.(type) {
+	case *net.TCPAddr:
+		if a.IP.To4() != nil {
+			ret.Type = pb.Address_TYPE_IPV4
+		} else if a.IP.To16() != nil {
+			ret.Type = pb.Address_TYPE_IPV6
+		} else {
+			ret.Type = pb.Address_TYPE_UNKNOWN
+			// Do not set address and port fields.
+			break
+		}
+		ret.Address = a.IP.String()
+		ret.IpPort = uint32(a.Port)
+	case *net.UnixAddr:
+		ret.Type = pb.Address_TYPE_UNIX
+		ret.Address = a.String()
+	default:
+		ret.Type = pb.Address_TYPE_UNKNOWN
+	}
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 000000000..7d7a3056b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+)
+
+var (
+	// DefaultSink is the sink to which logs will be written. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// Sink writes a log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
+type Sink interface {
+	// Write will be called to write the log entry into the sink.
+	//
+	// It should be thread-safe so it can be called in parallel.
+	Write(*pb.GrpcLogEntry) error
+	// Close will be called when the Sink is replaced by a new Sink.
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                 { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// No buffering is done; Close() doesn't try to close the writer.
+func newWriterSink(w io.Writer) Sink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err)
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufferedSink struct {
+	mu     sync.Mutex
+	closer io.Closer
+	out    Sink          // out is built on buf.
+	buf    *bufio.Writer // buf is kept for flush.
+
+	writeStartOnce sync.Once
+	writeTicker    *time.Ticker
+}
+
+func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
+	// Start the write loop when Write is called.
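+	// The sync.Once makes this lazy start race-free: concurrent first calls
+	// to Write agree on a single flush goroutine.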
+ fs.writeStartOnce.Do(fs.startFlushGoroutine) + fs.mu.Lock() + if err := fs.out.Write(e); err != nil { + fs.mu.Unlock() + return err + } + fs.mu.Unlock() + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for range fs.writeTicker.C { + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + fs.mu.Unlock() + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..9f6a0c120 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. 
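+//
+// A minimal consumer-loop sketch for the Put/Load/Get protocol described on
+// Unbounded above (the producer goroutine and handle are assumptions):
+//
+//	b := NewUnbounded()
+//	go b.Put("update")
+//	for v := range b.Get() {
+//		handle(v.(string)) // type assertion, per the type comment
+//		b.Load()           // release the next buffered value, if any
+//	}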
+func NewUnbounded() *Unbounded {
+	return &Unbounded{c: make(chan interface{}, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t interface{}) {
+	b.mu.Lock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+	b.mu.Unlock()
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent on.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+func (b *Unbounded) Get() <-chan interface{} {
+	return b.c
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 000000000..f73141393
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,737 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage  = int64(50)
+	curState      int32
+	maxTraceEntry = defaultMaxTraceEntry
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		NewChannelzStorage()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e.
+// channel/subchannel). Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to the default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// dbWrapper wraps around a reference to internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorage initializes channelz data storage and the id generator.
+//
+// This function returns a cleanup function that waits for all channelz state
+// to be reset by the grpc goroutines when the entities get closed. Using this
+// cleanup function keeps tests from interfering with each other: since the id
+// space is shared, a lingering entity-removal goroutine from a previous test
+// could otherwise remove an entity just registered by the new test.
+//
+// Note: This function is exported for testing purposes only. Users should not
+// call it in most cases.
+func NewChannelzStorage() (cleanup func() error) {
+	db.set(&channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	})
+	idGen.reset()
+	return func() error {
+		var err error
+		cm := db.get()
+		if cm == nil {
+			return nil
+		}
+		for i := 0; i < 1000; i++ {
+			cm.mu.Lock()
+			if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
+				cm.mu.Unlock()
+				// all things stored in the channelz map have been cleared.
+				return nil
+			}
+			cm.mu.Unlock()
+			time.Sleep(10 * time.Millisecond)
+		}
+
+		cm.mu.Lock()
+		err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
+		cm.mu.Unlock()
+		return err
+	}
+}
+
+// GetTopChannels returns a slice of top channels' ChannelMetric, along with a
+// boolean indicating whether there are more top channels to be queried for.
+//
+// The arg id specifies that only top channels with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id, maxResults)
+}
+
+// GetServers returns a slice of servers' ServerMetric, along with a boolean
+// indicating whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id, maxResults)
+}
+
+// GetServerSockets returns a slice of the server's (identified by id) normal
+// sockets' SocketMetric, along with a boolean indicating whether there are
+// more sockets to be queried for.
+//
+// The arg startID specifies that only sockets with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
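+//
+// A hedged pagination sketch (svrID is an assumption):
+//
+//	metrics, end := GetServerSockets(svrID, 0, 0) // maxResults 0 => EntryPerPage
+//	for !end {
+//		var more []*SocketMetric
+//		more, end = GetServerSockets(svrID, metrics[len(metrics)-1].ID+1, 0)
+//		metrics = append(metrics, more...)
+//	}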
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID, maxResults)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+	return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+	return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketInternalMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+	return db.get().GetSocket(id)
+}
+
+// GetServer returns the ServerMetric for the server (identified by id).
+func GetServer(id int64) *ServerMetric {
+	return db.get().GetServer(id)
+}
+
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid = 0 means no parent. It returns the unique channelz
+// tracking id assigned to this channel.
+func RegisterChannel(c Channel, pid int64, ref string) int64 {
+	id := idGen.genID()
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         pid,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	if pid == 0 {
+		db.get().addChannel(id, cn, true, pid, ref)
+	} else {
+		db.get().addChannel(id, cn, false, pid, ref)
+	}
+	return id
+}
+
+// RegisterSubChannel registers the given channel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this subchannel.
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
+	if pid == 0 {
+		logger.Error("a SubChannel's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid,
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addSubChannel(id, sc, pid, ref)
+	return id
+}
+
+// RegisterServer registers the given server s in the channelz database. It
+// returns the unique channelz tracking id assigned to this server.
+func RegisterServer(s Server, ref string) int64 {
+	id := idGen.genID()
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return id
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this listen socket.
+func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		logger.Error("a ListenSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addListenSocket(id, ls, pid, ref)
+	return id
+}
+
+// RegisterNormalSocket registers the given normal socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this normal socket.
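+//
+// A minimal lifecycle sketch (s and subChanID are assumptions):
+//
+//	sktID := channelz.RegisterNormalSocket(s, subChanID, "conn-1")
+//	defer channelz.RemoveEntry(sktID) // mirrors transport teardown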
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		logger.Error("a NormalSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addNormalSocket(id, ns, pid, ref)
+	return id
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id
+// from the channelz database.
+func RemoveEntry(id int64) {
+	db.get().removeEntry(id)
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to
+// describe the event to be added to the channel trace.
+// The Parent field is optional. It is used for events that should also be
+// recorded in the entity's parent trace.
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event related to the entity with the specified
+// id, using the provided TraceEventDesc.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
+	for d := desc; d != nil; d = d.Parent {
+		switch d.Severity {
+		case CtUnknown, CtInfo:
+			l.InfoDepth(depth+1, d.Desc)
+		case CtWarning:
+			l.WarningDepth(depth+1, d.Desc)
+		case CtError:
+			l.ErrorDepth(depth+1, d.Desc)
+		}
+	}
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	db.get().traceEvent(id, desc)
+}
+
+// channelMap is the storage data structure for channelz.
+// Methods of channelMap can be divided into two categories with respect to
+// locking:
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+// Methods of the second type must always be called from within a method of
+// the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+	c.mu.Lock()
+	cn.cm = c
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+	c.mu.Lock()
+	sc.cm = c
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete
+// the entry if it has to wait on the deletion of its children, or until no
+// other entity's channel trace references it.
+// It may lead to a chain of entry deletions. For example, deleting the last
+// socket of a gracefully shutting down server will also delete the server.
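+//
+// A hedged sketch of that cascade (ids are assumptions):
+//
+//	c.removeEntry(serverID)   // server close: held back by a live socket
+//	c.removeEntry(lastSockID) // deleting the last socket also deletes the server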
+func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. +func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: 
copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..b0013f9c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, args...) + } +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, msg) + } +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, args...) + } +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, msg) + } +} + +// Error logs and adds a trace event if channelz is on. 
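+//
+// A hedged call-site sketch (the component logger and channelzID are
+// assumptions):
+//
+//	var logger = grpclog.Component("transport")
+//	channelz.Error(logger, channelzID, "connection error: ", err)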
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
+	if IsOn() {
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
+			Desc:     fmt.Sprint(args...),
+			Severity: CtError,
+		})
+	} else {
+		l.ErrorDepth(1, args...)
+	}
+}
+
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	if IsOn() {
+		AddTraceEvent(l, id, 1, &TraceEventDesc{
+			Desc:     msg,
+			Severity: CtError,
+		})
+	} else {
+		l.ErrorDepth(1, msg)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 000000000..3c595d154
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,701 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However,
+	// if the child list is not empty, then deletion from the database is on
+	// hold until the last child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether the child list is now empty. If both conditions are met,
+	// then it deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means
+	// no parent.
+	getParentID() int64
+}
+
+// dummyEntry is a fake entry to handle the entry-not-found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under race
+	// condition. For example, there could be a race between ClientConn.Close()
+	// info being propagated to addrConn and http2Client: ClientConn.Close()
+	// cancels the context, causing http2Client to error. The error info is
+	// then caught by the transport monitor before addrConn.tearDown() is
+	// called inside ClientConn.Close(). Therefore, the addrConn will create a
+	// new transport. And when registering the new transport in channelz, its
+	// parent addrConn could have already been torn down and deleted from
+	// channelz tracking, and thus we reach the code here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. 
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that ever got traced (i.e. including
+	// those that have been deleted).
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to $maxTraceEntry; newer
+	// events will overwrite the oldest one).
+	Events []*TraceEvent
+}
+
+// TraceEvent represents a single trace event.
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID
+	// is 0 if no other entity is involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the
+	// event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
+
+type dummyChannel struct{}
+
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+	return &ChannelInternalMetric{}
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+	trace       *channelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+}
+
+func (c *channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *subChannel:
+		c.subChans[id] = v.refName
+	case *channel:
+		c.nestedChans[id] = v.refName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) getParentID() int64 {
+	return c.pid
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry
+// relation tree, which means deleting the channel reference from its parent's
+// child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two
+// criteria: removal of the corresponding grpc object has been invoked, and the
+// channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been
+// successfully deleted from the tree.
+func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. 
This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). 
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface {
+	ChannelzMetric() *ServerInternalMetric
+}
+
+type server struct {
+	refName       string
+	s             Server
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	id            int64
+	cm            *channelMap
+}
+
+func (s *server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *normalSocket:
+		s.sockets[id] = v.refName
+	case *listenSocket:
+		s.listenSockets[id] = v.refName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.id)
+}
+
+func (s *server) getParentID() int64 {
+	return 0
+}
+
+type tracedChannel interface {
+	getChannelTrace() *channelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+type channelTrace struct {
+	cm          *channelMap
+	createdTime time.Time
+	eventCount  int64
+	mu          sync.Mutex
+	events      []*TraceEvent
+}
+
+func (c *channelTrace) append(e *TraceEvent) {
+	c.mu.Lock()
+	if len(c.events) == getMaxTraceEntry() {
+		del := c.events[0]
+		c.events = c.events[1:]
+		if del.RefID != 0 {
+			// start recursive cleanup in a goroutine so as not to block the call originating from grpc.
+			go func() {
+				// need to acquire the c.cm.mu lock to call the unlocked attemptCleanup func.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.events = append(c.events, e)
+	c.eventCount++
+	c.mu.Unlock()
+}
+
+func (c *channelTrace) clear() {
+	c.mu.Lock()
+	for _, e := range c.events {
+		if e.RefID != 0 {
+			// the caller should already hold the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUnknown indicates unknown severity of a trace event.
+	CtUnknown Severity = iota
+	// CtInfo indicates info level severity of a trace event.
+	CtInfo
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
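channelTrace.append above is a capped log: once the buffer reaches getMaxTraceEntry(), the oldest event is evicted, and an evicted event that references another entity releases that reference asynchronously so the caller is never blocked on c.cm.mu. A sketch of the same evict-on-append idea, with a plain callback standing in for the ref-count release (hypothetical names, not the channelz types):

```go
package main

import "fmt"

// cappedLog is a hypothetical bounded event log; onEvict is invoked for
// each event dropped to make room, mirroring the trace-ref release.
type cappedLog struct {
	max     int
	events  []string
	onEvict func(string)
}

func (l *cappedLog) append(e string) {
	if len(l.events) == l.max {
		old := l.events[0]
		l.events = l.events[1:]
		if l.onEvict != nil {
			l.onEvict(old)
		}
	}
	l.events = append(l.events, e)
}

func main() {
	l := &cappedLog{max: 2, onEvict: func(e string) { fmt.Println("evicted:", e) }}
	l.append("a")
	l.append("b")
	l.append("c")         // evicts "a"
	fmt.Println(l.events) // [b c]
}
```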
+type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 000000000..692dd6181 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 000000000..19c2fc521 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,42 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. 
+// Socket options are not supported on non-linux OSes and appengine.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux OSes and appengine.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 000000000..fdf409d55
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,39 @@
+// +build linux,!appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+)
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 000000000..8864a0811
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,26 @@
+// +build !linux appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 000000000..be70b6cdf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,77 @@
+// +build !appengine
+
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go new file mode 100644 index 000000000..af6f57719 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go @@ -0,0 +1,31 @@ +// +build appengine + +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" + "net/url" +) + +// SPIFFEIDFromState is a no-op for appengine builds. 
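To recap the validation rules in SPIFFEIDFromCert above: the certificate must carry exactly one URI SAN, with scheme "spiffe", no opaque part or user info, a non-empty trust domain of at most 255 characters, a non-empty workload path, and a total length of at most 2048 bytes. A hypothetical standalone checker applying the same shape rules to a bare *url.URL (illustration only, not the library's API):

```go
package main

import (
	"fmt"
	"net/url"
)

// validSPIFFEID applies the same shape checks as SPIFFEIDFromCert
// performs on a single URI SAN. Hypothetical helper for illustration.
func validSPIFFEID(u *url.URL) bool {
	if u == nil || u.Scheme != "spiffe" || u.Opaque != "" ||
		(u.User != nil && u.User.Username() != "") {
		return false
	}
	if len(u.String()) > 2048 { // total ID length
		return false
	}
	if len(u.Host) == 0 || len(u.Host) > 255 { // trust domain
		return false
	}
	if len(u.Path) == 0 { // workload ID
		return false
	}
	return true
}

func main() {
	for _, s := range []string{
		"spiffe://example.org/workload/billing", // valid
		"spiffe://example.org",                  // no workload path
		"https://example.org/not-spiffe",        // wrong scheme
	} {
		u, err := url.Parse(s)
		fmt.Println(s, "->", err == nil && validSPIFFEID(u))
	}
}
```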
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 000000000..f499a614c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,60 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go new file mode 100644 index 000000000..a6144cd66 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. 
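The wrapper above matters because conns that embed only net.Conn (as TLS and proxy wrappers effectively do) lose the SyscallConn method of the raw TCP conn underneath. A hedged usage sketch with a trivial opaque wrapper standing in for tls.Conn and a local copy of the wrapping logic:

```go
package main

import (
	"fmt"
	"net"
	"syscall"
)

// opaqueConn embeds only net.Conn, so like tls.Conn it does not expose
// the SyscallConn method of the raw TCP conn underneath it.
type opaqueConn struct{ net.Conn }

// sysConn aliases syscall.Conn so both interfaces can be embedded
// without the field name `Conn` colliding, as in the code above.
type sysConn = syscall.Conn

// wrappedConn mirrors syscallConn above: reads and writes go through
// Conn, while SyscallConn is served by the embedded raw conn.
type wrappedConn struct {
	net.Conn
	sysConn
}

func wrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
	sc, ok := rawConn.(syscall.Conn)
	if !ok {
		return newConn
	}
	return &wrappedConn{Conn: newConn, sysConn: sc}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go net.Dial("tcp", ln.Addr().String()) // client side of the demo conn
	raw, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer raw.Close()

	opaque := opaqueConn{raw}
	_, ok := net.Conn(opaque).(syscall.Conn)
	fmt.Println("opaque wrapper keeps syscall.Conn:", ok) // false

	_, ok = wrapSyscallConn(raw, opaque).(syscall.Conn)
	fmt.Println("after wrapping:", ok) // true: restored via the raw conn
}
```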
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..55664fa46 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..73931a94b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..e6f975cbf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. 
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
+// It is defined here to avoid a circular dependency.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	FatalDepth(depth int, args ...interface{})
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
new file mode 100644
index 000000000..82af70e96
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods on a nil receiver log without any prefix.
+type PrefixLogger struct {
+	logger DepthLoggerV2
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Errorf does error logging.
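PrefixLogger passes depth 1 to its DepthLoggerV2 so log lines are attributed to the code that called Infof/Warningf/Errorf rather than to the wrapper itself. A minimal sketch of a depth-aware logger built on the standard library's log.Output, whose calldepth argument serves the same purpose (depthLogger is a hypothetical name, not part of grpc):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// depthLogger is a hypothetical DepthLoggerV2-style implementation.
// log.Output's calldepth counts stack frames to skip, so a wrapper can
// make the log line point at the original call site.
type depthLogger struct{ l *log.Logger }

func (d depthLogger) InfoDepth(depth int, args ...interface{}) {
	// +2 skips Output itself and InfoDepth; depth skips further wrappers.
	d.l.Output(depth+2, "INFO: "+fmt.Sprint(args...))
}

func main() {
	dl := depthLogger{log.New(os.Stderr, "", log.Lshortfile)}
	dl.InfoDepth(0, "hello") // reports this file:line, not InfoDepth's
}
```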
+func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..200b115ca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 000000000..fbe697c37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. 
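The Event type defined in grpcsync just below is a one-shot broadcast: every waiter on Done() unblocks after the first Fire, and Fire reports whether this particular call was the one that closed the channel. A usage sketch with the type inlined, since the real package is internal to grpc:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// event is an inlined copy of grpcsync's Event, so the sketch runs
// outside grpc's internal packages.
type event struct {
	fired int32
	c     chan struct{}
	o     sync.Once
}

// Fire closes the Done channel exactly once; only the first caller
// gets true back.
func (e *event) Fire() bool {
	ret := false
	e.o.Do(func() {
		atomic.StoreInt32(&e.fired, 1)
		close(e.c)
		ret = true
	})
	return ret
}

func (e *event) Done() <-chan struct{} { return e.c }

func main() {
	e := &event{c: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-e.Done() // all waiters unblock on the single Fire
			fmt.Println("worker", id, "released")
		}(i)
	}
	fmt.Println("first Fire:", e.Fire())  // true
	fmt.Println("second Fire:", e.Fire()) // false: already fired
	wg.Wait()
}
```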
+package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 000000000..b25b0baec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 000000000..6f22bd891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 000000000..4e7475060 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +// +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. 
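ContentSubtype, defined below this comment, splits a content-subtype off the "application/grpc" base type. A small sketch with the function inlined (the real one is internal to grpc), showing the expected results for a few representative inputs:

```go
package main

import (
	"fmt"
	"strings"
)

const baseContentType = "application/grpc"

// contentSubtype is an inlined copy of ContentSubtype below, so the
// expected results can be checked outside grpc's internal packages.
func contentSubtype(contentType string) (string, bool) {
	if contentType == baseContentType {
		return "", true
	}
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	switch contentType[len(baseContentType)] {
	case '+', ';':
		return contentType[len(baseContentType)+1:], true
	default:
		return "", false
	}
}

func main() {
	for _, ct := range []string{
		"application/grpc",       // -> "", true
		"application/grpc+proto", // -> "proto", true
		"application/grpc;json",  // -> "json", true
		"application/grpcfoo",    // -> "", false (no +/; separator)
		"application/json",       // -> "", false (not gRPC at all)
	} {
		sub, ok := contentSubtype(ct)
		fmt.Printf("%-24s -> %q, %v\n", ct, sub, ok)
	}
}
```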
+func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 000000000..80b33cdaf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func ParseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..716d92800 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,81 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// NewRequestInfoContext creates a new context based on the argument context attaching
+	// the passed in RequestInfo to the new context.
+	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
+	// NewClientHandshakeInfoContext returns a copy of the input context with
+	// the passed in ClientHandshakeInfo struct added to it.
+	NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context
+	// ParseServiceConfigForTesting is for creating a fake
+	// ClientConn for resolver testing only
+	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+	// This function compares the config without rawJSON stripped, in case there
+	// are differences in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+ CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..304235566 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,446 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. "::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. 
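Putting this resolver together from the client's point of view: the "dns" scheme selects this builder, an optional authority picks the DNS server to query (via customAuthorityResolver above), and a service config can be delivered through a TXT record named _grpc_config.&lt;host&gt; whose value starts with grpc_config=. A hedged dial sketch with example hostnames; WithInsecure and WithBlock match the grpc-go API of this vendored snapshot:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// dns:///host:port            -> resolve via the system resolver
	// dns://8.8.8.8:53/host:port  -> resolve via the server at 8.8.8.8:53
	conn, err := grpc.DialContext(ctx, "dns:///service.example.com:443",
		grpc.WithInsecure(), // plaintext, for the sketch only
		grpc.WithBlock(),    // wait until resolution and connection succeed
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```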
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{}
+}
+
+type dnsBuilder struct{}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint, defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	if target.Authority == "" {
+		d.resolver = defaultResolver
+	} else {
+		d.resolver, err = customAuthorityResolver(target.Authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	d.ResolveNow(resolver.ResolveNowOptions{})
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+type netResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
+
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (deadResolver) Close() {}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	host     string
+	port     string
+	resolver netResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	// wg is used to ensure Close() returns only after the watcher() goroutine has finished.
+	// Otherwise, a data race is possible. [Race Example] in dns_resolver_test we
+	// replace the real lookup functions with mocked ones to facilitate testing.
+	// If Close() doesn't wait for the watcher() goroutine to finish, the race detector
+	// sometimes warns that lookup (READ of the lookup function pointers) inside the
+	// watcher() goroutine races with replaceNetFunc (WRITE of the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	for {
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-d.rn:
+		}
+
+		state, err := d.lookup()
+		if err != nil {
+			d.cc.ReportError(err)
+		} else {
+			d.cc.UpdateState(*state)
+		}
+
+		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
+		// will be queued in d.rn.
+ t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. 
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 000000000..8783a8cf8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. + return nil + } + return err + } +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..520d9229e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..af3e2b5f7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 681260692..710223b8d 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return (*Error)(s.Proto()) + return &Error{e: s.Proto()} } // WithDetails returns a new status with the provided details messages appended to the status. 
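+// [Editor's note on the serviceconfig change above, not part of the patch]
+// A loadBalancingConfig list is a JSON array of single-entry objects, tried
+// in order until a registered policy is found. Illustrative wire format,
+// assuming a "round_robin" policy is registered (the package is internal to
+// grpc, so this only compiles inside the grpc module):
+//
+//	var bc serviceconfig.BalancerConfig
+//	js := `[{"no_such_policy":{}},{"round_robin":{}}]`
+//	if err := json.Unmarshal([]byte(js), &bc); err != nil {
+//		log.Fatal(err) // fails only if no listed policy is registered
+//	}
+//	// bc.Name == "round_robin"; the unregistered first entry was skipped.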
@@ -136,26 +136,27 @@ func (s *Status) Details() []interface{} { return details } -// Error is an alias of a status proto. It implements error and Status, -// and a nil Error should never be returned by this package. -type Error spb.Status +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} -func (se *Error) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) } // GRPCStatus returns the Status represented by se. -func (se *Error) GRPCStatus() *Status { - return FromProto((*spb.Status)(se)) +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) } // Is implements future error.Is functionality. // A Error is equivalent if the code and message are identical. -func (se *Error) Is(target error) bool { +func (e *Error) Is(target error) bool { tse, ok := target.(*Error) if !ok { return false } - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) + return proto.Equal(e.e, tse.e) } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 000000000..2fdcb76e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,116 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + logger.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() (rusage *Rusage) { + rusage = new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) + return +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. 
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + f := (*syscall.Rusage)(first) + l := (*syscall.Rusage)(latest) + var ( + utimeDiffs = l.Utime.Sec - f.Utime.Sec + utimeDiffus = l.Utime.Usec - f.Utime.Usec + stimeDiffs = l.Stime.Sec - f.Stime.Sec + stimeDiffus = l.Stime.Usec - f.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 000000000..adae60d65 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,76 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once +var logger = grpclog.Component("core") + +func log() { + once.Do(func() { + logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +// It always returns 0 under non-linux or appengine environment. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux or appengine environment. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux or appengine environment. 
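+// [Editor's sketch, not part of the vendored patch] The SyscallConn/Control
+// pattern used by the Linux Set/GetTCPUserTimeout above is the standard way
+// to reach raw socket options on a *net.TCPConn without taking ownership of
+// the fd; the fd is only valid inside the Control callback. Standalone
+// illustration (error handling elided):
+//
+//	conn, _ := net.Dial("tcp", "example.com:80")
+//	rawConn, _ := conn.(*net.TCPConn).SyscallConn()
+//	var serr error
+//	cerr := rawConn.Control(func(fd uintptr) {
+//		serr = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP,
+//			unix.TCP_USER_TIMEOUT, 20000 /* ms */)
+//	})
+//	// check cerr (Control failed) and serr (setsockopt failed) separately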
+func GetRusage() (rusage *Rusage) {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function for non-linux or appengine environments.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux or appengine environments.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux or appengine environments;
+// a negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 000000000..070680edb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// our beta * our estimated bdp and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp estimate smaller than or equal to twice the real BDP,
+	// we should multiply our current sample with 4/3, however to round things out
+	// we use 2 as the multiplication factor.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// bool to keep track of the beginning of a new measurement cycle.
+	isSent bool
+	// Callback to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
+	sampleCount uint64
+	// round trip time (seconds)
+	rtt float64
+}
+
+// timesnap registers the time bdp ping was sent out so that
+// network rtt can be calculated when its ack is received.
+// It is called (by controller) when the bdpPing is
+// being written on the wire.
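+// [Editor's note] Worked example of the update rule implemented in
+// calculate() below, with illustrative numbers: if rtt is 40ms and 1MB
+// arrived between the bdp ping and its ack, then
+// bwCurrent = 1MB / (0.04s * 1.5) ~= 16.7MB/s. If that sample is at least
+// beta (0.66) of the current bdp estimate and bwCurrent is the highest
+// bandwidth seen so far, the estimate becomes gamma (2) * 1MB = 2MB,
+// capped at bdpLimit (16MB), and the flow control windows are updated.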
+func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 000000000..40ef23923 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,942 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. 
+ onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. 
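+// [Editor's note on outStreamList above, not part of the patch] The
+// head/tail sentinels mean deleteSelf never special-cases the ends of the
+// list, so removal is O(1) without a reference to the owning list. The same
+// idiom in isolation (names are ours):
+//
+//	type node struct{ prev, next *node }
+//
+//	func newList() (head, tail *node) {
+//		head, tail = new(node), new(node)
+//		head.next, tail.prev = tail, head
+//		return head, tail
+//	}
+//
+//	// remove unlinks any interior node, in O(1).
+//	func (n *node) remove() {
+//		n.prev.next, n.next.prev = n.next, n.prev
+//		n.prev, n.next = nil, nil
+//	}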
+ transportResponseFrames int + trfChan atomic.Value // *chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(*chan struct{}) + if ch != nil { + select { + case <-*ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(*chan struct{}) + close(*ch) + c.trfChan.Store((*chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. 
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned-up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps trailers
+	// on the server-side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of the run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in writing of HTTP2 frames into an underlying write buffer.
+// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the processor once,
+// if the batch size is too low, to give stream goroutines a chance to fill it up.
+func (l *loopyWriter) run() (err error) {
+	defer func() {
+		if err == ErrConnClosing {
+			// Don't log ErrConnClosing as error since it happens
+			// 1. When the connection is closed by some other known issue.
+			// 2. User closed the connection.
+			// 3. A graceful close of connection.
+			if logger.V(logLevel) {
+				logger.Infof("transport: loopyWriter.run returning.
%v", err) + } + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+ return nil + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
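+// [Editor's note on writeHeader above, not part of the patch] A header
+// block larger than http2MaxFrameLen (16KB) is emitted as one HEADERS frame
+// followed by CONTINUATION frames, and only the last frame carries
+// END_HEADERS. For example, a 40KB HPACK block goes out as HEADERS (16KB),
+// CONTINUATION (16KB), CONTINUATION (8KB, END_HEADERS), with l.hBuf.Next(size)
+// draining the encode buffer in place.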
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) handle(i interface{}) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		return l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *registerStream:
+		return l.registerStreamHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		return l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		return l.outFlowControlSizeRequestHandler(i)
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+	return nil
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers; h that keeps the grpc-message header and d that is the actual data.
+	// As an optimization to keep wire traffic low, data from d is copied to h to make the frame as
+	// big as the maximum possible HTTP2 frame size.
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+		// Client sends out empty data frame with endStream = true
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var (
+		buf []byte
+	)
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
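+		// [Editor's note] Net effect of the three checks: maxSize ends up as
+		// min(http2MaxFrameLen, stream-level quota, connection-level quota).
+		// E.g. with a 16KB frame limit, 8KB of remaining stream quota and 4KB
+		// of connection sendQuota, at most 4KB of this dataFrame is written
+		// this iteration.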
+ maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d + } + + size := hSize + dSize + + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 000000000..9fa306b2e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. 
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = 20 * time.Second
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
+	// max window limit set by HTTP2 Specs.
+	maxWindowSize = math.MaxInt32
+	// defaultWriteQuota is the default value for number of data
+	// bytes that each stream can schedule before some of it being
+	// flushed out.
+	defaultWriteQuota              = 64 * 1024
+	defaultClientMaxHeaderListSize = uint32(16 << 20)
+	defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 000000000..f262edd8e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,217 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get waits on reads from ch when quota drops to zero or below;
+	// replenish writes on it when quota goes positive again.
+	ch chan struct{}
+	// done is triggered in error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back.
+	// It is implemented as a field so that it can be updated
+	// by tests.
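+	//
+	// [Editor's sketch, not part of the vendored patch] Typical use of
+	// writeQuota as a whole: a sender calls get(n) before scheduling n bytes
+	// (blocking once the quota is exhausted) and loopy calls replenish(n)
+	// after writing n bytes out (msg is a placeholder []byte):
+	//
+	//	done := make(chan struct{})
+	//	wq := newWriteQuota(32*1024, done)
+	//	if err := wq.get(int32(len(msg))); err != nil {
+	//		return err // stream finished; quota unavailable
+	//	}
+	//	// ... hand msg to the writer; the writer side later calls:
+	//	wq.replenish(len(msg))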
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which has been received but not yet
+	// consumed by applications.
+	pendingData uint32
+	// The amount of data the application has consumed but grpc has not sent
+	// window update for them. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+	f.mu.Lock()
+	d := n - f.limit
+	f.limit = n
+	f.mu.Unlock()
+	return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
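+		// [Editor's note] Worked example: with limit = 64KB,
+		// pendingData = 48KB, pendingUpdate = 0 and an application read of
+		// n = 128KB, estSenderQuota = 16KB and estUntransmittedData = 80KB;
+		// since 80KB > 16KB, a delta of the full n = 128KB is granted
+		// (limit+n stays well under 2^31-1), letting the sender finish the
+		// oversized message without stalling.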
+ if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 000000000..05d3871e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,463 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
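That `writes` channel is the crux of this transport: every write is a closure shipped to the single ServeHTTP goroutine, and the closed `closedCh` aborts senders. A stripped-down sketch of the pattern implemented by `do` and `runStream` below (illustrative names, not the gRPC API):

```go
package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("transport closed")

type serializer struct {
	writes chan func()
	done   chan struct{}
}

// do queues fn to run on the consumer goroutine, or fails once done is closed.
func (s *serializer) do(fn func()) error {
	select {
	case <-s.done:
		return errClosed
	case s.writes <- fn:
		return nil
	}
}

// run consumes queued funcs until done is closed; in the real transport
// this loop lives on the ServeHTTP goroutine, so all writes are serialized.
func (s *serializer) run() {
	for {
		select {
		case fn := <-s.writes:
			fn()
		case <-s.done:
			return
		}
	}
}

func main() {
	s := &serializer{writes: make(chan func()), done: make(chan struct{})}
	go func() {
		_ = s.do(func() { fmt.Println("write 1") })
		_ = s.do(func() { fmt.Println("write 2") })
		_ = s.do(func() { close(s.done) }) // serialize shutdown after the writes
	}()
	s.run()
}
```

Because the shutdown itself is queued as a write, both prints are guaranteed to run before `run` returns; the real transport gets the same ordering by funneling `WriteStatus` through the same channel.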
+func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
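`WriteStatus` and `writeCommonHeaders` above lean on the two trailer mechanisms net/http offers: trailers predeclared in the `Trailer` header before the body, and the `TrailerPrefix` escape hatch for names only known after the body. A minimal handler showing both (hypothetical header names; over HTTP/1.1 trailers additionally require a chunked response and a client that actually reads them):

```go
package main

import (
	"fmt"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Predeclare: must happen before the first Write/WriteHeader.
	w.Header().Add("Trailer", "Grpc-Status")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintln(w, "body bytes go here")
	// Set the predeclared trailer's value after the body has been written.
	w.Header().Set("Grpc-Status", "0")
	// Undeclared trailer: the "Trailer:" prefix marks it as a trailer even
	// though it was never listed in the Trailer header.
	w.Header().Set(http.TrailerPrefix+"X-Extra", "late value")
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```

The gRPC code uses the prefix form (`http2.TrailerPrefix`) for stream trailers precisely because their names are not known until the RPC finishes.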
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. 
+ // The goroutine that is started is the one that then calls
+ // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+ startStream(s)
+
+ ht.runStream()
+ close(requestOver)
+
+ // Wait for the reading goroutine to finish.
+ req.Body.Close()
+ <-readerDone
+}
+
+func (ht *serverHandlerTransport) runStream() {
+ for {
+ select {
+ case fn := <-ht.writes:
+ fn()
+ case <-ht.closedCh:
+ return
+ }
+ }
+}
+
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
+func (ht *serverHandlerTransport) Drain() {
+ panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError maps the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+// * io.EOF
+// * io.ErrUnexpectedEOF
+// * of type transport.ConnectionError
+// * an error from the status package
+func mapRecvMsgError(err error) error {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ return err
+ }
+ if se, ok := err.(http2.StreamError); ok {
+ if code, ok := http2ErrConvTab[se.Code]; ok {
+ return status.Error(code, se.Error())
+ }
+ }
+ if strings.Contains(err.Error(), "body closed by handler") {
+ return status.Error(codes.Canceled, err.Error())
+ }
+ return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 000000000..e73b77a15
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1491 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpcutil"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+)
+
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
+ ctx context.Context
+ cancel context.CancelFunc
+ ctxDone <-chan struct{} // Cache the ctx.Done() chan.
+ userAgent string
+ md interface{}
+ conn net.Conn // underlying communication channel
+ loopy *loopyWriter
+ remoteAddr net.Addr
+ localAddr net.Addr
+ authInfo credentials.AuthInfo // auth info about the connection
+
+ readerDone chan struct{} // sync point to enable testing.
+ writerDone chan struct{} // sync point to enable testing.
+ // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+ // that the server sent GoAway on this transport.
+ goAway chan struct{}
+
+ framer *framer
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *controlBuffer
+ fc *trInFlow
+ // The scheme used: https if TLS is on, http otherwise.
+ scheme string
+
+ isSecure bool
+
+ perRPCCreds []credentials.PerRPCCredentials
+
+ kp keepalive.ClientParameters
+ keepaliveEnabled bool
+
+ statsHandler stats.Handler
+
+ initialWindowSize int32
+
+ // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+ maxSendHeaderListSize *uint32
+
+ bdpEst *bdpEstimator
+ // onPrefaceReceipt is a callback that client transport calls upon
+ // receiving server preface to signal that a successful HTTP2
+ // connection was established.
+ onPrefaceReceipt func()
+
+ maxConcurrentStreams uint32
+ streamQuota int64
+ streamsQuotaAvailable chan struct{}
+ waitingStreams uint32
+ nextID uint32
+
+ mu sync.Mutex // guard the following variables
+ state transportState
+ activeStreams map[uint32]*Stream
+ // prevGoAwayID records the Last-Stream-ID in the previous GOAWAY frame.
+ prevGoAwayID uint32
+ // goAwayReason records the http2.ErrCode and debug data received with the
+ // GoAway frame.
+ goAwayReason GoAwayReason
+ // A condition variable used to signal when the keepalive goroutine should
+ // go dormant. The condition for dormancy is based on the number of active
+ // streams and the `PermitWithoutStream` keepalive client parameter. And
+ // since the number of active streams is guarded by the above mutex, we use
+ // the same for this condition variable as well.
+ kpDormancyCond *sync.Cond
+ // A boolean to track whether the keepalive goroutine is dormant or not.
+ // This is checked before attempting to signal the above condition
+ // variable.
+ kpDormant bool
+
+ // Fields below are for channelz metric collection.
+ channelzID int64 // channelz unique identification number
+ czData *channelzData
+
+ onGoAway func(GoAwayReason)
+ onClose func()
+
+ bufferPool *bufferPool
+
+ connectionID uint64
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+ if fn != nil {
+ return fn(ctx, addr)
+ }
+ return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+}
+
+func isTemporary(err error) bool {
+ switch err := err.(type) {
+ case interface {
+ Temporary() bool
+ }:
+ return err.Temporary()
+ case interface {
+ Timeout() bool
+ }:
+ // Timeouts may be resolved upon retry, and are thus treated as
+ // temporary.
+ return err.Timeout()
+ }
+ return true
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. A non-nil error is returned if
+// construction fails.
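Note how `isTemporary` above classifies errors by behavior rather than by concrete type, using anonymous single-method interfaces in a type switch. The same pattern in isolation (a sketch; the `net.DNSError` is just a convenient stand-in for a timeout-flavored error):

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

func isTemporary(err error) bool {
	switch err := err.(type) {
	case interface{ Temporary() bool }:
		return err.Temporary()
	case interface{ Timeout() bool }:
		// Timeouts may be resolved upon retry, so treat them as temporary.
		return err.Timeout()
	}
	return true // unknown errors default to retryable
}

func main() {
	// net.DNSError implements both methods; the Temporary() case wins
	// because it appears first in the switch.
	fmt.Println(isTemporary(&net.DNSError{IsTimeout: true})) // true
	// An arbitrary error matches neither case and falls through to the
	// permissive default.
	fmt.Println(isTemporary(errors.New("unclassified failure"))) // true
}
```

Defaulting unknown errors to "temporary" is the safer failure mode here: a retry at worst wastes a dial, while wrongly marking an error permanent would stop reconnection.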
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the credential handshaker. This makes it possible for + // address specific arbitrary data to reach the credential handshaker. + contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) + connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. 
+ cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. 
Err: %v", err) + } + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+ hfLen += len(authData) + len(callAuthData)
+ headerFields := make([]hpack.HeaderField, 0, hfLen)
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+ if callHdr.PreviousAttempts > 0 {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+ }
+
+ if callHdr.SendCompress != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ // Send out timeout regardless of its value. The server can detect timeout context by itself.
+ // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
+ timeout := time.Until(dl)
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
+ }
+ for k, v := range authData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ for k, v := range callAuthData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ if b := stats.OutgoingTags(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+ }
+ if b := stats.OutgoingTrace(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+ }
+
+ if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ var k string
+ for k, vv := range md {
+ // HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ for _, vv := range added {
+ for i, v := range vv {
+ if i%2 == 0 {
+ k = v
+ continue
+ }
+ // HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
+ if isReservedHeader(k) {
+ continue
+ }
+ headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ }
+ if md, ok := t.md.(*metadata.MD); ok {
+ for k, vv := range *md {
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ }
+ return headerFields, nil
+}
+
+func (t *http2Client) createAudience(callHdr *CallHdr) string {
+ // Create an audience string only if needed.
+ if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
+ return ""
+ }
+ // Construct URI required to get auth request metadata.
+ // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. 
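The audience string assembled by `createAudience` above is just the method's service URI: the scheme, the host with any default `:443` stripped, and the path up to the final slash. A worked example of that string surgery (hypothetical host and method names):

```go
package main

import (
	"fmt"
	"strings"
)

func audience(host, method string) string {
	// Drop a default HTTPS port, then cut the method name off the path,
	// keeping only the service portion.
	host = strings.TrimSuffix(host, ":443")
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		pos = len(method)
	}
	return "https://" + host + method[:pos]
}

func main() {
	fmt.Println(audience("example.com:443", "/helloworld.Greeter/SayHello"))
	// Output: https://example.com/helloworld.Greeter
}
```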
+ if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
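The CompareAndSwap-guarded close that follows appears throughout this stream code (it also guards `headerChan` in the `cleanup` closure above): a uint32 flag elects exactly one closer, so racing goroutines can never double-close the channel. The idiom in isolation:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type once struct {
	closed uint32
	ch     chan struct{}
}

func (o *once) closeChan() {
	// Exactly one caller wins the CAS and performs the close; the rest
	// see closed == 1 and return without touching the channel.
	if atomic.CompareAndSwapUint32(&o.closed, 0, 1) {
		close(o.ch)
	}
}

func main() {
	o := &once{ch: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); o.closeChan() }()
	}
	wg.Wait()
	<-o.ch // closed exactly once; this receive returns immediately
	fmt.Println("closed once")
}
```

Compared to `sync.Once`, the raw flag also doubles as cheap state: callers elsewhere read `headerChanClosed` atomically to learn whether headers were ever delivered.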
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + d: data, + } + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. 
+ if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
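Both `trInFlow.onData` and `inFlow.onRead` batch window updates the same way: consumed bytes accumulate until they reach a quarter of the window, then flush as a single WINDOW_UPDATE. Reduced to its core (illustrative type, not the gRPC one):

```go
package main

import "fmt"

type window struct {
	limit, pending uint32
}

// onRead returns a window update to send now, or 0 to keep batching.
func (w *window) onRead(n uint32) uint32 {
	w.pending += n
	if w.pending >= w.limit/4 {
		upd := w.pending
		w.pending = 0
		return upd
	}
	return 0
}

func main() {
	w := &window{limit: 64 * 1024}
	fmt.Println(w.onRead(8 * 1024))  // 0: below the 16 KiB threshold
	fmt.Println(w.onRead(10 * 1024)) // 18432: threshold crossed, flush all
}
```

The quarter-window threshold trades latency for frame count: the peer may stall for at most a quarter of the window before an update arrives, but updates are four times rarer than per-read acking.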
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. 
This way streams that were in-flight while the GoAway from
+ // server was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ t.controlBuf.put(&incomingGoAway{})
+ // Notify the clientconn about the GOAWAY before we set the state to
+ // draining, to allow the client to stop attempting to create streams
+ // before disallowing new streams on this connection.
+ t.onGoAway(t.goAwayReason)
+ t.state = draining
+ }
+ // All streams with IDs greater than the GoAwayId
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ }
+ }
+ t.prevGoAwayID = id
+ active := len(t.activeStreams)
+ t.mu.Unlock()
+ if active == 0 {
+ t.Close()
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on the transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+ t.goAwayReason = GoAwayNoReason
+ switch f.ErrCode {
+ case http2.ErrCodeEnhanceYourCalm:
+ if string(f.DebugData()) == "too_many_pings" {
+ t.goAwayReason = GoAwayTooManyPings
+ }
+ }
+}
+
+func (t *http2Client) GetGoAwayReason() GoAwayReason {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.goAwayReason
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ t.controlBuf.put(&incomingWindowUpdate{
+ streamID: f.Header().StreamID,
+ increment: f.Increment,
+ })
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+ s := t.getStream(frame)
+ if s == nil {
+ return
+ }
+ endStream := frame.StreamEnded()
+ atomic.StoreUint32(&s.bytesReceived, 1)
+ initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
+
+ if !initialHeader && !endStream {
+ // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, a second HEADERS frame must have the EOS bit set.
+ st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+ t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+ return
+ }
+
+ state := &decodeState{}
+ // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
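The double-GOAWAY bookkeeping in `handleGoAway` above reduces to a range check: streams with IDs above the new Last-Stream-ID but at or below the previous one were never processed by the server and are safe to retry elsewhere. A sketch with hypothetical stream IDs:

```go
package main

import (
	"fmt"
	"math"
)

// cull returns the active stream IDs that a GOAWAY marks as unprocessed:
// those in the half-open range (newLast, prevLast].
func cull(active []uint32, newLast, prevLast uint32) []uint32 {
	upper := prevLast
	if upper == 0 { // first GOAWAY seen: no previous bound yet
		upper = math.MaxUint32
	}
	var killed []uint32
	for _, id := range active {
		if id > newLast && id <= upper {
			killed = append(killed, id)
		}
	}
	return killed
}

func main() {
	active := []uint32{1, 3, 5, 7, 9}
	// A graceful first GOAWAY advertises MaxInt32: nothing is culled yet.
	fmt.Println(cull(active, math.MaxInt32, 0)) // []
	// The second GOAWAY says the server stopped processing after stream 5.
	fmt.Println(cull(active, 5, math.MaxInt32)) // [7 9]
}
```

Streams 1, 3, and 5 keep running to completion; only the provably unprocessed tail is reset, which is what makes transparent retry of those RPCs safe.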
+ state.data.isGRPC = !initialHeader + if h2code, err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.data.encoding + if len(state.data.mdata) > 0 { + s.header = state.data.mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
+ t.Close()
+ return
+ }
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ t.operateHeaders(frame)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame, false)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+ }
+}
+
+func minTime(a, b time.Duration) time.Duration {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+ p := &ping{data: [8]byte{}}
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ timeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we go block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
+ timer := time.NewTimer(t.kp.Time)
+ for {
+ select {
+ case <-timer.C:
+ lastRead := atomic.LoadInt64(&t.lastRead)
+ if lastRead > prevNano {
+ // There has been read activity since the last time we were here.
+ outstandingPing = false
+ // Next timer should fire at kp.Time seconds from lastRead time.
+ timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+ prevNano = lastRead
+ continue
+ }
+ if outstandingPing && timeoutLeft <= 0 {
+ t.Close()
+ return
+ }
+ t.mu.Lock()
+ if t.state == closing {
+ // If the transport is closing, we should exit from the
+ // keepalive goroutine here. If not, we could have a race
+ // between the call to Signal() from Close() and the call to
+ // Wait() here, whereby the keepalive goroutine ends up
+ // blocking on the condition variable which will never be
+ // signalled again.
+ t.mu.Unlock()
+ return
+ }
+ if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+ // If a ping was sent out previously (because there were active
+ // streams at that point) which wasn't acked and its timeout
+ // hadn't fired, but we got here and are about to go dormant,
+ // we should make sure that we unconditionally send a ping once
+ // we awaken.
+ outstandingPing = false
+ t.kpDormant = true
+ t.kpDormancyCond.Wait()
+ }
+ t.kpDormant = false
+ t.mu.Unlock()
+
+ // We get here either because we were dormant and a new stream was
+ // created which unblocked the Wait() call, or because the
+ // keepalive timer expired. In both cases, we need to send a ping.
+ if !outstandingPing {
+ if channelz.IsOn() {
+ atomic.AddInt64(&t.czData.kpCount, 1)
+ }
+ t.controlBuf.put(p)
+ timeoutLeft = t.kp.Timeout
+ outstandingPing = true
+ }
+ // The amount of time to sleep here is the minimum of kp.Time and
+ // timeoutLeft. This will ensure that we wait only for kp.Time
+ // before sending out the next ping (for cases where the ping is
+ // acked).
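The timer arithmetic above is the heart of the keepalive loop: the reader goroutine stamps an atomic `lastRead`, and on every expiry the loop either re-arms the timer relative to that stamp (activity was seen, no ping needed) or sends a ping. A compressed, runnable sketch of just that bookkeeping (durations shortened for demonstration; not the gRPC API):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var lastRead int64
	kpTime := 50 * time.Millisecond
	prev := time.Now().UnixNano()
	timer := time.NewTimer(kpTime)

	// Simulated reader: records read activity once, 20ms in.
	go func() {
		time.Sleep(20 * time.Millisecond)
		atomic.StoreInt64(&lastRead, time.Now().UnixNano())
	}()

	for i := 0; i < 2; i++ {
		<-timer.C
		lr := atomic.LoadInt64(&lastRead)
		if lr > prev {
			// Activity since last check: fire kpTime after the last read
			// instead of pinging (same arithmetic as the code above).
			timer.Reset(time.Duration(lr) + kpTime - time.Duration(time.Now().UnixNano()))
			prev = lr
			fmt.Println("activity seen, ping deferred")
			continue
		}
		fmt.Println("no activity, would send keepalive ping")
	}
}
```

Anchoring the next expiry to `lastRead` rather than to "now" keeps the ping cadence honest: an idle connection is probed exactly `kp.Time` after its last observed read, no sooner.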
+ sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 000000000..0cf1cc320 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1268 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. 
+ // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
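+	// For illustration (value assumed): getJitter, defined at the bottom of
+	// this file, draws uniformly from +/-10% of its argument, so a
+	// MaxConnectionAge of 30m becomes an effective age in roughly [27m, 33m].
+	// The jitter keeps many connections created together from all being
+	// GOAWAYed at the same instant.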
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + done := make(chan struct{}) + t := &http2Server{ + ctx: context.Background(), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := &decodeState{ + serverSide: true, + } + if h2code, err := state.decodeHeader(frame); err != nil { + if _, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: h2code, + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.data.encoding, + method: state.data.method, + contentSubtype: state.data.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.data.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.data.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + } + if state.data.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + } + if state.data.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.data.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. 
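+		// Per HTTP/2 (RFC 7540, section 5.1.1), client-initiated streams use
+		// odd IDs that must increase monotonically: 1, 3, 5, ... An even ID,
+		// or an ID at or below maxStreamID (say, a reused 3 after 5 was
+		// seen), is a connection-level protocol error, hence the fatal
+		// return below tears the whole transport down.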
+ if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + s.cancel() + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(state.data.mdata).Copy(), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. 
+ return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. 
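+	// Sketch of the two paths, with an assumed stream ID of 5: if stream 5
+	// is still in activeStreams, closeStream runs with rst=false, since the
+	// peer already reset the stream and no RST_STREAM needs to be sent back.
+	// If 5 was already deleted, only loopy's per-stream state is left, and
+	// the cleanupStream item below removes it.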
+ if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
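+			// For example, assumed user metadata {"x-request-id": "abc"} is
+			// emitted below (with "-bin" suffixed values base64-encoded by
+			// encodeMetadataHeader), while reserved keys such as
+			// "grpc-status" or any ":"-prefixed pseudo-header are skipped.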
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			if logger.V(logLevel) {
+				logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
+			return false
+		}
+	}
+	return true
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	if s.updateHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		s.hdrMu.Unlock()
+		return err
+	}
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite:   t.setResetPingStrikes,
+	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	if t.stats != nil {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		t.stats.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	if s.getState() == streamDone {
+		return nil
+	}
+	s.hdrMu.Lock()
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() { // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				s.hdrMu.Unlock()
+				return err
+			}
+		} else { // Send a trailer only response.
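+			// A trailers-only response carries everything in the single
+			// HEADERS frame that ends the stream. For an RPC rejected before
+			// any header was written, the frame could look like (values
+			// assumed):
+			//   :status: 200
+			//   content-type: application/grpc
+			//   grpc-status: 7   (PermissionDenied)
+			//   grpc-message: <encoded status message>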
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite:   t.setResetPingStrikes,
+	}
+	s.hdrMu.Unlock()
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	if t.stats != nil {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			if _, ok := err.(ConnectionError); ok {
+				return err
+			}
+			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
+			return status.Errorf(codes.Internal, "transport: %v", err)
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
+			s.cancel()
+			select {
+			case <-t.done:
+				return ErrConnClosing
+			default:
+			}
+			return ContextErr(s.ctx.Err())
+		}
+	}
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		select {
+		case <-t.done:
+			return ErrConnClosing
+		default:
+		}
+		return ContextErr(s.ctx.Err())
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
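+//
+// For illustration, an assumed (non-default) configuration and how it maps to
+// the four rules above:
+//
+//	keepalive.ServerParameters{
+//		MaxConnectionIdle:     15 * time.Minute, // (1) GOAWAY after 15m with no RPCs
+//		MaxConnectionAge:      30 * time.Minute, // (2) GOAWAY after ~30m (jittered)
+//		MaxConnectionAgeGrace: 5 * time.Minute,  // (3) hard close 5m after that GOAWAY
+//		Time:                  2 * time.Hour,    // (4) ping after 2h without reads
+//		Timeout:               20 * time.Second, //     close if the ping is not acked in 20s
+//	}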
+func (t *http2Server) keepalive() { + p := &ping{} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.drain(http2.ErrCodeNo, []byte{}) + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. 
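+	// Shutdown ordering, for the record: finishing controlBuf wakes anything
+	// blocked on it, close(done) stops the keepalive loop, and closing the
+	// raw conn forces the reader's ReadFrame to fail, so every transport
+	// goroutine observes the shutdown.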
+ for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
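+	// Timeline sketch of the graceful handshake below:
+	//   t0: GOAWAY(last-stream-id=2^31-1) plus the goAwayPing are written;
+	//   t1: the client acks the ping, handlePing closes drainChan;
+	//   t2: a second goAway is queued, which loopy writes with sid, the
+	//       highest stream ID actually accepted, so later streams can be
+	//       retried elsewhere.
+	// If the ack never arrives, the one-minute timer forces step t2 anyway.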
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 000000000..4d15afbf7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,600 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } + logger = grpclog.Component("transport") +) + +type parsedHeaderData struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string + + // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). + // + // We are in gRPC mode (peer speaking gRPC) if: + // * We are client side and have already received a HEADER frame that indicates gRPC peer. 
+	// * The header contains a valid content-type, i.e. a string that starts with "application/grpc",
+	// in which case we should handle errors specific to gRPC.
+	//
+	// Otherwise (i.e. the content-type string does not start with "application/grpc", or does not exist), we
+	// are in HTTP fallback mode, and should handle errors specific to HTTP.
+	isGRPC         bool
+	grpcErr        error
+	httpErr        error
+	contentTypeErr string
+}
+
+// decodeState configures decoding criteria and records the decoded data.
+type decodeState struct {
+	// whether decoding on server side or not
+	serverSide bool
+
+	// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
+	// frame once decodeHeader function has been invoked and returned.
+	data parsedHeaderData
+}
+
+// isReservedHeader checks whether hdr belongs to HTTP2 headers
+// reserved by the gRPC protocol. Any other headers are classified as
+// user-specified metadata.
+func isReservedHeader(hdr string) bool {
+	if hdr != "" && hdr[0] == ':' {
+		return true
+	}
+	switch hdr {
+	case "content-type",
+		"user-agent",
+		"grpc-message-type",
+		"grpc-encoding",
+		"grpc-message",
+		"grpc-status",
+		"grpc-timeout",
+		"grpc-status-details-bin",
+		// Intentionally exclude grpc-previous-rpc-attempts and
+		// grpc-retry-pushback-ms, which are "reserved", but their API
+		// intentionally works via metadata.
+		"te":
+		return true
+	default:
+		return false
+	}
+}
+
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
+func isWhitelistedHeader(hdr string) bool {
+	switch hdr {
+	case ":authority", "user-agent":
+		return true
+	default:
+		return false
+	}
+}
+
+func (d *decodeState) status() *status.Status {
+	if d.data.statusGen == nil {
+		// No status-details were provided; generate status using code/msg.
+		d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
+	}
+	return d.data.statusGen
+}
+
+const binHdrSuffix = "-bin"
+
+func encodeBinHeader(v []byte) string {
+	return base64.RawStdEncoding.EncodeToString(v)
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+func encodeMetadataHeader(k, v string) string {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		return encodeBinHeader(([]byte)(v))
+	}
+	return v
+}
+
+func decodeMetadataHeader(k, v string) (string, error) {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		b, err := decodeBinHeader(v)
+		return string(b), err
+	}
+	return v, nil
+}
+
+func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) {
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit")
+	}
+
+	for _, hf := range frame.Fields {
+		d.processHeaderField(hf)
+	}
+
+	if d.data.isGRPC {
+		if d.data.grpcErr != nil {
+			return http2.ErrCodeProtocol, d.data.grpcErr
+		}
+		if d.serverSide {
+			return http2.ErrCodeNo, nil
+		}
+		if d.data.rawStatusCode == nil && d.data.statusGen == nil {
+			// gRPC status doesn't exist.
+			// Set rawStatusCode to be unknown and return nil error.
+			// That way, if the stream has ended, this Unknown status
+			// will be propagated to the user.
+			// Otherwise, it will be ignored; in that case, the status from
+			// a later trailer (one with the StreamEnded flag set) is propagated.
+			code := int(codes.Unknown)
+			d.data.rawStatusCode = &code
+		}
+		return http2.ErrCodeNo, nil
+	}
+
+	// HTTP fallback mode
+	if d.data.httpErr != nil {
+		return http2.ErrCodeProtocol, d.data.httpErr
+	}
+
+	var (
+		code = codes.Internal // when header does not include HTTP status, return INTERNAL
+		ok   bool
+	)
+
+	if d.data.httpStatus != nil {
+		code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
+		if !ok {
+			code = codes.Unknown
+		}
+	}
+
+	return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg())
+}
+
+// constructHTTPErrMsg constructs the error message to be returned in HTTP fallback mode.
+// Format: HTTP status code and its corresponding message + content-type error message.
+func (d *decodeState) constructHTTPErrMsg() string {
+	var errMsgs []string
+
+	if d.data.httpStatus == nil {
+		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
+	} else {
+		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
+	}
+
+	if d.data.contentTypeErr == "" {
+		errMsgs = append(errMsgs, "transport: missing content-type field")
+	} else {
+		errMsgs = append(errMsgs, d.data.contentTypeErr)
+	}
+
+	return strings.Join(errMsgs, "; ")
+}
+
+func (d *decodeState) addMetadata(k, v string) {
+	if d.data.mdata == nil {
+		d.data.mdata = make(map[string][]string)
+	}
+	d.data.mdata[k] = append(d.data.mdata[k], v)
+}
+
+func (d *decodeState) processHeaderField(f hpack.HeaderField) {
+	switch f.Name {
+	case "content-type":
+		contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value)
+		if !validContentType {
+			d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
+			return
+		}
+		d.data.contentSubtype = contentSubtype
+		// TODO: do we want to propagate the whole content-type in the metadata,
+		// or come up with a way to just propagate the content-subtype if it was set?
+		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
+		// in the metadata?
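+		// Examples of the split performed by grpcutil.ContentSubtype:
+		//   "application/grpc"       -> subtype "" (valid)
+		//   "application/grpc+proto" -> subtype "proto" (valid)
+		//   "application/json"       -> invalid, rejected above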
+ d.addMetadata(f.Name, f.Value) + d.data.isGRPC = true + case "grpc-encoding": + d.data.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.data.rawStatusCode = &code + case "grpc-message": + d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + d.data.statusGen = status.FromProto(s) + case "grpc-timeout": + d.data.timeoutSet = true + var err error + if d.data.timeout, err = decodeTimeout(f.Value); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.data.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + return + } + d.data.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return + } + d.data.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return + } + d.data.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + if logger.V(logLevel) { + logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + } + return + } + d.addMetadata(f.Name, v) + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. 
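+		// maxHours works out to 2562047 (math.MaxInt64 nanoseconds is about
+		// 292 years), so an on-the-wire value such as "99999999H" is clamped
+		// here rather than overflowing the multiplication below.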
+ return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. 
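+		// With a batchSize of 0 every Write goes straight to the socket.
+		// Otherwise writes accumulate in buf and reach the socket only once
+		// offset crosses batchSize, so many small frames cost one syscall
+		// instead of one each.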
+ return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 000000000..b74030a96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,804 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. 
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
+type recvBuffer struct {
+ c chan recvMsg
+ mu sync.Mutex
+ backlog []recvMsg
+ err error
+}
+
+func newRecvBuffer() *recvBuffer {
+ b := &recvBuffer{
+ c: make(chan recvMsg, 1),
+ }
+ return b
+}
+
+func (b *recvBuffer) put(r recvMsg) {
+ b.mu.Lock()
+ if b.err != nil {
+ b.mu.Unlock()
+ // An error had occurred earlier, don't accept more
+ // data or errors.
+ return
+ }
+ b.err = r.err
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- r:
+ b.mu.Unlock()
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, r)
+ b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+ b.mu.Lock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = recvMsg{}
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+ b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+ return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+ closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+ ctx context.Context
+ ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+ recv *recvBuffer
+ last *bytes.Buffer // Stores the remaining data in the previous calls.
+ err error
+ freeBuffer func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ if r.last != nil {
+ // Read remaining data left in last call.
+ copied, _ := r.last.Read(p)
+ if r.last.Len() == 0 {
+ r.freeBuffer(r.last)
+ r.last = nil
+ }
+ return copied, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readClient(p)
+ } else {
+ n, r.err = r.read(p)
+ }
+ return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+ select {
+ case <-r.ctxDone:
+ return 0, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, p)
+ }
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+ // If the context is canceled, the stream is closed with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark the stream as done, thus fixing the race.
+ //
+ // TODO: delaying ctx error seems like an unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readAdditional(m, p)
+ case m := <-r.recv.get():
+ return r.readAdditional(m, p)
+ }
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+ r.recv.load()
+ if m.err != nil {
+ return 0, m.err
+ }
+ copied, _ := m.buffer.Read(p)
+ if m.buffer.Len() == 0 {
+ r.freeBuffer(m.buffer)
+ r.last = nil
+ } else {
+ r.last = m.buffer
+ }
+ return copied, nil
+}
+
+type streamState uint32
+
+const (
+ streamActive streamState = iota
+ streamWriteDone // EndStream sent
+ streamReadDone // EndStream received
+ streamDone // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+ id uint32
+ st ServerTransport // nil for client side Stream
+ ct *http2Client // nil for server side Stream
+ ctx context.Context // the associated context of the stream
+ cancel context.CancelFunc // always nil for client side Stream
+ done chan struct{} // closed at the end of stream to unblock writers. On the client side.
+ ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
+ method string // the associated RPC method of the stream
+ recvCompress string
+ sendCompress string
+ buf *recvBuffer
+ trReader io.Reader
+ fc *inFlow
+ wq *writeQuota
+
+ // Callback to state application's intentions to read data. This
+ // is used to adjust flow control, if needed.
+ requestRead func(int)
+
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ // headerValid indicates whether a valid header was received. Only
+ // meaningful after headerChan is closed (always call waitOnHeader() before
+ // reading its value). Not valid on server side.
+ headerValid bool
+
+ // hdrMu protects header and trailer metadata on the server-side.
+ hdrMu sync.Mutex
+ // On client side, header keeps the received header metadata.
+ //
+ // On server side, header keeps the header set by SetHeader(). The complete
+ // header will be merged into this after t.WriteHeader() is called.
+ header metadata.MD
+ trailer metadata.MD // the key-value map of trailer metadata.
+
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+ // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+ headerSent uint32
+
+ state streamState
+
+ // On client-side it is the status error received from the server.
+ // On server-side it is unused.
+ status *status.Status
+
+ bytesReceived uint32 // indicates whether any bytes have been received on this stream
+ unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+ // contentSubtype is the content-subtype for requests.
+ // this must be lowercase or the behavior is undefined.
+ contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+ return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
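+// Transports call it before writing headers so that headers go out at most once.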
+func (s *Stream) updateHeaderSent() bool {
+ return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+ return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+ return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+ return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+ if s.headerChan == nil {
+ // On the server headerChan is always nil since a stream originates
+ // only after having received headers.
+ return
+ }
+ select {
+ case <-s.ctx.Done():
+ // Close the stream to prevent headers/trailers from changing after
+ // this function returns.
+ s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+ // headerChan could possibly not be closed yet if closeStream raced
+ // with operateHeaders; wait until it is closed explicitly here.
+ <-s.headerChan
+ case <-s.headerChan:
+ }
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+ s.waitOnHeader()
+ return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+ s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+ return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+ if s.headerChan == nil {
+ // On server side, return the header in stream. It will be the out
+ // header after t.WriteHeader is called.
+ return s.header.Copy(), nil
+ }
+ s.waitOnHeader()
+ if !s.headerValid {
+ return nil, s.status.Err()
+ }
+ return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+ s.waitOnHeader()
+ return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// read or write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+ c := s.trailer.Copy()
+ return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+ return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+ return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+ return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(m)
+}
+
+// Read reads all len(p) bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.(*transportReader).er; er != nil {
+ return 0, er
+ }
+ s.requestRead(len(p))
+ return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+ reader io.Reader
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+ er error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+ n, err = t.reader.Read(p)
+ if err != nil {
+ t.er = err
+ return
+ }
+ t.windowHandler(n)
+ return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+ return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+ return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+ return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ closing
+ draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
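+// It is passed to NewServerTransport below when the server accepts a connection.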
+type ServerConfig struct {
+ MaxStreams uint32
+ AuthInfo credentials.AuthInfo
+ InTapHandle tap.ServerInHandle
+ StatsHandler stats.Handler
+ KeepaliveParams keepalive.ServerParameters
+ KeepalivePolicy keepalive.EnforcementPolicy
+ InitialWindowSize int32
+ InitialConnWindowSize int32
+ WriteBufferSize int
+ ReadBufferSize int
+ ChannelzParentID int64
+ MaxHeaderListSize *uint32
+ HeaderTableSize *uint32
+}
+
+// NewServerTransport creates a ServerTransport with conn, or returns a
+// non-nil error if it fails.
+func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
+ return newHTTP2Server(conn, config)
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+ // UserAgent is the application user agent.
+ UserAgent string
+ // Dialer specifies how to dial a network address.
+ Dialer func(context.Context, string) (net.Conn, error)
+ // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+ FailOnNonTempDialError bool
+ // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+ PerRPCCredentials []credentials.PerRPCCredentials
+ // TransportCredentials stores the Authenticator required to setup a client
+ // connection. Only one of TransportCredentials and CredsBundle is non-nil.
+ TransportCredentials credentials.TransportCredentials
+ // CredsBundle is the credentials bundle to be used. Only one of
+ // TransportCredentials and CredsBundle is non-nil.
+ CredsBundle credentials.Bundle
+ // KeepaliveParams stores the keepalive parameters.
+ KeepaliveParams keepalive.ClientParameters
+ // StatsHandler stores the handler for stats.
+ StatsHandler stats.Handler
+ // InitialWindowSize sets the initial window size for a stream.
+ InitialWindowSize int32
+ // InitialConnWindowSize sets the initial window size for a connection.
+ InitialConnWindowSize int32
+ // WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
+ WriteBufferSize int
+ // ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
+ ReadBufferSize int
+ // ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
+ ChannelzParentID int64
+ // MaxHeaderListSize sets the max (uncompressed) size of the header list that is prepared to be received.
+ MaxHeaderListSize *uint32
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+ // Last indicates whether this write is the last piece for
+ // this stream.
+ Last bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+ // Host specifies the peer's host.
+ Host string
+
+ // Method specifies the operation to perform.
+ Method string
+
+ // SendCompress specifies the compression algorithm applied on
+ // outbound message.
+ SendCompress string
+
+ // Creds specifies credentials.PerRPCCredentials for a call.
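+ // They are applied per call, on top of any credentials configured on the
+ // connection itself.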
+ Creds credentials.PerRPCCredentials
+
+ // ContentSubtype specifies the content-subtype for a request. For example, a
+ // content-subtype of "proto" will result in a content-type of
+ // "application/grpc+proto". The value of ContentSubtype must be all
+ // lowercase, otherwise the behavior is undefined. See
+ // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+ // for more details.
+ ContentSubtype string
+
+ PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+ // Close tears down this transport. Once it returns, the transport
+ // should not be accessed any more. The caller must make sure this
+ // is called only once.
+ Close() error
+
+ // GracefulClose starts to tear down the transport: the transport will stop
+ // accepting new RPCs and NewStream will return an error. Once all streams are
+ // finished, the transport will close.
+ //
+ // It does not block.
+ GracefulClose()
+
+ // Write sends the data for the given stream. A nil stream indicates
+ // the write is to be performed on the transport as a whole.
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+ // NewStream creates a Stream for an RPC.
+ NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+ // CloseStream clears the footprint of a stream when the stream is
+ // not needed any more. The err indicates the error incurred when
+ // CloseStream is called. Must be called when a stream is finished
+ // unless the associated transport is closing.
+ CloseStream(stream *Stream, err error)
+
+ // Error returns a channel that is closed when some I/O error
+ // happens. Typically the caller should have a goroutine to monitor
+ // this in order to take action (e.g., close the current transport
+ // and create a new one) in the error case. It should not return nil
+ // once the transport is initiated.
+ Error() <-chan struct{}
+
+ // GoAway returns a channel that is closed when ClientTransport
+ // receives the draining signal from the server (e.g., GOAWAY frame in
+ // HTTP/2).
+ GoAway() <-chan struct{}
+
+ // GetGoAwayReason returns the reason why the GoAway frame was received.
+ GetGoAwayReason() GoAwayReason
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
+
+ // IncrMsgSent increments the number of messages sent through this transport.
+ IncrMsgSent()
+
+ // IncrMsgRecv increments the number of messages received through this transport.
+ IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+ // HandleStreams receives incoming streams using the given handler.
+ HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+
+ // WriteHeader sends the header metadata for the given stream.
+ // WriteHeader may not be called on all streams.
+ WriteHeader(s *Stream, md metadata.MD) error
+
+ // Write sends the data for the given stream.
+ // Write may not be called on all streams.
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+ // WriteStatus sends the status of a stream to the client. WriteStatus is
+ // the final call made on a stream and always occurs.
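+ // No Write or WriteHeader may follow it on the same stream.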
+ WriteStatus(s *Stream, st *status.Status) error
+
+ // Close tears down the transport. Once it is called, the transport
+ // should not be accessed any more. All the pending streams and their
+ // handlers will be terminated asynchronously.
+ Close() error
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
+
+ // Drain notifies the client that this ServerTransport stops accepting new RPCs.
+ Drain()
+
+ // IncrMsgSent increments the number of messages sent through this transport.
+ IncrMsgSent()
+
+ // IncrMsgRecv increments the number of messages received through this transport.
+ IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+ return ConnectionError{
+ Desc: fmt.Sprintf(format, a...),
+ temp: temp,
+ err: e,
+ }
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+ Desc string
+ temp bool
+ err error
+}
+
+func (e ConnectionError) Error() string {
+ return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+ return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+ // Never return nil error here.
+ // If the original error is nil, return itself.
+ if e.err == nil {
+ return e
+ }
+ return e.err
+}
+
+var (
+ // ErrConnClosing indicates that the transport is closing.
+ ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+ // errStreamDrain indicates that the stream is rejected because the
+ // connection is draining. This could be caused by goaway or balancer
+ // removing the address.
+ errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+ // errStreamDone is returned from write at the client side to indicate
+ // an error at the application layer.
+ errStreamDone = errors.New("the stream is done")
+ // statusGoAway indicates that the server sent a GOAWAY that included this
+ // stream's ID in unprocessed RPCs.
+ statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+ // GoAwayInvalid indicates that no GoAway frame is received.
+ GoAwayInvalid GoAwayReason = 0
+ // GoAwayNoReason is the default value when GoAway frame is received.
+ GoAwayNoReason GoAwayReason = 1
+ // GoAwayTooManyPings indicates that a GoAway frame with
+ // ErrCodeEnhanceYourCalm was received and that the debug data said
+ // "too_many_pings".
+ GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz-related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since to do an atomic
+// operation on an int64 variable on a 32-bit machine, the user is responsible for enforcing memory alignment.
+// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
+type channelzData struct {
+ kpCount int64
+ // The number of streams that have started, including already finished ones.
+ streamsStarted int64
+ // Client side: The number of streams that have ended successfully by receiving
+ // a frame with the EoS bit set from the server.
+ // Server side: The number of streams that have ended successfully by sending
+ // a frame with the EoS bit set.
+ streamsSucceeded int64
+ streamsFailed int64
+ // lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of int64 type
+ // instead of time.Time since it's more costly to atomically update a time.Time variable than an int64
+ // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+ lastStreamCreatedTime int64
+ msgSent int64
+ msgRecv int64
+ lastMsgSentTime int64
+ lastMsgRecvTime int64
+}
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+ switch err {
+ case context.DeadlineExceeded:
+ return status.Error(codes.DeadlineExceeded, err.Error())
+ case context.Canceled:
+ return status.Error(codes.Canceled, err.Error())
+ }
+ return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 000000000..34d31b5e7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package keepalive defines configurable parameters for point-to-point
+// healthcheck.
+package keepalive
+
+import (
+ "time"
+)
+
+// ClientParameters is used to set keepalive parameters on the client-side.
+// These configure how the client will actively probe to notice when a
+// connection is broken and send pings so intermediaries will be aware of the
+// liveness of the connection. Make sure these parameters are set in
+// coordination with the keepalive policy on the server, as incompatible
+// settings can result in closing of the connection.
+type ClientParameters struct {
+ // After a duration of this time, if the client doesn't see any activity, it
+ // pings the server to see if the transport is still alive.
+ // If set below 10s, a minimum value of 10s will be used instead.
+ Time time.Duration // The current default value is infinity.
+ // After having pinged for a keepalive check, the client waits for a duration
+ // of Timeout, and if no activity is seen even after that, the connection is
+ // closed.
+ Timeout time.Duration // The current default value is 20 seconds.
+ // If true, the client sends keepalive pings even with no active RPCs. If false,
+ // when there are no active RPCs, Time and Timeout will be ignored and no
+ // keepalive pings will be sent.
+ PermitWithoutStream bool // false by default.
+}
+
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
+type ServerParameters struct {
+ // MaxConnectionIdle is a duration for the amount of time after which an
+ // idle connection would be closed by sending a GoAway. Idleness duration is
+ // measured from the most recent time the number of outstanding RPCs became
+ // zero, or from connection establishment.
+ MaxConnectionIdle time.Duration // The current default value is infinity.
+ // MaxConnectionAge is a duration for the maximum amount of time a
+ // connection may exist before it will be closed by sending a GoAway. A
+ // random jitter of +/-10% will be added to MaxConnectionAge to spread out
+ // connection storms.
+ MaxConnectionAge time.Duration // The current default value is infinity.
+ // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+ // which the connection will be forcibly closed.
+ MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+ // After a duration of this time, if the server doesn't see any activity, it
+ // pings the client to see if the transport is still alive.
+ // If set below 1s, a minimum value of 1s will be used instead.
+ Time time.Duration // The current default value is 2 hours.
+ // After having pinged for a keepalive check, the server waits for a duration
+ // of Timeout, and if no activity is seen even after that, the connection is
+ // closed.
+ Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. The server will close the connection with a client that
+// violates this policy.
+type EnforcementPolicy struct {
+ // MinTime is the minimum amount of time a client should wait before sending
+ // a keepalive ping.
+ MinTime time.Duration // The current default value is 5 minutes.
+ // If true, the server allows keepalive pings even when there are no active
+ // streams (RPCs). If false, and the client sends pings when there are no
+ // active streams, the server will send GOAWAY and close the connection.
+ PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 000000000..cf6d1b947
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,209 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC
+// library. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+ return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. 
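+//
+// For example, a caller might attach an (illustrative) key-value pair:
+//
+//  ctx = metadata.AppendToOutgoingContext(ctx, "request-id", "42")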
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..e01d219ff --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. 
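+// It is typically called by transport implementations; most callers only read
+// the peer back with FromContext.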
+func NewContext(ctx context.Context, p *Peer) context.Context {
+ return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+ p, ok = ctx.Value(peerKey{}).(*Peer)
+ return
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 000000000..a58174b6f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "io"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/status"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+ mu sync.Mutex
+ done bool
+ blockingCh chan struct{}
+ picker balancer.Picker
+}
+
+func newPickerWrapper() *pickerWrapper {
+ return &pickerWrapper{blockingCh: make(chan struct{})}
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+ pw.mu.Lock()
+ if pw.done {
+ pw.mu.Unlock()
+ return
+ }
+ pw.picker = p
+ // pw.blockingCh should never be nil.
+ close(pw.blockingCh)
+ pw.blockingCh = make(chan struct{})
+ pw.mu.Unlock()
+}
+
+func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
+ acw.mu.Lock()
+ ac := acw.ac
+ acw.mu.Unlock()
+ ac.incrCallsStarted()
+ return func(b balancer.DoneInfo) {
+ if b.Err != nil && b.Err != io.EOF {
+ ac.incrCallsFailed()
+ } else {
+ ac.incrCallsSucceeded()
+ }
+ if done != nil {
+ done(b)
+ }
+ }
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ var ch chan struct{}
+
+ var lastPickErr error
+ for {
+ pw.mu.Lock()
+ if pw.done {
+ pw.mu.Unlock()
+ return nil, nil, ErrClientConnClosing
+ }
+
+ if pw.picker == nil {
+ ch = pw.blockingCh
+ }
+ if ch == pw.blockingCh {
+ // This could happen when either:
+ // - pw.picker is nil (the previous if condition), or
+ // - pick has already been called on the current picker.
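+ // Wait for a new picker: block until updatePicker closes blockingCh
+ // or the RPC's context ends.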
+ pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + return nil, nil, err + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..56e33f6c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,136 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +// PickFirstBalancerName is the name of the pick_first balancer. 
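+// It can be referenced in a service config's loadBalancingPolicy to select
+// this policy explicitly.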
+const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) + } + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + if b.sc == nil { + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.sc.Connect() + } + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + } + if b.sc != sc { + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { + b.sc = nil + return + } + + switch s.ConnectivityState { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + case connectivity.Connecting: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: s.ConnectionError}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 000000000..0a1e975ad --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a marshalled and compressed object.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PreparedMsg struct {
+ // Struct for preparing the msg before sending it
+ encodedData []byte
+ hdr []byte
+ payload []byte
+}
+
+// Encode marshals and compresses the message using the codec and compressor for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+ ctx := s.Context()
+ rpcInfo, ok := rpcInfoFromContext(ctx)
+ if !ok {
+ return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+ }
+
+ // check if the context has the relevant information to prepareMsg
+ if rpcInfo.preloaderInfo == nil {
+ return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+ }
+ if rpcInfo.preloaderInfo.codec == nil {
+ return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+ }
+
+ // prepare the msg
+ data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+ if err != nil {
+ return err
+ }
+ p.encodedData = data
+ compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+ if err != nil {
+ return err
+ }
+ p.hdr, p.payload = msgHeader(data, compData)
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
new file mode 100644
index 000000000..f8f69bfb7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/proxy.go
@@ -0,0 +1,152 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "bufio"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+ // errDisabled indicates that proxy is disabled for the address.
+ errDisabled = errors.New("proxy is disabled for the address")
+ // The following variable will be overwritten in the tests.
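+ // It reports, per request, which proxy (if any) the environment mandates.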
+ httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(ctx context.Context, address string) (*url.URL, error) {
+ req := &http.Request{
+ URL: &url.URL{
+ Scheme: "https",
+ Host: address,
+ },
+ }
+ url, err := httpProxyFromEnvironment(req)
+ if err != nil {
+ return nil, err
+ }
+ if url == nil {
+ return nil, errDisabled
+ }
+ return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+ net.Conn
+ r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+ auth := username + ":" + password
+ return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+
+ req := &http.Request{
+ Method: http.MethodConnect,
+ URL: &url.URL{Host: backendAddr},
+ Header: map[string][]string{"User-Agent": {grpcUA}},
+ }
+ if t := proxyURL.User; t != nil {
+ u := t.Username()
+ p, _ := t.Password()
+ req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+ }
+
+ if err := sendHTTPRequest(ctx, req, conn); err != nil {
+ return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+
+ r := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(r, req)
+ if err != nil {
+ return nil, fmt.Errorf("reading server HTTP response: %v", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ dump, err := httputil.DumpResponse(resp, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+ }
+ return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+ }
+
+ return &bufConn{Conn: conn, r: r}, nil
+}
+
+// newProxyDialer returns a dialer that connects to the proxy first if necessary.
+// The returned dialer checks if a proxy is necessary, dials the proxy with the
+// provided dialer, does the HTTP CONNECT handshake, and returns the connection.
+func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
+ return func(ctx context.Context, addr string) (conn net.Conn, err error) {
+ var newAddr string
+ proxyURL, err := mapAddress(ctx, addr)
+ if err != nil {
+ if err != errDisabled {
+ return nil, err
+ }
+ newAddr = addr
+ } else {
+ newAddr = proxyURL.Host
+ }
+
+ conn, err = dialer(ctx, newAddr)
+ if err != nil {
+ return
+ }
+ if proxyURL != nil {
+ // proxy is disabled if proxyURL is nil.
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
+ }
+ return
+ }
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+ req = req.WithContext(ctx)
+ if err := req.Write(conn); err != nil {
+ return fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
new file mode 100644
index 000000000..3443ad975
--- /dev/null
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) + +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +# Pull in the following repos to build the MeshCA config proto. +ENVOY_API_REPOS=( + "https://github.com/envoyproxy/data-plane-api" + "https://github.com/cncf/udpa" + "https://github.com/envoyproxy/protoc-gen-validate" +) +for repo in ${ENVOY_API_REPOS[@]}; do + dirname=$(basename ${repo}) + mkdir -p ${WORKDIR}/${dirname} + echo "git clone ${repo}" + git clone --quiet ${repo} ${WORKDIR}/${dirname} +done + +# Pull in the MeshCA service proto. 
+mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 +echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" +curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + reflection/grpc_reflection_v1alpha/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + ${WORKDIR}/grpc-proto/grpc/tls/provider/meshca/experimental/config.proto + ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Menvoy/config/core/v3/config_source.proto=github.com/envoyproxy/go-control-plane/envoy/config/core/v3 + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/data-plane-api \ + -I${WORKDIR}/udpa \ + -I${WORKDIR}/protoc-gen-validate \ + -I${WORKDIR}/istio \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/data-plane-api \ + -I${WORKDIR}/udpa \ + -I${WORKDIR}/protoc-gen-validate \ + -I${WORKDIR}/istio \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 + +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. 
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/
+mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/
+
+cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 000000000..e9fa8e33d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,260 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default
+// scheme is initially "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
+	Backend AddressType = iota
+	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: to select the GRPCLB load balancing policy, use a service
+	// config with a corresponding loadBalancingConfig. To supply balancer
+	// addresses to the GRPCLB load balancing policy, set State.Attributes
+	// using balancer/grpclb/state.Set.
+	GRPCLB
+)
+
+// Address represents a server the client connects to.
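+//
+// For illustration, a resolver produces values such as (hypothetical
+// endpoint):
+//
+//	Address{Addr: "10.0.0.1:443"}
+//
+// leaving ServerName empty in the common case described below.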
+// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // If Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes + + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. + Metadata interface{} +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. 
For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. 
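+	//
+	// For illustration, a resolver that resolves once and never updates
+	// can satisfy both remaining methods trivially (hypothetical type):
+	//
+	//	func (*staticResolver) ResolveNow(ResolveNowOptions) {}
+	//	func (*staticResolver) Close()                       {}
+	//
+	// A real Close also stops any watch goroutines started in Build.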
+ Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 000000000..f2d81968f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,222 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + pollingMu sync.Mutex + polling chan struct{} +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +// poll begins or ends asynchronous polling of the resolver based on whether +// err is ErrBadResolverState. 
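+//
+// The ccr.polling channel doubles as the cancellation signal for the
+// goroutine started below: closing it stops an in-flight polling loop, and
+// a nil channel means no polling is in progress.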
+func (ccr *ccResolverWrapper) poll(err error) { + ccr.pollingMu.Lock() + defer ccr.pollingMu.Unlock() + if err != balancer.ErrBadResolverState { + // stop polling + if ccr.polling != nil { + close(ccr.polling) + ccr.polling = nil + } + return + } + if ccr.polling != nil { + // already polling + return + } + p := make(chan struct{}) + ccr.polling = p + go func() { + for i := 0; ; i++ { + ccr.resolveNow(resolver.ResolveNowOptions{}) + t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) + select { + case <-p: + t.Stop() + return + case <-ccr.done.Done(): + // Resolver has been closed. + t.Stop() + return + case <-t.C: + select { + case <-p: + return + default: + } + // Timer expired; re-resolve. + } + } + }() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.curState = s + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + if ccr.done.HasFired() { + return + } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
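+//
+// For illustration, a resolver hands over raw JSON through the
+// resolver.ClientConn it was built with, e.g. (the config shown is a
+// plausible example, not prescribed):
+//
+//	cc.NewServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`)
+//
+// New implementations should prefer UpdateState with a parsed config.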
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + ccr.poll(balancer.ErrBadResolverState) + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + } + ccr.curState.ServiceConfig = scpr + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtInfo, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..f0609f2a4 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,938 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. 
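+	// The returned name is what is sent to the peer in the grpc-encoding
+	// header on this legacy path. For illustration, such a compressor is
+	// installed on a client with the deprecated dial option:
+	//
+	//	conn, err := grpc.Dial(target, grpc.WithCompressor(grpc.NewGZIPCompressor()))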
+	Type() string
+}
+
+type gzipCompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() interface{} {
+				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	var z *gzip.Reader
+	switch maybeZ := d.pool.Get().(type) {
+	case nil:
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		z = newZ
+	case *gzip.Reader:
+		z = maybeZ
+		if err := z.Reset(r); err != nil {
+			d.pool.Put(z)
+			return nil, err
+		}
+	}
+
+	defer func() {
+		z.Close()
+		d.pool.Put(z)
+	}()
+	return ioutil.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	compressorType        string
+	failFast              bool
+	maxReceiveMessageSize *int
+	maxSendMessageSize    *int
+	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
+	maxRetryRPCBufferSize int
+}
+
+func defaultCallInfo() *callInfo {
+	return &callInfo{
+		failFast:              true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server. If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed. after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo, *csAttempt)
+}
+
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
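+//
+// For illustration (hypothetical caller-side type):
+//
+//	type withTag struct {
+//		grpc.EmptyCallOption
+//		tag string
+//	}
+//
+// An interceptor can then type-assert the CallOptions it receives to
+// recover the tag.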
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error      { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(c *callInfo) error { return nil }
+func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.s.Header()
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(c *callInfo) error { return nil }
+func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.s.Trailer()
+}
+
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(c *callInfo) error { return nil }
+func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.s.Context()); ok {
+		*o.PeerAddr = *x
+	}
+}
+
+// WaitForReady configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If waitForReady is false, the RPC will fail
+// immediately. Otherwise, the RPC client will block the call until a
+// connection is available (or the call is canceled or times out) and will
+// retry the call if it fails due to a transient error. gRPC will not retry if
+// data was written to the wire unless the server indicates it did not process
+// the data. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+//
+// By default, RPCs don't "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
+func FailFast(failFast bool) CallOption {
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
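+//
+// It is normally constructed via WaitForReady (or the deprecated FailFast);
+// for illustration, with hypothetical stub names:
+//
+//	resp, err := client.GetFeature(ctx, req, grpc.WaitForReady(true))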
+type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. 
See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. 
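+//
+// For illustration (hypothetical stub names), raising the per-call cap to
+// 1 MiB from the 256KB default set in defaultCallInfo:
+//
+//	resp, err := client.GetFeature(ctx, req, grpc.MaxRetryRPCBufferSize(1<<20))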
+// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. 
If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + size = len(d) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } + return d, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. 
+func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 7. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. 
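+//
+// For illustration, generated .pb.go files pin the requirement with a blank
+// reference such as
+//
+//	const _ = grpc.SupportPackageIsVersion7
+//
+// which fails to compile against a grpc-go release that is too old.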
+const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..968eb598e --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1780 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + +// Server is a gRPC server to serve RPC requests. 
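+//
+// For illustration (pb and greeterServer are hypothetical generated and
+// user-defined names):
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatalf("failed to listen: %v", err)
+//	}
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeterServer{})
+//	if err := s.Serve(lis); err != nil {
+//		log.Fatalf("failed to serve: %v", err)
+//	}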
+type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData + + serverWorkerChannels []chan *serverWorkerData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. 
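+//
+// For illustration, both buffer sizes are supplied at construction:
+//
+//	s := grpc.NewServer(grpc.ReadBufferSize(64*1024), grpc.WriteBufferSize(64*1024))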
+func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. 
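+//
+// For illustration, to accept requests up to 16 MiB:
+//
+//	s := grpc.NewServer(grpc.MaxRecvMsgSize(16 << 20))
+//
+// The client-side analogue for responses is the MaxCallRecvMsgSize
+// CallOption.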
+func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. 
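ChainUnaryInterceptor above composes interceptors outermost-first. A small sketch of two interceptors and the order they run in (logging and auth are hypothetical names; auth's credential check is elided):

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
)

// logging runs first (outermost) and therefore measures the full call,
// including everything the inner interceptor and the handler do.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    start := time.Now()
    resp, err := handler(ctx, req)
    log.Printf("%s took %v err=%v", info.FullMethod, time.Since(start), err)
    return resp, err
}

// auth runs second (inner); returning early here would skip the real handler.
func auth(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    // ... verify credentials from incoming metadata here ...
    return handler(ctx, req)
}

func main() {
    _ = grpc.NewServer(grpc.ChainUnaryInterceptor(logging, auth))
}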
+func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. 
This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of <package>.<service>. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. 
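Putting RegisterService and GetServiceInfo together: a typical bootstrap registers services (via the generated Register* helpers, which call RegisterService internally) before Serve. This sketch uses the real grpc health service as a stand-in for any generated service; the port is arbitrary:

package main

import (
    "log"
    "net"

    "google.golang.org/grpc"
    "google.golang.org/grpc/health"
    healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatal(err)
    }
    s := grpc.NewServer()

    // Registration must happen before Serve; the generated helper calls
    // s.RegisterService with the service's ServiceDesc for us.
    healthpb.RegisterHealthServer(s, health.NewServer())

    // GetServiceInfo reflects whatever has been registered so far.
    for name, info := range s.GetServiceInfo() {
        for _, m := range info.Methods {
            log.Printf("%s/%s (client-stream=%v server-stream=%v)",
                name, m.Name, m.IsClientStream, m.IsServerStream)
        }
    }

    // Serve blocks; it returns nil after Stop/GracefulStop.
    if err := s.Serve(lis); err != nil {
        log.Fatal(err)
    }
}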
+var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. 
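The accept loop in Serve above retries temporary errors with capped exponential backoff. A standalone restatement of just that retry logic, outside gRPC (acceptLoop and handle are hypothetical; Serve itself also aborts the sleep when quit fires, which this sketch omits):

package main

import (
    "log"
    "net"
    "time"
)

// acceptLoop mirrors Server.Serve: temporary accept errors back off
// exponentially from 5ms, capped at 1s, and the delay resets to zero
// after any successful accept.
func acceptLoop(lis net.Listener, handle func(net.Conn)) error {
    var tempDelay time.Duration
    for {
        conn, err := lis.Accept()
        if err != nil {
            if ne, ok := err.(interface{ Temporary() bool }); ok && ne.Temporary() {
                if tempDelay == 0 {
                    tempDelay = 5 * time.Millisecond
                } else {
                    tempDelay *= 2
                }
                if max := time.Second; tempDelay > max {
                    tempDelay = max
                }
                log.Printf("accept error: %v; retrying in %v", err, tempDelay)
                time.Sleep(tempDelay)
                continue
            }
            return err // permanent failure ends the loop
        }
        tempDelay = 0
        go handle(conn)
    }
}

func main() {
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        log.Fatal(err)
    }
    log.Fatal(acceptLoop(lis, func(c net.Conn) { c.Close() }))
}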
+ if err != credentials.ErrConnDispatched { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + + var roundRobinCounter uint32 + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. 
Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain() + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. 
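Fleshing out the port-sharing snippet from the ServeHTTP doc comment into a runnable shape (cert.pem/key.pem and the /healthz route are placeholders; a real setup would also register services on grpcServer):

package main

import (
    "log"
    "net/http"
    "strings"

    "google.golang.org/grpc"
)

func main() {
    grpcServer := grpc.NewServer()
    httpMux := http.NewServeMux()
    httpMux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    })

    // Route by protocol version and content type, exactly as the
    // ServeHTTP doc comment above suggests.
    root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
            grpcServer.ServeHTTP(w, r)
        } else {
            httpMux.ServeHTTP(w, r)
        }
    })

    // ServeHTTP requires HTTP/2, which for net/http effectively means
    // TLS; ListenAndServeTLS enables h2 by default.
    log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", root))
}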
+func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
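The recursion in getChainUnaryHandler above can be hard to see at a glance: interceptor i is handed a synthetic handler that invokes interceptor i+1, and only the last one calls the real handler. A toy analogue over plain string functions, stripped of the gRPC types:

package main

import "fmt"

// handler and interceptor are toy stand-ins for UnaryHandler and
// UnaryServerInterceptor.
type handler func(req string) string
type interceptor func(req string, next handler) string

// chain builds the same nesting as getChainUnaryHandler: the head of the
// slice wraps a handler that runs the rest of the chain.
func chain(ints []interceptor, final handler) handler {
    if len(ints) == 0 {
        return final
    }
    return func(req string) string {
        return ints[0](req, chain(ints[1:], final))
    }
}

func main() {
    tag := func(name string) interceptor {
        return func(req string, next handler) string {
            return name + "(" + next(req) + ")"
        }
    }
    h := chain([]interceptor{tag("outer"), tag("inner")}, func(req string) string { return req })
    fmt.Println(h("req")) // outer(inner(req)): the first interceptor is outermost
}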
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? 
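Note the appErr handling above: a handler error that is already a status error is sent as-is, while anything else is converted to codes.Unknown. A sketch of what that means for handler code (findUser and its values are hypothetical):

package main

import (
    "context"
    "errors"
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// findUser stands in for a handler body. Returning a status error lets
// processUnaryRPC send that exact code to the client; a plain error is
// wrapped as codes.Unknown by the conversion above.
func findUser(ctx context.Context, id string) (string, error) {
    if id == "" {
        return "", status.Errorf(codes.InvalidArgument, "id must be non-empty")
    }
    if id != "42" {
        return "", status.Error(codes.NotFound, "no such user")
    }
    return "zaphod", nil
}

func main() {
    _, err := findUser(context.Background(), "7")
    if s, ok := status.FromError(err); ok {
        fmt.Println(s.Code(), s.Message()) // NotFound no such user
    }
    _, err = findUser(context.Background(), "")
    fmt.Println(status.Code(err)) // InvalidArgument

    // A plain error carries no code, so clients see Unknown.
    fmt.Println(status.Code(errors.New("oops"))) // Unknown
}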
+ err = t.WriteStatus(stream, statusOK) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + sh := s.opts.statsHandler + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. 
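chainStreamServerInterceptors mirrors the unary chain for streaming RPCs. A minimal stream interceptor matching the StreamServerInterceptor signature used above (timing is a hypothetical name):

package main

import (
    "log"
    "time"

    "google.golang.org/grpc"
)

// timing measures the lifetime of each streaming RPC; with
// ChainStreamInterceptor these compose outermost-first, the same way the
// unary chain does.
func timing(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    start := time.Now()
    err := handler(srv, ss)
    log.Printf("%s (client-stream=%v server-stream=%v) took %v err=%v",
        info.FullMethod, info.IsClientStream, info.IsServerStream, time.Since(start), err)
    return err
}

func main() {
    _ = grpc.NewServer(grpc.ChainStreamInterceptor(timing))
}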
+ defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + err = t.WriteStatus(ss.s, statusOK) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). 
+// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for st := range s.conns { + st.Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. 
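Stop and GracefulStop above differ in exactly one way: GracefulStop drains (no new connections or RPCs, block until in-flight RPCs finish) while Stop cancels active RPCs immediately. A common shutdown shape combining the two, with an illustrative 10s drain budget:

package main

import (
    "log"
    "net"
    "os"
    "os/signal"
    "syscall"
    "time"

    "google.golang.org/grpc"
)

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatal(err)
    }
    s := grpc.NewServer()

    go func() {
        sig := make(chan os.Signal, 1)
        signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
        <-sig

        // Prefer draining; fall back to a hard stop if a long-lived
        // stream keeps GracefulStop blocked past the budget.
        done := make(chan struct{})
        go func() { s.GracefulStop(); close(done) }()
        select {
        case <-done:
        case <-time.After(10 * time.Second):
            s.Stop() // closes connections and cancels active RPCs
        }
    }()

    // Serve returns nil after Stop/GracefulStop.
    log.Println(s.Serve(lis))
}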
+func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000..5e434ca7f --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. 
The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + retryPolicy *retryPolicy +} + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancerName will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +// retryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. 
+ // + // This field is required and must be two or greater. + maxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + initialBackoff time.Duration + maxBackoff time.Duration + backoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
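A concrete JSON document in the shape jsonSC/jsonMC/jsonRetryPolicy above decode (the echo.Echo service and method names are hypothetical; durations use the decimal-seconds-plus-"s" form parseDuration accepts). Service config normally arrives via the name resolver, but for testing it can be supplied on the client directly:

package main

import (
    "log"

    "google.golang.org/grpc"
)

const serviceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
    "waitForReady": true,
    "timeout": "0.5s",
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
    // Note: in this vintage of grpc-go, retry support is experimental and
    // may additionally be gated behind the GRPC_GO_RETRY=on environment
    // variable.
    conn, err := grpc.Dial("localhost:50051",
        grpc.WithInsecure(),
        grpc.WithDefaultServiceConfig(serviceConfig))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}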
+type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *internalserviceconfig.BalancerConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfigForTesting = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } 
+ mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..73a2f9266 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. 
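// A hedged sketch of the size-limit precedence that getMaxSize above
// implements: when both a per-call option and the method config set a cap,
// the smaller value wins; with neither set, the package default applies.
// The method name and message values are hypothetical.
package sketch

import (
	"context"

	"google.golang.org/grpc"
)

func callWithRecvCap(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
	// If the service config also sets maxResponseMessageBytes, the effective
	// receive limit is min(1<<20, that value).
	return cc.Invoke(ctx, "/demo.Service/Do", req, resp, grpc.MaxCallRecvMsgSize(1<<20))
}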
+type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..dc03731e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 000000000..63e476ee7 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,312 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
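// A minimal sketch of implementing the stats.Handler interface defined in
// handlers.go above, attached with the public grpc.WithStatsHandler dial
// option; the address is a hypothetical placeholder.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type logHandler struct{}

func (logHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	log.Printf("RPC start: %s (failfast=%v)", info.FullMethodName, info.FailFast)
	return ctx // a real handler could attach per-RPC state here
}

func (logHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {}

func (logHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	log.Printf("conn: %v -> %v", info.LocalAddr, info.RemoteAddr)
	return ctx
}

func (logHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithStatsHandler(logHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}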
+ * + */ + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. 
+ Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. 
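// A hedged sketch of dispatching on the concrete RPCStats types defined
// above from a stats.Handler's HandleRPC; only a few event types are shown.
package sketch

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

func handleRPC(ctx context.Context, s stats.RPCStats) {
	switch ev := s.(type) {
	case *stats.Begin:
		log.Printf("begin (client=%v, failfast=%v)", ev.Client, ev.FailFast)
	case *stats.InPayload:
		log.Printf("recv %d bytes (%d on the wire)", ev.Length, ev.WireLength)
	case *stats.OutPayload:
		log.Printf("sent %d bytes (%d on the wire)", ev.Length, ev.WireLength)
	case *stats.End:
		log.Printf("end after %v, err=%v", ev.EndTime.Sub(ev.BeginTime), ev.Error)
	}
}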
+func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. 
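// A hedged sketch of using the context helpers above before issuing an RPC;
// the bytes travel as the reserved grpc-tags-bin / grpc-trace-bin headers.
// As the doc comments note, new code should carry such data in ordinary
// metadata instead. The payloads here are arbitrary placeholders.
package sketch

import (
	"context"

	"google.golang.org/grpc/stats"
)

func withTagsAndTrace(ctx context.Context) context.Context {
	ctx = stats.SetTags(ctx, []byte("tenant=42")) // sent as grpc-tags-bin
	ctx = stats.SetTrace(ctx, []byte{0x01, 0x02}) // sent as grpc-trace-bin
	return ctx
}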
+func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..5fd856a38 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1551 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. 
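// A hedged sketch of the concurrency contract documented on the ClientStream
// interface below: one goroutine may call SendMsg while another calls
// RecvMsg on the same stream, but SendMsg (and CloseSend) must never run in
// two goroutines at once. The request and reply values are placeholders
// supplied by the caller and must match the connection's codec.
package sketch

import (
	"io"

	"google.golang.org/grpc"
)

func pump(cs grpc.ClientStream, reqs []interface{}, newReply func() interface{}) error {
	go func() { // sole sender goroutine: SendMsg, then CloseSend
		for _, r := range reqs {
			if cs.SendMsg(r) != nil {
				return // the real error will surface via RecvMsg
			}
		}
		cs.CloseSend()
	}()
	for { // sole receiver goroutine
		if err := cs.RecvMsg(newReply()); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
	}
}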
+ CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) 
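// A hedged sketch of calling ClientConn.NewStream directly (normally the job
// of generated code) while honoring the leak rules listed above: cancel the
// context we derived, and drain RecvMsg until a non-nil error. The method
// name and the req/reply values (which must suit the connection's codec,
// e.g. proto.Message for the default codec) are hypothetical.
package sketch

import (
	"context"
	"io"

	"google.golang.org/grpc"
)

func serverStreamCall(ctx context.Context, cc *grpc.ClientConn, req interface{}, newReply func() interface{}) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // rule 2: cancel the context we provided

	desc := &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true}
	cs, err := cc.NewStream(ctx, desc, "/demo.Service/Watch")
	if err != nil {
		return err
	}
	if err := cs.SendMsg(req); err != nil {
		return err
	}
	if err := cs.CloseSend(); err != nil {
		return err
	}
	for { // rule 3: RecvMsg until a non-nil error; io.EOF means success
		if err := cs.RecvMsg(newReply()); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}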
+} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + // Only this initial attempt has stats/tracing. 
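// A hedged sketch of the two compression paths the code above consults: a
// per-call grpc.UseCompressor option naming a compressor registered with the
// encoding package (here gzip, registered by its side-effect import), with
// the connection-level WithCompressor dial option as the fallback. The
// method and messages are hypothetical.
package sketch

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func callWithGzip(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
	return cc.Invoke(ctx, "/demo.Service/Do", req, resp, grpc.UseCompressor("gzip"))
}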
+ // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. + if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { + newAttempt := &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so it's finish won't + // be called. Call it here for stats and trace in case they are not + // nil. + newAttempt.finish(retErr) + } + }() + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + + ctx := cs.ctx + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. 
+ // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return nil + } + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if !cs.attempt.s.TrailersOnly() { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. 
+ sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + if err := cs.newAttemptLocked(nil, nil); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + return op(cs.attempt) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. + continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + if err != nil { + cs.finish(err) + return nil, err + } + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Only log if binary log is on and header has not been logged. 
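// A standalone, hedged mirror of the retry backoff computed above when the
// server sent no pushback: full jitter over an exponentially growing, capped
// interval. The real code draws from an internal grpcrand helper; math/rand
// stands in here. initial must be > 0, as the retry policy validation above
// guarantees.
package sketch

import (
	"math"
	"math/rand"
	"time"
)

func retryBackoff(retriesSincePushback int, initial, max time.Duration, multiplier float64) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(retriesSincePushback))
	if m := float64(max); cur > m {
		cur = m
	}
	return time.Duration(rand.Int63n(int64(cur)))
}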
+ logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ClientMessage{ + OnClientSide: true, + Message: msgBytes, + }) + } + return +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if cs.binlog != nil { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + }) + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. 
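// A hedged sketch of observing the header and trailer metadata that
// clientStream.Header/Trailer above expose, using the public grpc.Header and
// grpc.Trailer call options on a unary RPC; the method and messages are
// hypothetical.
package sketch

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func callCapturingMetadata(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) (metadata.MD, metadata.MD, error) {
	var hdr, tlr metadata.MD
	err := cc.Invoke(ctx, "/demo.Service/Do", req, resp, grpc.Header(&hdr), grpc.Trailer(&tlr))
	return hdr, tlr, err
}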
+ cs.finish(err) + + if cs.binlog != nil { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + } + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if cs.binlog != nil { + cs.binlog.Log(&binarylog.ClientHalfClose{ + OnClientSide: true, + }) + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + cs.mu.Unlock() + // For binary logging. only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. + if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. 
+ if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + var tr metadata.MD + if a.s != nil { + a.t.CloseStream(a.s, err) + tr = a.s.Trailer() + } + + if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } + a.done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, + BytesReceived: br, + ServerLoad: balancerload.Parse(tr), + }) + } + if a.statsHandler != nil { + end := &stats.End{ + Client: true, + BeginTime: a.cs.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } + a.statsHandler.HandleRPC(a.cs.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() + } + a.trInfo.tr.Finish() + a.trInfo.tr = nil + } + a.mu.Unlock() +} + +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid a race, the transport is specified separately, instead +// of using ac.transport. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { + if t == nil { + // TODO: return RPC error here?
+ return nil, errors.New("transport provided is nil") + } + // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. + c := &callInfo{} + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. 
+ go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. 
+ as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + } + if err == io.EOF { + return as.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { + as.mu.Lock() + if as.finished { + as.mu.Unlock() + return + } + as.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + if as.s != nil { + as.t.CloseStream(as.s, err) + } + + if err != nil { + as.ac.incrCallsFailed() + } else { + as.ac.incrCallsSucceeded() + } + as.cancel() + as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When called multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	trInfo                *traceInfo
+
+	statsHandler stats.Handler
+
+	binlog *binarylog.MethodLogger
+	// serverHeaderBinlogged indicates whether the server header has been logged.
+	// It is set when either stream.SendHeader() or stream.Send() is called.
+	//
+	// It's only checked in send and sendHeader, doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+	return ss.ctx
+}
+
+func (ss *serverStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	return ss.s.SetHeader(md)
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+	err := ss.t.WriteHeader(ss.s, md)
+	if ss.binlog != nil && !ss.serverHeaderBinlogged {
+		h, _ := ss.s.Header()
+		ss.binlog.Log(&binarylog.ServerHeader{
+			Header: h,
+		})
+		ss.serverHeaderBinlogged = true
+	}
+	return err
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+	if md.Len() == 0 {
+		return
+	}
+	ss.s.SetTrailer(md)
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+				} else {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+		if err != nil && err != io.EOF {
+			st, _ := status.FromError(toRPCErr(err))
+			ss.t.WriteStatus(ss.s, st)
+			// Non-user specified status was sent out. This should be an error
+			// case (as a server side Cancel maybe).
+			//
+			// This is not handled specifically now. User will return a final
+			// status from the service handler, we will log that error instead.
+			// This behavior is similar to an interceptor.
+		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgSent()
+		}
+	}()
+
+	// load hdr, payload, data
+	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+	if err != nil {
+		return err
+	}
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+	}
+	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+		return toRPCErr(err)
+	}
+	if ss.binlog != nil {
+		if !ss.serverHeaderBinlogged {
+			h, _ := ss.s.Header()
+			ss.binlog.Log(&binarylog.ServerHeader{
+				Header: h,
+			})
+			ss.serverHeaderBinlogged = true
+		}
+		ss.binlog.Log(&binarylog.ServerMessage{
+			Message: data,
+		})
+	}
+	if ss.statsHandler != nil {
+		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+	}
+	return nil
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+				} else if err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+		if err != nil && err != io.EOF {
+			st, _ := status.FromError(toRPCErr(err))
+			ss.t.WriteStatus(ss.s, st)
+			// Non-user specified status was sent out. This should be an error
+			// case (as a server side Cancel maybe).
+			//
+			// This is not handled specifically now. User will return a final
+			// status from the service handler, we will log that error instead.
+			// This behavior is similar to an interceptor.
+		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgRecv()
+		}
+	}()
+	var payInfo *payloadInfo
+	if ss.statsHandler != nil || ss.binlog != nil {
+		payInfo = &payloadInfo{}
+	}
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+		if err == io.EOF {
+			if ss.binlog != nil {
+				ss.binlog.Log(&binarylog.ClientHalfClose{})
+			}
+			return err
+		}
+		if err == io.ErrUnexpectedEOF {
+			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		}
+		return toRPCErr(err)
+	}
+	if ss.statsHandler != nil {
+		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+			RecvTime: time.Now(),
+			Payload:  m,
+			// TODO truncate large payload.
+			Data:       payInfo.uncompressedBytes,
+			WireLength: payInfo.wireLength + headerLen,
+			Length:     len(payInfo.uncompressedBytes),
+		})
+	}
+	if ss.binlog != nil {
+		ss.binlog.Log(&binarylog.ClientMessage{
+			Message: payInfo.uncompressedBytes,
+		})
+	}
+	return nil
+}
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+	return Method(stream.Context())
+}
+
+// prepareMsg returns the hdr, payload and data using the compressors passed,
+// or using the passed PreparedMsg.
+func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+	if preparedMsg, ok := m.(*PreparedMsg); ok {
+		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+	}
+	// The input interface is not a prepared msg.
+	// Marshal and Compress the data at this point.
+	data, err = encode(codec, m)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	compData, err := compress(data, cp, comp)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	hdr, payload = msgHeader(data, compData)
+	return hdr, payload, data, nil
+}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
new file mode 100644
index 000000000..caea1ebed
--- /dev/null
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package tap defines the function handles which are executed on the transport
+// layer of gRPC-Go, and related information.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
package tap
+
+import (
+	"context"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+	// FullMethodName is the string of the grpc method (in the format of
+	// /package.service/method).
+	FullMethodName string
+	// TODO: More to be added.
+}
+
+// ServerInHandle defines the function which runs before a new stream is created
+// on the server side. If it returns a non-nil error, the stream will not be
+// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
+// The client will receive an RPC error "code = Unavailable, desc = stream
+// terminated by RST_STREAM with error code: REFUSED_STREAM".
+//
+// It's intended to be used in situations where you don't want to waste the
+// resources to accept the new stream (e.g. rate-limiting). The content of the
+// error will be ignored and won't be sent back to the client. For other
+// general usages, please use interceptors.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) instead of
+// a per-RPC goroutine. Therefore, users should NOT have any
+// blocking/time-consuming work in this handle. Otherwise, all the RPCs would
+// slow down. Also, for the same reason, this handle won't be called
+// concurrently by gRPC.
+type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 000000000..07a2d26b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,123 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing bool
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "pkg.Service".
+func methodFamily(m string) string {
+	m = strings.TrimPrefix(m, "/") // remove leading slash
+	if i := strings.Index(m, "/"); i >= 0 {
+		m = m[:i] // remove everything from second slash
+	}
+	return m
+}
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+	tr        trace.Trace
+	firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+// It may be mutated after construction; remoteAddr specifically may change
+// during client-side use.
+type firstLine struct {
+	mu         sync.Mutex
+	client     bool // whether this is a client (outgoing) RPC
+	remoteAddr net.Addr
+	deadline   time.Duration // may be zero
+}
+
+func (f *firstLine) SetRemoteAddr(addr net.Addr) {
+	f.mu.Lock()
+	f.remoteAddr = addr
+	f.mu.Unlock()
+}
+
+func (f *firstLine) String() string {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	var line bytes.Buffer
+	io.WriteString(&line, "RPC: ")
+	if f.client {
+		io.WriteString(&line, "to")
+	} else {
+		io.WriteString(&line, "from")
+	}
+	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+	if f.deadline != 0 {
+		fmt.Fprint(&line, f.deadline)
+	} else {
+		io.WriteString(&line, "none")
+	}
+	return line.String()
+}
+
+const truncateSize = 100
+
+func truncate(x string, l int) string {
+	if l > len(x) {
+		return x
+	}
+	return x[:l]
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+	sent bool        // whether this is an outgoing payload
+	msg  interface{} // e.g. a proto.Message
+	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+	if p.sent {
+		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
+	}
+	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
+}
+
+type fmtStringer struct {
+	format string
+	a      []interface{}
+}
+
+func (f *fmtStringer) String() string {
+	return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
new file mode 100644
index 000000000..144b0bf9e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/version.go
@@ -0,0 +1,22 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// Version is the current grpc version.
+const Version = "1.33.2"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
new file mode 100644
index 000000000..48652f604
--- /dev/null
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+
+set -ex # Exit on error; debugging enabled.
+set -o pipefail # Fail a pipe if any sub-command fails.
+
+# not makes sure the command passed to it does not exit with a return code of 0.
+not() {
+  # This is required instead of the earlier (! $COMMAND) because subshells and
+  # pipefail don't work the same on Darwin as in Linux.
+  ! "$@"
+}
+
+die() {
+  echo "$@" >&2
+  exit 1
+}
+
+fail_on_output() {
+  tee /dev/stderr | not read
+}
+
+# Check to make sure it's safe to modify the user's git repo.
+git status --porcelain | fail_on_output
+
+# Undo any edits made by this script.
+cleanup() {
+  git reset --hard HEAD
+}
+trap cleanup EXIT
+
+PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
+
+if [[ "$1" = "-install" ]]; then
+  # Check for module support
+  if go help mod >& /dev/null; then
+    # Install the pinned versions as defined in module tools.
+    pushd ./test/tools
+    go install \
+      golang.org/x/lint/golint \
+      golang.org/x/tools/cmd/goimports \
+      honnef.co/go/tools/cmd/staticcheck \
+      github.com/client9/misspell/cmd/misspell
+    popd
+  else
+    # Ye olde `go get` incantation.
+    # Note: this gets the latest version of all tools (vs. the pinned versions
+    # with Go modules).
+    go get -u \
+      golang.org/x/lint/golint \
+      golang.org/x/tools/cmd/goimports \
+      honnef.co/go/tools/cmd/staticcheck \
+      github.com/client9/misspell/cmd/misspell
+  fi
+  if [[ -z "${VET_SKIP_PROTO}" ]]; then
+    if [[ "${TRAVIS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.3.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/travis
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.3.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/runner/go
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif not which protoc > /dev/null; then
+      die "Please install protoc into your path"
+    fi
+  fi
+  exit 0
+elif [[ "$#" -ne 0 ]]; then
+  die "Unknown argument(s): $*"
+fi
+
+# - Ensure all source files contain a copyright message.
+not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
+
+# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
+not grep 'func Test[^(]' *_test.go
+not grep 'func Test[^(]' test/*.go
+
+# - Do not import x/net/context.
+not git grep -l 'x/net/context' -- "*.go"
+
+# - Do not import math/rand for real library code. Use internal/grpcrand for
+# thread safety.
+git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
+
+# - Do not call grpclog directly. Use grpclog.Component instead.
+git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
+
+# - Ensure all ptypes proto packages are renamed when importing.
+not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
+
+# - Ensure all xds proto imports are renamed to *pb or *grpc.
+git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
+
+# - Check imports that are illegal in appengine (until Go 1.11).
+# TODO: Remove when we drop Go 1.10 support
+go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
+
+# - gofmt, goimports, golint (with exceptions for generated code), go vet.
+gofmt -s -d -l . 2>&1 | fail_on_output
+goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+golint ./... 2>&1 | not grep -vE "\.pb\.go:"
+go vet -all ./...
+
+misspell -error .
+
+# - Check that generated proto files are up to date.
+if [[ -z "${VET_SKIP_PROTO}" ]]; then
+  PATH="/home/travis/bin:${PATH}" make proto && \
+    git status --porcelain 2>&1 | fail_on_output || \
+    (git status; git --no-pager diff; exit 1)
+fi
+
+# - Check that our modules are tidy.
+if go help mod >& /dev/null; then
+  find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy'
+  git status --porcelain 2>&1 | fail_on_output || \
+    (git status; git --no-pager diff; exit 1)
+fi
+
+# - Collection of static analysis checks
+#
+# TODO(dfawley): don't use deprecated functions in examples or first-party
+# plugins.
+SC_OUT="$(mktemp)"
+staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
+# Error if anything other than deprecation warnings are printed.
+not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
+# Only ignore the following deprecated types/fields/functions.
+not grep -Fv '.CredsBundle
+.HeaderMap
+.Metadata is deprecated: use Attributes
+.NewAddress
+.NewServiceConfig
+.Type is deprecated: use Attributes
+balancer.ErrTransientFailure
+balancer.Picker
+grpc.CallCustomCodec
+grpc.Code
+grpc.Compressor
+grpc.CustomCodec
+grpc.Decompressor
+grpc.MaxMsgSize
+grpc.MethodConfig
+grpc.NewGZIPCompressor
+grpc.NewGZIPDecompressor
+grpc.RPCCompressor
+grpc.RPCDecompressor
+grpc.ServiceConfig
+grpc.WithBalancerName
+grpc.WithCompressor
+grpc.WithDecompressor
+grpc.WithDialer
+grpc.WithMaxMsgSize
+grpc.WithServiceConfig
+grpc.WithTimeout
+http.CloseNotifier
+info.SecurityVersion
+resolver.Backend
+resolver.GRPCLB
+extDesc.Filename is deprecated
+BuildVersion is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
+proto is deprecated
+xxx_messageInfo_
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+  # Number of files in this go package.
+  fileCount=$(go list -f '{{len .GoFiles}}' $1)
+  if [ ${fileCount} -eq 0 ]; then
+    return 0
+  fi
+  # Number of package errors generated by golint.
+  lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+  # golint complains about every file that's missing the package comment. If the
+  # number of files for this package is greater than the number of errors, there's
+  # at least one file with package comment, good. Otherwise, fail.
+  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+    echo "Package $1 (with ${fileCount} files) is missing package comment"
+    return 1
+  fi
+}
+lint_package_comment() {
+  set +ex
+
+  count=0
+  for i in $(go list ./...); do
+    lint_package_comment_per_package "$i"
+    ((count += $?))
+  done
+
+  set -ex
+  return $count
+}
+lint_package_comment
+
+echo SUCCESS
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e8b5edf8c..9d950f0dd 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -70,7 +70,7 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr
 github.com/containernetworking/plugins/pkg/utils/sysctl
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.19.3
+# github.com/containers/buildah v1.19.4
 github.com/containers/buildah
 github.com/containers/buildah/bind
 github.com/containers/buildah/chroot
@@ -102,6 +102,8 @@ github.com/containers/common/pkg/report
 github.com/containers/common/pkg/report/camelcase
 github.com/containers/common/pkg/retry
 github.com/containers/common/pkg/seccomp
+github.com/containers/common/pkg/secrets
+github.com/containers/common/pkg/secrets/filedriver
 github.com/containers/common/pkg/subscriptions
 github.com/containers/common/pkg/sysinfo
 github.com/containers/common/pkg/umask
@@ -155,16 +157,23 @@ github.com/containers/image/v5/types
 github.com/containers/image/v5/version
 # github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
 github.com/containers/libtrust
-# github.com/containers/ocicrypt v1.0.3
+# github.com/containers/ocicrypt v1.1.0
 github.com/containers/ocicrypt
 github.com/containers/ocicrypt/blockcipher
 github.com/containers/ocicrypt/config
+github.com/containers/ocicrypt/config/keyprovider-config
+github.com/containers/ocicrypt/config/pkcs11config
+github.com/containers/ocicrypt/crypto/pkcs11
+github.com/containers/ocicrypt/helpers
 github.com/containers/ocicrypt/keywrap
 github.com/containers/ocicrypt/keywrap/jwe
+github.com/containers/ocicrypt/keywrap/keyprovider
 github.com/containers/ocicrypt/keywrap/pgp
+github.com/containers/ocicrypt/keywrap/pkcs11
 github.com/containers/ocicrypt/keywrap/pkcs7
 github.com/containers/ocicrypt/spec
 github.com/containers/ocicrypt/utils
+github.com/containers/ocicrypt/utils/keyprovider
 # github.com/containers/psgo v1.5.2
 github.com/containers/psgo
 github.com/containers/psgo/internal/capabilities
@@ -374,6 +383,8 @@ github.com/mattn/go-runewidth
 github.com/mattn/go-shellwords
 # github.com/matttproud/golang_protobuf_extensions v1.0.1
 github.com/matttproud/golang_protobuf_extensions/pbutil
+# github.com/miekg/pkcs11 v1.0.3
+github.com/miekg/pkcs11
 # github.com/mistifyio/go-zfs v2.1.1+incompatible
 github.com/mistifyio/go-zfs
 # github.com/moby/sys/mount v0.1.1
@@ -527,6 +538,8 @@ github.com/sirupsen/logrus/hooks/syslog
 github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 github.com/spf13/pflag
+# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
+github.com/stefanberger/go-pkcs11uri
 # github.com/stretchr/testify v1.7.0
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
@@ -626,7 +639,9 @@ golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/socks
+golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
+golang.org/x/net/trace
 # golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
@@ -676,10 +691,47 @@ google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
 # google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.29.1
+# google.golang.org/grpc v1.33.2
+google.golang.org/grpc
+google.golang.org/grpc/attributes
+google.golang.org/grpc/backoff
+google.golang.org/grpc/balancer
+google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/roundrobin
+google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/codes
+google.golang.org/grpc/connectivity
+google.golang.org/grpc/credentials
+google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
+google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancerload
+google.golang.org/grpc/internal/binarylog
+google.golang.org/grpc/internal/buffer
+google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
+google.golang.org/grpc/internal/envconfig
+google.golang.org/grpc/internal/grpclog
+google.golang.org/grpc/internal/grpcrand
+google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/serviceconfig
 google.golang.org/grpc/internal/status
+google.golang.org/grpc/internal/syscall
+google.golang.org/grpc/internal/transport
+google.golang.org/grpc/keepalive
+google.golang.org/grpc/metadata
+google.golang.org/grpc/peer
+google.golang.org/grpc/resolver
+google.golang.org/grpc/serviceconfig
+google.golang.org/grpc/stats
 google.golang.org/grpc/status
+google.golang.org/grpc/tap
 # google.golang.org/protobuf v1.25.0
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire