From e266ab047e046c7e64e52d4151e5f4a350874ce4 Mon Sep 17 00:00:00 2001 From: Jesse Peterson Date: Wed, 31 May 2023 10:51:46 -0700 Subject: [PATCH] initial import --- .github/dependabot.yml | 13 + .github/workflows/go.yml | 33 ++ .gitignore | 1 + Dockerfile | 7 + LICENSE | 7 + Makefile | 42 ++ README.md | 118 +++++ cmd/nanocmd/api.go | 92 ++++ cmd/nanocmd/main.go | 182 +++++++ cmd/nanocmd/storage.go | 70 +++ cmd/nanocmd/workflows.go | 54 ++ docs/openapi.yaml | 439 +++++++++++++++ docs/operations-guide.md | 336 ++++++++++++ docs/quickstart.md | 236 +++++++++ engine/convert.go | 187 +++++++ engine/convert_test.go | 126 +++++ engine/engine.go | 499 ++++++++++++++++++ engine/http/engine.go | 71 +++ engine/http/event.go | 112 ++++ engine/reg.go | 101 ++++ engine/storage/diskv/diskv.go | 38 ++ engine/storage/diskv/diskv_test.go | 14 + engine/storage/inmem/inmem.go | 22 + engine/storage/inmem/inmem_test.go | 12 + engine/storage/kv/event.go | 149 ++++++ engine/storage/kv/kv.go | 262 +++++++++ engine/storage/kv/prim.go | 346 ++++++++++++ engine/storage/kv/worker.go | 108 ++++ engine/storage/kv/worker_prim.go | 225 ++++++++ engine/storage/storage.go | 231 ++++++++ engine/storage/test/test.go | 446 ++++++++++++++++ engine/storage/test/worker.go | 333 ++++++++++++ engine/testdata/devinfo.plist | 27 + engine/testdata/secinfo.gen.plist | 3 + engine/worker.go | 209 ++++++++ go.mod | 16 + go.sum | 15 + http/api/api.go | 19 + http/http.go | 91 ++++ log/ctxlog/ctxlog.go | 72 +++ log/logger.go | 17 + log/logkeys/logkeys.go | 25 + log/nop.go | 21 + log/stdlogfmt/stdlog.go | 116 ++++ mdm/foss/dump.go | 29 + mdm/foss/foss.go | 257 +++++++++ mdm/foss/process.go | 79 +++ mdm/foss/testdata/tokenupdate.json | 11 + mdm/foss/webhook.go | 95 ++++ mdm/foss/webhook_test.go | 103 ++++ mdm/mdm.go | 84 +++ subsystem/cmdplan/http/http.go | 81 +++ subsystem/cmdplan/storage/diskv/diskv.go | 27 + subsystem/cmdplan/storage/diskv/diskv_test.go | 14 + subsystem/cmdplan/storage/inmem/inmem.go | 16 + 
subsystem/cmdplan/storage/inmem/inmem_test.go | 12 + subsystem/cmdplan/storage/kv/kv.go | 51 ++ subsystem/cmdplan/storage/storage.go | 22 + subsystem/cmdplan/storage/test/test.go | 43 ++ subsystem/filevault/http/http.go | 16 + subsystem/filevault/storage/diskv/diskv.go | 31 ++ subsystem/filevault/storage/inmem/inmem.go | 20 + subsystem/filevault/storage/invprk/invprk.go | 50 ++ .../filevault/storage/invprk/invprk_test.go | 26 + subsystem/filevault/storage/kv/kv.go | 118 +++++ subsystem/filevault/storage/storage.go | 39 ++ subsystem/inventory/http/http.go | 59 +++ subsystem/inventory/storage/diskv/diskv.go | 87 +++ .../inventory/storage/diskv/diskv_test.go | 14 + subsystem/inventory/storage/inmem/inmem.go | 61 +++ .../inventory/storage/inmem/inmem_test.go | 12 + subsystem/inventory/storage/keys.go | 21 + subsystem/inventory/storage/storage.go | 25 + subsystem/inventory/storage/test/test.go | 62 +++ subsystem/profile/http/http.go | 142 +++++ subsystem/profile/storage/diskv/diskv.go | 107 ++++ subsystem/profile/storage/diskv/diskv_test.go | 14 + subsystem/profile/storage/inmem/inmem.go | 84 +++ subsystem/profile/storage/inmem/inmem_test.go | 12 + subsystem/profile/storage/storage.go | 55 ++ subsystem/profile/storage/storage_test.go | 24 + subsystem/profile/storage/test/test.go | 86 +++ utils/cryptoutil/cert.go | 39 ++ utils/kv/kv.go | 45 ++ utils/kv/kvdiskv/kvdiskv.go | 37 ++ utils/kv/kvmap/kvmap.go | 70 +++ utils/mobileconfig/mobileconfig.go | 72 +++ utils/mobileconfig/mobileconfig_test.go | 30 ++ utils/mobileconfig/testdata/test.mobileconfig | 53 ++ utils/uuid/uuid.go | 41 ++ utils/uuid/uuid_test.go | 21 + workflow/cmdplan/workflow.go | 222 ++++++++ workflow/cmdplan/workflow_test.go | 24 + workflow/config.go | 47 ++ workflow/context.go | 100 ++++ workflow/context_test.go | 24 + workflow/doc.go | 79 +++ workflow/event.go | 75 +++ workflow/fvenable/profile.go | 66 +++ workflow/fvenable/workflow.go | 260 +++++++++ workflow/fvrotate/workflow.go | 126 +++++ 
workflow/inventory/testdata/devinfo.plist | 21 + workflow/inventory/testdata/secinfo.plist | 70 +++ workflow/inventory/workflow.go | 154 ++++++ workflow/inventory/workflow_test.go | 141 +++++ workflow/profile/context.go | 26 + workflow/profile/workflow.go | 279 ++++++++++ workflow/step.go | 84 +++ workflow/workflow.go | 46 ++ 109 files changed, 9884 insertions(+) create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/go.yml create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 cmd/nanocmd/api.go create mode 100644 cmd/nanocmd/main.go create mode 100644 cmd/nanocmd/storage.go create mode 100644 cmd/nanocmd/workflows.go create mode 100644 docs/openapi.yaml create mode 100644 docs/operations-guide.md create mode 100644 docs/quickstart.md create mode 100644 engine/convert.go create mode 100644 engine/convert_test.go create mode 100644 engine/engine.go create mode 100644 engine/http/engine.go create mode 100644 engine/http/event.go create mode 100644 engine/reg.go create mode 100644 engine/storage/diskv/diskv.go create mode 100644 engine/storage/diskv/diskv_test.go create mode 100644 engine/storage/inmem/inmem.go create mode 100644 engine/storage/inmem/inmem_test.go create mode 100644 engine/storage/kv/event.go create mode 100644 engine/storage/kv/kv.go create mode 100644 engine/storage/kv/prim.go create mode 100644 engine/storage/kv/worker.go create mode 100644 engine/storage/kv/worker_prim.go create mode 100644 engine/storage/storage.go create mode 100644 engine/storage/test/test.go create mode 100644 engine/storage/test/worker.go create mode 100644 engine/testdata/devinfo.plist create mode 100644 engine/testdata/secinfo.gen.plist create mode 100644 engine/worker.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 http/api/api.go create mode 100644 http/http.go create mode 100644 log/ctxlog/ctxlog.go create mode 
100644 log/logger.go create mode 100644 log/logkeys/logkeys.go create mode 100644 log/nop.go create mode 100644 log/stdlogfmt/stdlog.go create mode 100644 mdm/foss/dump.go create mode 100644 mdm/foss/foss.go create mode 100644 mdm/foss/process.go create mode 100644 mdm/foss/testdata/tokenupdate.json create mode 100644 mdm/foss/webhook.go create mode 100644 mdm/foss/webhook_test.go create mode 100644 mdm/mdm.go create mode 100644 subsystem/cmdplan/http/http.go create mode 100644 subsystem/cmdplan/storage/diskv/diskv.go create mode 100644 subsystem/cmdplan/storage/diskv/diskv_test.go create mode 100644 subsystem/cmdplan/storage/inmem/inmem.go create mode 100644 subsystem/cmdplan/storage/inmem/inmem_test.go create mode 100644 subsystem/cmdplan/storage/kv/kv.go create mode 100644 subsystem/cmdplan/storage/storage.go create mode 100644 subsystem/cmdplan/storage/test/test.go create mode 100644 subsystem/filevault/http/http.go create mode 100644 subsystem/filevault/storage/diskv/diskv.go create mode 100644 subsystem/filevault/storage/inmem/inmem.go create mode 100644 subsystem/filevault/storage/invprk/invprk.go create mode 100644 subsystem/filevault/storage/invprk/invprk_test.go create mode 100644 subsystem/filevault/storage/kv/kv.go create mode 100644 subsystem/filevault/storage/storage.go create mode 100644 subsystem/inventory/http/http.go create mode 100644 subsystem/inventory/storage/diskv/diskv.go create mode 100644 subsystem/inventory/storage/diskv/diskv_test.go create mode 100644 subsystem/inventory/storage/inmem/inmem.go create mode 100644 subsystem/inventory/storage/inmem/inmem_test.go create mode 100644 subsystem/inventory/storage/keys.go create mode 100644 subsystem/inventory/storage/storage.go create mode 100644 subsystem/inventory/storage/test/test.go create mode 100644 subsystem/profile/http/http.go create mode 100644 subsystem/profile/storage/diskv/diskv.go create mode 100644 subsystem/profile/storage/diskv/diskv_test.go create mode 100644 
subsystem/profile/storage/inmem/inmem.go create mode 100644 subsystem/profile/storage/inmem/inmem_test.go create mode 100644 subsystem/profile/storage/storage.go create mode 100644 subsystem/profile/storage/storage_test.go create mode 100644 subsystem/profile/storage/test/test.go create mode 100644 utils/cryptoutil/cert.go create mode 100644 utils/kv/kv.go create mode 100644 utils/kv/kvdiskv/kvdiskv.go create mode 100644 utils/kv/kvmap/kvmap.go create mode 100644 utils/mobileconfig/mobileconfig.go create mode 100644 utils/mobileconfig/mobileconfig_test.go create mode 100644 utils/mobileconfig/testdata/test.mobileconfig create mode 100644 utils/uuid/uuid.go create mode 100644 utils/uuid/uuid_test.go create mode 100644 workflow/cmdplan/workflow.go create mode 100644 workflow/cmdplan/workflow_test.go create mode 100644 workflow/config.go create mode 100644 workflow/context.go create mode 100644 workflow/context_test.go create mode 100644 workflow/doc.go create mode 100644 workflow/event.go create mode 100644 workflow/fvenable/profile.go create mode 100644 workflow/fvenable/workflow.go create mode 100644 workflow/fvrotate/workflow.go create mode 100644 workflow/inventory/testdata/devinfo.plist create mode 100644 workflow/inventory/testdata/secinfo.plist create mode 100644 workflow/inventory/workflow.go create mode 100644 workflow/inventory/workflow_test.go create mode 100644 workflow/profile/context.go create mode 100644 workflow/profile/workflow.go create mode 100644 workflow/step.go create mode 100644 workflow/workflow.go diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..cc3185a --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,13 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" # Don't change this despite the path being .github/workflows + schedule: + # Check for updates to GitHub Actions on the first day of the month + interval: "monthly" + + - package-ecosystem: "gomod" + 
directory: "/" + schedule: + # Check for updates to Go modules on the first day of the month + interval: "monthly" diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 0000000..bf202c6 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,33 @@ +name: Go + +on: + push: + branches: [ main ] + pull_request: + types: [opened, reopened, synchronize] + +jobs: + build-test: + name: Build, test, and format + strategy: + matrix: + go-version: [1.19.x] + platform: [ubuntu-latest, macos-latest, windows-latest] + runs-on: ${{ matrix.platform }} + steps: + - uses: actions/checkout@v3 + + - name: setup go + uses: actions/setup-go@v4 + with: + go-version: ${{ matrix.go-version }} + + - name: Format + if: matrix.platform == 'ubuntu-latest' + run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi + + - name: Build + run: go build -v ./... + + - name: Test + run: go test -v -race ./... diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..19e38ae --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/nanocmd-* diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..e6274fe --- /dev/null +++ b/Dockerfile @@ -0,0 +1,7 @@ +FROM gcr.io/distroless/static + +COPY nanocmd-linux-amd64 /nanocmd + +EXPOSE 9003 + +ENTRYPOINT ["/nanocmd"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..07e230a --- /dev/null +++ b/LICENSE @@ -0,0 +1,7 @@ +Copyright 2023 Jesse Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..8b00fae --- /dev/null +++ b/Makefile @@ -0,0 +1,42 @@ +VERSION = $(shell git describe --tags --always --dirty) +LDFLAGS=-ldflags "-X main.version=$(VERSION)" +OSARCH=$(shell go env GOHOSTOS)-$(shell go env GOHOSTARCH) + +NANOCMD=\ + nanocmd-darwin-amd64 \ + nanocmd-darwin-arm64 \ + nanocmd-linux-amd64 + +my: nanocmd-$(OSARCH) + +docker: nanocmd-linux-amd64 + +$(NANOCMD): cmd/nanocmd + GOOS=$(word 2,$(subst -, ,$@)) GOARCH=$(word 3,$(subst -, ,$(subst .exe,,$@))) go build $(LDFLAGS) -o $@ ./$< + +nanocmd-%-$(VERSION).zip: nanocmd-%.exe + rm -rf $(subst .zip,,$@) + mkdir $(subst .zip,,$@) + ln $^ $(subst .zip,,$@) + zip -r $@ $(subst .zip,,$@) + rm -rf $(subst .zip,,$@) + +nanocmd-%-$(VERSION).zip: nanocmd-% + rm -rf $(subst .zip,,$@) + mkdir $(subst .zip,,$@) + ln $^ $(subst .zip,,$@) + zip -r $@ $(subst .zip,,$@) + rm -rf $(subst .zip,,$@) + +clean: + rm -rf nanocmd-* + +release: \ + nanocmd-darwin-amd64-$(VERSION).zip \ + nanocmd-darwin-arm64-$(VERSION).zip \ + nanocmd-linux-amd64-$(VERSION).zip + +test: + go test -v -cover -race ./... 
+ +.PHONY: my docker $(NANOCMD) clean release test diff --git a/README.md b/README.md new file mode 100644 index 0000000..c4aecc9 --- /dev/null +++ b/README.md @@ -0,0 +1,118 @@ +# NanoCMD + +[![Go](https://github.com/micromdm/nanocmd/workflows/Go/badge.svg)](https://github.com/micromdm/nanocmd/actions) + +NanoCMD is a modular Go library and reference server for abstracting Apple MDM commands and responses into a concept of workflows. + +The NanoCMD project is comprised of three major components: + +* **Workflows**: domain-specific sequences of Apple MDM "v1" commands and responses. +* **Subsystems**: simple reference implementations of domain-specific MDM infrastructure. +* The **Workflow Engine**: intermediary and logisitcal coordinator between MDM servers and workflows. + +These are discussed more below. + +NanoCMD tries to be modular, componentized, and somewhat unopinionated. While it ships with a reference server that can be used itself in a turn-key fashion the project can also be imported as a library to build your own workflows, implement custom subsystems or adapters, etc. + +## Getting started & Documentation + +- [Quickstart](docs/quickstart.md) +A guide to getting NanoCMD up and running quickly. + +- [Operations Guide](docs/operations-guide.md) +A brief overview of operating the NanoCMD server including command line and API interfaces. + +## What NanoCMD is not + +NanoCMD is neat. But there are some things it does not do. There are also some things that are out of scope that it may never do. Here's some of the things NanoCMD is not: + +* *Not* a complete MDM server solution. As we like to say: it is not a product. 😉 +* *Not* an exhaustive inventory of all MDM commands, workflows, or subsystems. This project is primarily focused on the engine and workflow interface. +* We expect that Declarative Management will lessen the need for NanoCMD over time. See the note below about DDM. +* *Not* a "fleet management" tool or "orchestrator" (at least not yet). 
The NanoCMD server is currently ad-hoc/API and event driven. It does not have a concept of a "fleet" or automatically managing workflows _across_ a group of machines. At least not yet. + * You can trivally script this capability with your own list of devices with e.g. `curl` but it is not built-in. + * We may never get this ability and instead focus on Declarative Management. +* *Not* all encompasing (i.e. an MDM server product). NanoCMD is limited in scope. Before submitting a PR for a major feature please drop the maintainers a message to discuss first. + +## Overview + +Why does one need all this complexity related to MDM commands? In short: because some (sets of) MDM commands require it. Take a look at this [blog post from Kandji about MDM software updates](https://blog.kandji.io/managing-software-updates-mdm). Notice the description of the "flow" of these MDM commands and diagram of the back-and-forth of the commands. This is just the reality of some MDM command worklfows. + +NanoCMD tries to provide a mechanism with its workflow APIs to accomplish this sort of back-and-forth for MDM commands. Here's a sequence diagram to (hopefully) better illustrate. 
It outlines the general flow into and out of the workflow engine and workflows for a given workflow start: + +```mermaid +sequenceDiagram + autonumber + actor Start as Start (API/Event) + box NanoCMD + participant Engine + participant Workflow + end + actor Enrollment as Enrollment/Device + Start->>Engine: Start workflow for n ID(s) + Engine->>+Workflow: Start workflow for n ID(s) + loop Next Step or Polling + Workflow->>-Engine: Send Step (n Command(s)) + Engine->>+Enrollment: MDM command(s) from Step + Enrollment->>-Engine: MDM response(s) for Step + Engine->>+Workflow: Step Complete + Workflow-->>-Engine: (Optional) Send next Step (n Command(s)) + break Timeout + Engine-->>Workflow: Step Timeout + end + end +``` + +Not shown in this diagram is that the output of most workflows talk to the subsystems to store/persist data. This diagram is mostly illustrating the workflow sequence between the workflow, engine, and enrollment (device). + +### Workflows + +Workflows adhere to the set of interfaces that the workflow engine provides for coordinating MDM commands and responses. A workflow is the specific implementation using those interfaces. + +The intent with workflows is to take away ~~some~~a lot of the drudgery of dealing with sending and receiving MDM commands and processing responses. By doing this we can give more focus to the higher-level goals of what those MDM commands and responses are supposed to accomplish. To that end the workflows are provided with a number of features by the workflow engine. For example as a workflow many things are taken care of for you automatically: + +* Command responses are "routed" back to you after you send them +* Command responses are unmarshalled into the correct and specific MDM *structured* response type +* Multiple grouped commands (called *steps*) are recevied all at once when they complete. You don't need to track ordering or complete status. 
+* Straight-forward coordination of *sequences* of MDM commands — especially those that need to take different action depending on the outcome of previous commands. +* "Future" command (step) scheduling. This effectively allows for ad-hoc context-aware temporary command *polling* +* Consistent context interface that can be passed between steps. Storage/persistence of this context is also transparently handled by the engine for you. +* In addition to workflow steps you can optionally get notified of any *arbitrary* command type that was sent by the engine — by any workflow. +* Optionally get notified and take action on MDM *events* (such as enrollment or check-out) + +As well, the engine works to make sure workflows don't have to worry about things like: + +* `NotNow` responses: the engine keeps track of actual command completion and only hands over completed commands. +* Re-sending push notifications for outstanding steps (MDM commands) +* "Exclusivity" tracking: the engine, by default, prevents multiple workflows from running at a time for an enrollment. This prevents "stacking" of steps/commands being queued for a device that hasn't yet dealt with its previous set of commands. +* Steps timeouts: Workflow steps can configure a Timeout that the engine manages. The workflow will get notified when that timeout elapses without command responses. +* Marshaling and unmarshaling proper commands: the engine knows which command responses came from what type of command Request Types, so it'll properly hand over the correct already-unmarshalled command responses for you to work with. + +The workflow interface is relatively simple with much of the heavy lifting and logistics being taken care of by the engine. This frees the workflow developer to concentrate on accomplishing useful things with the MDM commands rather than worry about the above logistics. + +The [operations guide](docs/operations-guide.md) discusses the specific workflows included in NanoCMD. 
+ +### Subsystems + +Subsystems are "reference" implementations of MDM "infrastructure." The included subsystems are domain-specific, limited in scope, and generally simple. For example one subsystem is for Configuration Profiles. The subsystem provides the capability for working with its data (e.g. via the API). But the real usefulness comes from workflows that need to use this functionality. E.g. some workflows depend on having access to read profiles. The profile subsystem facilitates that capability. + +Note that "subsystem" is just the name of NanoCMD's built-in MDM/domain-specific infrastructure. While they do have a shared similar design they need not be treated as design advice. If NanoCMD is imported into your project and/or you use it as a library then workflows can interface with whatever infrastructure they need in whatever way makes sense for them. + +The [operations guide](docs/operations-guide.md) discusses the specific subsystems included in NanoCMD. + +### Engine + +The workflow engine is what coordinates MDM commands and responses with the workflows interface. It communicates with MDM servers, including sending MDM commands and receiving responses and events, and coordinates most of the underlying data so that the workflows don't have to. + +As just one (important) example it keeps track of the Request Type of an MDM command that is enqueued to an MDM server and associates this with the command UUID. In this way when we receive the response command UUID we can lookup the Request Type of the original command and instantiate the correct response type. This all happens before responses are delivered to the workflow so a workflow doesn't need to worry about that bookkeeping. It can just deal with the responses to the commands it sent. + +## What about Declarative Device Management? + +It would seem this is an odd project to release this late in the MDM game. So why invest in this? 
While we fully realize MDM "v1" commands and responses will likely be deprecated in favor of Declarative Management counterparts, there's a few reasons to continue with this: + +There's a few reasons: + +* Older devices/OSes still need to support MDM v1. +* Some MDM operations may continue to be (or stay) MDM v1-only. +* Bugs or delays in Declarative Management features. +* At the time it was written the workflows that were required were not avaialble as Declarative Management functionality yet. diff --git a/cmd/nanocmd/api.go b/cmd/nanocmd/api.go new file mode 100644 index 0000000..a8006b5 --- /dev/null +++ b/cmd/nanocmd/api.go @@ -0,0 +1,92 @@ +package main + +import ( + "github.com/alexedwards/flow" + enginehttp "github.com/micromdm/nanocmd/engine/http" + "github.com/micromdm/nanocmd/log" + cmdplanhttp "github.com/micromdm/nanocmd/subsystem/cmdplan/http" + fvenablehttp "github.com/micromdm/nanocmd/subsystem/filevault/http" + invhttp "github.com/micromdm/nanocmd/subsystem/inventory/http" + profhttp "github.com/micromdm/nanocmd/subsystem/profile/http" +) + +type engineLike interface { + enginehttp.WorkflowNameChecker + enginehttp.WorkflowStarter +} + +func handlers(mux *flow.Mux, logger log.Logger, e engineLike, s *storageConfig) { + // engine (workflow) + + mux.Handle( + "/v1/workflow/:name/start", + enginehttp.StartWorkflowHandler(e, logger.With("handler", "start workflow")), + "POST", + ) + + // engine (event subscriptions) + + mux.Handle( + "/v1/event/:name", + enginehttp.GetHandler(s.event, logger.With("handler", "get event")), + "GET", + ) + + mux.Handle( + "/v1/event/:name", + enginehttp.PutHandler(s.event, e, logger.With("handler", "put event")), + "PUT", + ) + + // inventory + + mux.Handle( + "/v1/inventory", + invhttp.RetrieveInventory(s.inventory, logger.With("handler", "retrieve enrollments")), + "GET", + ) + + // profiles + + mux.Handle( + "/v1/profile/:name", + profhttp.StoreProfileHandler(s.profile, logger.With("handler", "store profile")), + "PUT", 
+ ) + + mux.Handle( + "/v1/profile/:name", + profhttp.GetProfileHandler(s.profile, logger.With("handler", "get raw profile")), + "GET", + ) + + mux.Handle( + "/v1/profile/:name", + profhttp.DeleteProfileHandler(s.profile, logger.With("handler", "delete profile")), + "DELETE", + ) + + mux.Handle( + "/v1/profiles", + profhttp.GetProfilesHandler(s.profile, logger.With("handler", "get profiles")), + "GET", + ) + + // fvenable + + mux.Handle("/v1/fvenable/profiletemplate", fvenablehttp.GetProfileTemplate(), "GET") + + // cmdplan + + mux.Handle( + "/v1/cmdplan/:name", + cmdplanhttp.GetHandler(s.cmdplan, logger.With("handler", "get cmdplan")), + "GET", + ) + + mux.Handle( + "/v1/cmdplan/:name", + cmdplanhttp.PutHandler(s.cmdplan, logger.With("handler", "put cmdplan")), + "PUT", + ) +} diff --git a/cmd/nanocmd/main.go b/cmd/nanocmd/main.go new file mode 100644 index 0000000..6c8af49 --- /dev/null +++ b/cmd/nanocmd/main.go @@ -0,0 +1,182 @@ +// Package main starts a NanoCMD server. +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "math/rand" + "net/http" + "os" + "time" + + "github.com/micromdm/nanocmd/engine" + httpcwe "github.com/micromdm/nanocmd/http" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/log/stdlogfmt" + "github.com/micromdm/nanocmd/mdm/foss" + + "github.com/alexedwards/flow" +) + +// overridden by -ldflags -X +var version = "unknown" + +const ( + apiUsername = "nanocmd" + apiRealm = "nanocmd" +) + +func main() { + var ( + flDebug = flag.Bool("debug", false, "log debug messages") + flListen = flag.String("listen", ":9003", "HTTP listen address") + flVersion = flag.Bool("version", false, "print version") + flDumpWH = flag.Bool("dump-webhook", false, "dump webhook input") + flAPIKey = flag.String("api", "", "API key for API endpoints") + flEnqURL = flag.String("enqueue-url", "", "URL of MDM server enqueue endpoint") + flPushURL = flag.String("push-url", "", "URL of MDM server push endpoint") + flEnqAPI = 
flag.String("enqueue-api", "", "MDM server API key") + flStorage = flag.String("storage", "file", "name of storage backend") + flDSN = flag.String("storage-dsn", "", "data source name (e.g. connection string or path)") + flMicro = flag.Bool("micromdm", false, "MicroMDM-style command submission") + flWorkSec = flag.Uint("worker-interval", uint(engine.DefaultDuration/time.Second), "interval for worker in seconds") + flPushSec = flag.Uint("repush-interval", uint(engine.DefaultRePushDuration/time.Second), "interval for repushes in seconds") + flStTOSec = flag.Uint("step-timeout", uint(engine.DefaultTimeout/time.Second), "default step timeout in seconds") + ) + flag.Parse() + + if *flVersion { + fmt.Println(version) + return + } + + logger := stdlogfmt.New(stdlogfmt.WithDebugFlag(*flDebug)) + + if *flEnqURL == "" || *flEnqAPI == "" || *flPushURL == "" { + logger.Info(logkeys.Error, "enqueue URL, push URL, and API required") + os.Exit(1) + } + + // configure storage + storage, err := parseStorage(*flStorage, *flDSN) + if err != nil { + logger.Info(logkeys.Message, "parse storage", logkeys.Error, err) + os.Exit(1) + } + + // configure our "MDM" i.e. how we send commands and receive responses + opts := []foss.Option{ + foss.WithLogger(logger.With("service", "mdm")), + foss.WithPush(*flPushURL), + } + if *flMicro { + opts = append(opts, foss.WithMicroMDM()) + } + fossMDM, err := foss.NewFossMDM(*flEnqURL, *flEnqAPI, opts...) + if err != nil { + logger.Info(logkeys.Message, "creating enqueuer", logkeys.Error, err) + os.Exit(1) + } + + // configure the workflow engine + eOpts := []engine.Option{engine.WithLogger(logger.With("service", "engine"))} + if *flStTOSec > 0 { + eOpts = append(eOpts, engine.WithDefaultTimeout(time.Second*time.Duration(*flStTOSec))) + } + if storage.event != nil { + eOpts = append(eOpts, engine.WithEventStorage(storage.event)) + } + e := engine.New(storage.engine, fossMDM, eOpts...) 
+ + // configure the workflow engine worker (async runner/job) + var eWorker *engine.Worker + if *flWorkSec > 0 { + wOpts := []engine.WorkerOption{ + engine.WithWorkerLogger(logger.With("service", "engine worker")), + engine.WithWorkerDuration(time.Second * time.Duration(*flWorkSec)), + } + if *flPushSec > 0 { + wOpts = append(wOpts, engine.WithWorkerRePushDuration(time.Second*time.Duration(*flPushSec))) + } + eWorker = engine.NewWorker( + e, + storage.engine, + fossMDM, + wOpts..., + ) + } + + // register workflows with the engine + err = registerWorkflows(logger, e, storage, e) + if err != nil { + logger.Info(logkeys.Message, "registering workflows", logkeys.Error, err) + os.Exit(1) + } + + mux := flow.New() + + mux.Handle("/version", httpcwe.VersionHandler(version)) + + var eventHandler foss.MDMEventReceiver = e + if *flDumpWH { + eventHandler = foss.NewMDMEventDumper(eventHandler, os.Stdout) + } + var h http.Handler = foss.WebhookHandler(eventHandler, logger.With("handler", "webhook")) + if *flDumpWH { + h = httpcwe.DumpHandler(h, os.Stdout) + } + + mux.Handle("/webhook", h) + + if *flAPIKey != "" { + mux.Group(func(mux *flow.Mux) { + mux.Use(func(h http.Handler) http.Handler { + return httpcwe.BasicAuthMiddleware(h, apiUsername, *flAPIKey, apiRealm) + }) + + // register all of our HTTP handlers + handlers(mux, logger, e, storage) + }) + } + + if eWorker != nil { + go func() { + err := eWorker.Run(context.Background()) + logs := []interface{}{logkeys.Message, "engine worker stopped"} + if err != nil { + logger.Info(append(logs, logkeys.Error, err)...) 
+ return + } + logger.Debug(logs) + }() + } + + // seed for newTraceID + rand.Seed(time.Now().UnixNano()) + + logger.Info(logkeys.Message, "starting server", "listen", *flListen) + err = http.ListenAndServe(*flListen, httpcwe.TraceLoggingMiddleware(mux, logger.With("handler", "log"), newTraceID)) + logs := []interface{}{logkeys.Message, "server shutdown"} + if err != nil { + logs = append(logs, logkeys.Error, err) + } + logger.Info(logs...) +} + +type NullHandler struct{} + +func (h *NullHandler) WebhookConnectEvent(ctx context.Context, id string, uuid string, raw []byte) error { + return errors.New("[*NullHandler WebhookConnectEvent] not implemented") +} + +// newTraceID generates a new HTTP trace ID for context logging. +// Currently this just makes a random string. This would be better +// served by e.g. https://github.com/oklog/ulid or something like +// https://opentelemetry.io/ someday. +func newTraceID(_ *http.Request) string { + b := make([]byte, 8) + rand.Read(b) + return fmt.Sprintf("%x", b) +} diff --git a/cmd/nanocmd/storage.go b/cmd/nanocmd/storage.go new file mode 100644 index 0000000..b002f3b --- /dev/null +++ b/cmd/nanocmd/storage.go @@ -0,0 +1,70 @@ +package main + +import ( + "fmt" + + storageeng "github.com/micromdm/nanocmd/engine/storage" + storageengdiskv "github.com/micromdm/nanocmd/engine/storage/diskv" + storageenginmem "github.com/micromdm/nanocmd/engine/storage/inmem" + storagecmdplan "github.com/micromdm/nanocmd/subsystem/cmdplan/storage" + storagecmdplandiskv "github.com/micromdm/nanocmd/subsystem/cmdplan/storage/diskv" + storagecmdplaninmem "github.com/micromdm/nanocmd/subsystem/cmdplan/storage/inmem" + storagefv "github.com/micromdm/nanocmd/subsystem/filevault/storage" + storagefvdiskv "github.com/micromdm/nanocmd/subsystem/filevault/storage/diskv" + storagefvinmem "github.com/micromdm/nanocmd/subsystem/filevault/storage/inmem" + storagefvinvprk "github.com/micromdm/nanocmd/subsystem/filevault/storage/invprk" + storageinv 
"github.com/micromdm/nanocmd/subsystem/inventory/storage" + storageinvdiskv "github.com/micromdm/nanocmd/subsystem/inventory/storage/diskv" + storageinvinmem "github.com/micromdm/nanocmd/subsystem/inventory/storage/inmem" + storageprof "github.com/micromdm/nanocmd/subsystem/profile/storage" + storageprofdiskv "github.com/micromdm/nanocmd/subsystem/profile/storage/diskv" + storageprofinmem "github.com/micromdm/nanocmd/subsystem/profile/storage/inmem" +) + +type storageConfig struct { + inventory storageinv.Storage + engine storageeng.AllStorage + profile storageprof.Storage + cmdplan storagecmdplan.Storage + event storageeng.EventSubscriptionStorage + filevault storagefv.FVRotate +} + +func parseStorage(name, dsn string) (*storageConfig, error) { + switch name { + case "inmem": + inv := storageinvinmem.New() + fv, err := storagefvinmem.New(storagefvinvprk.NewInvPRK(inv)) + if err != nil { + return nil, fmt.Errorf("creating filevault inmem storage: %w", err) + } + eng := storageenginmem.New() + return &storageConfig{ + engine: eng, + inventory: inv, + profile: storageprofinmem.New(), + cmdplan: storagecmdplaninmem.New(), + event: eng, + filevault: fv, + }, nil + case "file", "diskv": + if dsn == "" { + dsn = "db" + } + inv := storageinvdiskv.New(dsn) + fv, err := storagefvdiskv.New(dsn, storagefvinvprk.NewInvPRK(inv)) + if err != nil { + return nil, fmt.Errorf("creating filevault inmem storage: %w", err) + } + eng := storageengdiskv.New(dsn) + return &storageConfig{ + engine: eng, + inventory: inv, + profile: storageprofdiskv.New(dsn), + cmdplan: storagecmdplandiskv.New(dsn), + event: eng, + filevault: fv, + }, nil + } + return nil, fmt.Errorf("unknown storage: %s", name) +} diff --git a/cmd/nanocmd/workflows.go b/cmd/nanocmd/workflows.go new file mode 100644 index 0000000..ec56db3 --- /dev/null +++ b/cmd/nanocmd/workflows.go @@ -0,0 +1,54 @@ +package main + +import ( + "fmt" + + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/workflow" + 
"github.com/micromdm/nanocmd/workflow/cmdplan" + "github.com/micromdm/nanocmd/workflow/fvenable" + "github.com/micromdm/nanocmd/workflow/fvrotate" + "github.com/micromdm/nanocmd/workflow/inventory" + "github.com/micromdm/nanocmd/workflow/profile" +) + +type registerer interface { + RegisterWorkflow(w workflow.Workflow) error +} + +func registerWorkflows(logger log.Logger, r registerer, s *storageConfig, e workflow.StepEnqueuer) error { + var w workflow.Workflow + var err error + + if w, err = inventory.New(e, s.inventory); err != nil { + return fmt.Errorf("creating inventory workflow: %w", err) + } else if err = r.RegisterWorkflow(w); err != nil { + return fmt.Errorf("registering inventory workflow: %w", err) + } + + if w, err = profile.New(e, s.profile, profile.WithLogger(logger)); err != nil { + return fmt.Errorf("creating profile workflow: %w", err) + } else if err = r.RegisterWorkflow(w); err != nil { + return fmt.Errorf("registering profile workflow: %w", err) + } + + if w, err = fvenable.New(e, s.filevault, s.profile, fvenable.WithLogger(logger)); err != nil { + return fmt.Errorf("creating fvenable workflow: %w", err) + } else if err = r.RegisterWorkflow(w); err != nil { + return fmt.Errorf("registering fvenable workflow: %w", err) + } + + if w, err = fvrotate.New(e, s.filevault, fvrotate.WithLogger(logger)); err != nil { + return fmt.Errorf("creating fvrotate workflow: %w", err) + } else if err = r.RegisterWorkflow(w); err != nil { + return fmt.Errorf("registering fvrotate workflow: %w", err) + } + + if w, err = cmdplan.New(e, s.cmdplan, s.profile, cmdplan.WithLogger(logger)); err != nil { + return fmt.Errorf("creating cmdplan workflow: %w", err) + } else if err = r.RegisterWorkflow(w); err != nil { + return fmt.Errorf("registering cmdplan workflow: %w", err) + } + + return nil +} diff --git a/docs/openapi.yaml b/docs/openapi.yaml new file mode 100644 index 0000000..1c822f7 --- /dev/null +++ b/docs/openapi.yaml @@ -0,0 +1,439 @@ +openapi: 3.0.0 +info: + 
version: 0.1.0 + title: NanoCMD server API +servers: + - url: http://[::1]:9003/ +paths: + /version: + get: + description: Returns the running NanoCMD server version + responses: + '200': + description: Successful response + content: + application/json: + schema: + type: object + properties: + version: + type: string + example: "v0.1.0" + /webhook: + post: + description: Handler for MicroMDM-compatible webhook callback. This endpoint is intended to be called by MicroMDM (or NanoMDM, or other compatible MDM server) with MDM events and command responses. See MicroMDM (or NanoMDM) for complete request body format. + requestBody: + description: Webhook data. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Webhook' + responses: + '200': + description: The webhook was processed without error. + '400': + $ref: '#/components/responses/BadRequest' + '500': + $ref: '#/components/responses/Error' + /v1/workflow/{name}/start: + post: + description: Start a workflow. + security: + - basicAuth: [] + responses: + '200': + description: Workflow successfully started. + content: + application/json: + schema: + type: object + properties: + instance_id: + type: string + example: 71da093b-6d0a-4ba1-992c-cf911e0115d4 + description: The instance ID of the started step. All follow-on workflow steps should descend from and keep this instance ID when queueing commands. + '401': + $ref: '#/components/responses/UnauthorizedError' + '400': + $ref: '#/components/responses/JSONBadRequest' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - $ref: '#/components/parameters/workflowName' + - $ref: '#/components/parameters/enrollmentID' + - $ref: '#/components/parameters/context' + /v1/event/{name}: + get: + description: Retrieve the event subscription. + security: + - basicAuth: [] + responses: + '200': + description: Event Subscription. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/EventSubscription' + '401': + $ref: '#/components/responses/UnauthorizedError' + '400': + $ref: '#/components/responses/JSONBadRequest' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - $ref: '#/components/parameters/eventName' + put: + description: Store the event subscription provided in the request body. + security: + - basicAuth: [] + requestBody: + description: Event Subscription. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EventSubscription' + responses: + '204': + description: Event Subscription successfully stored. + '401': + $ref: '#/components/responses/UnauthorizedError' + '400': + $ref: '#/components/responses/JSONBadRequest' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - $ref: '#/components/parameters/eventName' + /v1/fvenable/profiletemplate: + get: + description: Returns the FileVault enable Configuration Profile template. + security: + - basicAuth: [] + responses: + '200': + description: FileVault enable profile template mobileconfig. + content: + application/x-apple-aspen-config: + schema: + $ref: '#/components/schemas/Plist' + '401': + $ref: '#/components/responses/UnauthorizedError' + /v1/profile/{name}: + get: + description: Fetches the named raw profile. + security: + - basicAuth: [] + responses: + '200': + description: Raw profile mobileconfig. + content: + application/x-apple-aspen-config: + schema: + $ref: '#/components/schemas/Plist' + '401': + $ref: '#/components/responses/UnauthorizedError' + '400': + $ref: '#/components/responses/JSONBadRequest' + '500': + $ref: '#/components/responses/JSONError' + put: + description: Uploads a raw profile. Signed profiles also supported. + security: + - basicAuth: [] + requestBody: + description: Raw profile mobileconfig. 
+ required: true + content: + application/x-apple-aspen-config: + schema: + $ref: '#/components/schemas/Plist' + responses: + '204': + description: Profile successfully stored. + '401': + $ref: '#/components/responses/UnauthorizedError' + '400': + $ref: '#/components/responses/JSONBadRequest' + '500': + $ref: '#/components/responses/JSONError' + delete: + description: Deletes the named profile. + security: + - basicAuth: [] + responses: + '204': + description: Profile successfully deleted. + '401': + $ref: '#/components/responses/UnauthorizedError' + '400': + $ref: '#/components/responses/JSONBadRequest' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - $ref: '#/components/parameters/profileName' + /v1/profiles: + get: + description: Retrieve profile metadata. + security: + - basicAuth: [] + responses: + '200': + description: Profile metadata. + content: + application/json: + schema: + type: object + additionalProperties: + $ref: '#/components/schemas/Profile' + '401': + $ref: '#/components/responses/UnauthorizedError' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - in: query + name: name + description: User-defined name of profile. + schema: + type: array + items: + type: string + example: myprofile + required: false + /v1/cmdplan/{name}: + get: + description: Retrieve and return a named command plan as JSON. + security: + - basicAuth: [] + responses: + '200': + description: Command plan. + content: + application/json: + schema: + $ref: '#/components/schemas/CMDPlan' + '400': + $ref: '#/components/responses/JSONBadRequest' + '401': + $ref: '#/components/responses/UnauthorizedError' + '500': + $ref: '#/components/responses/JSONError' + put: + description: Upload a named JSON command plan. + security: + - basicAuth: [] + requestBody: + description: Command plan. 
+ required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CMDPlan' + responses: + '204': + description: Successful upload of command plan. + '400': + $ref: '#/components/responses/JSONBadRequest' + '401': + $ref: '#/components/responses/UnauthorizedError' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - $ref: '#/components/parameters/cmdPlanName' + /v1/inventory: + get: + description: Retrieve inventory data for enrollment IDs. + security: + - basicAuth: [] + responses: + '200': + description: Inventory data for enrollment IDs. Note the keys returned per enrollment ID can be, essentially, arbitrary (even most/many will be standard). + content: + application/json: + schema: + type: object + additionalProperties: + type: object + properties: + serial_number: + type: string + model: + type: string + example: + 59FBE185-5E77-4493-B28B-272AD4138CA7: {serial_number: ZTMXJQTLFX, model: "VirtualMac2,1"} + '400': + $ref: '#/components/responses/JSONBadRequest' + '401': + $ref: '#/components/responses/UnauthorizedError' + '500': + $ref: '#/components/responses/JSONError' + parameters: + - $ref: '#/components/parameters/enrollmentID' +components: + parameters: + enrollmentID: + name: id + in: query + description: Enrollment ID. Unique identifier of MDM enrollment. Often a device UDID or a user channel UUID. + required: true + explode: true + style: form + schema: + type: array + items: + type: string + minItems: 1 + example: ["CFF1D100-BECC-4EA4-8445-2B87E2A87D7F", "A3FAAA18-50C6-4337-B5CC-43376F070DB8"] + workflowName: + name: name + in: path + description: Name of NanoCMD workflow. + required: true + style: simple + schema: + type: string + example: 'io.micromdm.wf.example.v1' + eventName: + name: name + in: path + description: User-defined name of Event Subscription. 
+ required: true + style: simple + schema: + type: string + example: myeventname + cmdPlanName: + name: name + in: path + description: User-defined name of Command Plan. + required: true + style: simple + schema: + type: string + example: mycmdplan + context: + name: context + in: query + description: Workflow-dependent context. + required: false + schema: + type: string + profileName: + name: name + in: path + description: User-defined name of Profile. + required: true + style: simple + schema: + type: string + example: myprofile + securitySchemes: + basicAuth: + type: http + scheme: basic + responses: + UnauthorizedError: + description: API key is missing or invalid. + headers: + WWW-Authenticate: + schema: + type: string + BadRequest: + description: There was a problem with the supplied request. The request was in an incorrect format or other request data error. See server logs for more information. + content: + text/plain: + schema: + type: string + example: Bad Request + Error: + description: An internal server error occured on this endpoint. See server logs for more information. + content: + text/plain: + schema: + type: string + example: Internal Server Error + JSONBadRequest: + description: There was a problem with the supplied request. The request was in an incorrect format or other request data error. + content: + application/json: + schema: + $ref: '#/components/schemas/JSONError' + JSONError: + description: An internal server error occured on this endpoint. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/JSONError' + schemas: + CMDPlan: + type: object + properties: + profile_names: + type: array + items: + type: string + example: "profile1" + manifest_urls: + type: array + items: + type: string + example: "https://example.com/manifest" + format: url + device_configured: + type: boolean + Profile: + type: object + properties: + identifier: + type: string + example: com.example.profile + uuid: + type: string + example: D8F1F355-99EE-4A63-88DE-FBBBFCFF4DB6 + EventSubscription: + type: object + required: [event, workflow] + properties: + event: + type: string + description: Event type to subscribe to. + enum: [Enrollment, Authenticate, TokenUpdate, CheckOut] + workflow: + type: string + description: Name of NanoCMD workflow. + example: "io.micromdm.wf.example.v1" + context: + type: string + description: Workflow-dependent context. + JSONError: + type: object + properties: + error: + type: string + example: "it was sunny outside" + Webhook: + type: object + properties: + topic: + type: string + example: mdm.Connect + event_id: + type: string + created_at: + type: string + format: date-time + acknowledge_event: + type: object + checkin_event: + type: object + Plist: + type: string + description: Apple XML property list. Depending on the context may or may not be CMS/PKCS#7 signed. + example: |- + + + + + ... + + diff --git a/docs/operations-guide.md b/docs/operations-guide.md new file mode 100644 index 0000000..099a860 --- /dev/null +++ b/docs/operations-guide.md @@ -0,0 +1,336 @@ +# NanoCMD Operations Guide + +This is a brief overview of the various flags, APIs, and other topics related to the operation of the NanoCMD server. + +## NanoCMD server + +### Command line flags + +Additional context and description for the NanoCMD server command line flags follows. The `-h` flag will print the supported flags for the server. 
+ +#### -api string + + * API key for API endpoints + +API authorization in NanoCMD is simply HTTP Basic authentication using "nanocmd" as the username and this API key as the password. + +#### -debug + + * log debug messages + +Enable additional debug logging. + +#### -dump-webhook + + * dump webhook input + +For each incoming webhook response this flag dumps the HTTP body to standard output. For the "mdm.Connect" (command response) webhook event it also decodes and outputs the raw Plist. + +#### -enqueue-api string + + * MDM server API key + +The API key (HTTP Basic authentication password) for the MDM server enqueue endpoint. The HTTP Basic username depends on the MDM mode. By default it is "nanomdm" but if the `-micromdm` flag is enabled then it is "micromdm". + +#### -enqueue-url string + + * URL of MDM server enqueue endpoint + +URL of the MDM server for enqueuing commands. The enrollmnet ID is added onto this URL as a path element (or multiple, if the MDM server supports it). + +#### -listen string + + * HTTP listen address (default ":9003") + +Specifies the listen address (interface & port number) for the server to listen on. + +#### -micromdm + + * MicroMDM-style command submission + +Submit commands for enqueueing in a style that is compatible with MicroMDM (instead of NanoMDM). Specifically this flag limits sending commands to one enrollment ID at a time, uses a POST request, and changes the HTTP Basic username. + +#### -push-url string + + * URL of MDM server push endpoint + +URL of the MDM server for sending APNs pushes. The enrollment ID is added onto this URL as a path element (or multiple, if the MDM server supports it). + +#### -repush-interval uint + + * interval for repushes in seconds (default 86400) [1 day] + +If an enrollment ID has not seen a response to a command after this interval then NanoCMD sends an APNs notification to the device. 
+ +#### -step-timeout uint + + * default step timeout in seconds (default 259200) [3 days] + +If a step is not completed within this time period the step is cancelled and returned to the workflow for any (optional) processing. Note the client may still respond to the commands (they are not de-queued from the MDM server, merely removed from tracking in NanoCMD). + +#### -storage & -storage-dsn + +The `-storage` and `-storage-dsn` flags together configure the storage backend. `-storage` specifies the storage backend type while `-storage-dsn` specifies the data source name (or DSN — i.e. the database location or connection string). The default storage backend is "file" if no other backend is specified. + +##### file storage backend + +* `-storage file` + +Configures the `file` storage backend. Data is stored in filesystem files and directories, requires zero dependencies, and should just work right out of the box. The `-storage-dsn` flag specifies the filesystem directory under which the database is created. If no DSN is provided then a default of `db` is used. + +*Example:* `-storage file -storage-dsn /path/to/my/db` + +##### inmem storage backend + +* `-storage inmem` + +Configures the `inmem` storage backend. Data is stored entirely in-memory and is completely volatile — the database will disappear the moment the server process exits. The `-storage-dsn` flag is ignored for this storage backend. + +*Example:* `-storage inmem` + +#### -version + + * print version + +Print version and exit. + +#### -worker-interval uint + + * interval for worker in seconds (default 300) [5 minutes] + +NanoCMD spins up a worker that enqueues future steps, re-pushes to devices, and monitors for timed-out steps. The worker will wake up at this interval to process asynchronous duties. Setting this flag to zero will turn off the worker (effectively disabling those features). + +### API endpoints + +The NanoCMD server is directed via its REST-ish API. 
A brief overview of the API endpoints is provided here. For detailed API documentation please refer to the [NanoCMD OpenAPI documentation](https://www.jessepeterson.space/swagger/nanocmd.html). The [OpenAPI source YAML](../docs/openapi.yaml) is part of this project as well. Also take a look at the [QuickStart guide](../docs/quickstart.md) for a tutorial on using the APIs. + +Most of the API endpoints are protected by HTTP Basic authentication where the password is specified by the `-api` flag (as documented above). + +#### Version endpoint + +* Endpoint: `GET /version` + +Returns a JSON response with the version of the running NanoCMD server. + +#### Webhook endpoint + +* Endpoint: `POST /webhook` + +The webhook endpoint handles MicroMDM-compatible webhook events. These include MDM command and check-in event responses from MDM clients. See the [MicroMDM documentation for more information](https://github.com/micromdm/micromdm/blob/main/docs/user-guide/api-and-webhooks.md). + +#### Workflow Start endpoint + +* Endpoint: `POST /v1/workflow/{name}/start` +* Path parameters: + * `name`: workflow name +* Query parameters: + * `id`: enrollment ID. multiple supported. + * `context`: workflow-dependent context (start) value + +Starts a workflow. + +#### Event Subscription endpoints + +* Endpoint: `GET /v1/event/{name}` +* Endpoint: `PUT /v1/event/{name}` +* Path parameters: + * `name`: user-defined event subscription name + +Configures Event Subscriptions. Event Subscriptions start workflows for MDM events. In JSON form they look like this: + +```json +{ + "event": "Enrollment", + "workflow": "io.micromdm.wf.example.v1", + "context": "string" +} +``` + +The JSON keys are as follows: + +* `event`: the NanoCMD event name. + * `Authenticate`: when a device sends an Authenticate MDM check-in message. + * `TokenUpdate`: when an enrollment sends a TokenUpdate MDM check-in message. + * `Enrollment`: when an enrollment enrolls; i.e. the first TokenUpdate message. 
+ * `CheckOut`: when a device sends a CheckOut MDM check-in message. +* `workflow`: the name of the workflow. +* `context`: optional context to give to the workflow when it starts. + +#### FileVault profile template endpoint + +* Endpoint: `GET /v1/fvenable/profiletemplate` + +Returns the Configuration Profile template for the FileVault enable workflow. Take care to note the `__CERTIFICATE__` string (which is string-replaced with the actual certificate). This profile can be modified and re-uploaded to the profile store with the name of the FileVault enable workflow. The workflow will attempt to pull that profile and fall back to this hard-coded version if it does not exist. + +#### Profile endpoints + +* Endpoint: `GET /v1/profile/{name}` +* Endpoint: `PUT /v1/profile/{name}` +* Endpoint: `DELETE /v1/profile/{name}` +* Path parameters: + * `name`: user-defined profile name + +Retrieve, store, or delete profiles by name parameter in the path. Upload raw profiles (including signed profiles) using the `PUT` method. Retrieve again with `GET` and of course delete with `DELETE`. + +#### Profile list endpoint + +* Endpoint: `GET /v1/profiles` +* Query parameters: + * `name`: user-defined profile name. optional. multiple supported. + +List the profile UUIDs and identifiers mapped by profile name in profile subsystem storage. Supply the name argument for specific profiles to list. + +#### Command Plan endpoints + +* Endpoint: `GET /v1/cmdplan/{name}` +* Endpoint: `PUT /v1/cmdplan/{name}` +* Path parameters: + * `name`: user-defined command plan name + +Retrieve and store command plans — collections of MDM actions/commands to be sent together (such as upon device enrollment). **See also** the below discussion of the command plan workflow. 
Command plans take the JSON form of: + +```json +{ + "profile_names": [ + "profile1" + ], + "manifest_urls": [ + "https://example.com/manifest" + ], + "device_configured": true +} +``` + +The JSON keys are: + +* `profile_names`: list of profiles in the profile subsystem storage. will generate an `InstallProfile` MDM command for each listed item. +* `manifest_urls`: list of URLs to [app installation manifests](https://developer.apple.com/documentation/devicemanagement/manifesturl/itemsitem). will generate an `InstallApplication` MDM command for each URL. +* `device_configured`: if the workflow is started from an enroll event and the device is in the await configuration state then setting this `true` will generate a `DeviceConfigured` MDM command. this will bring the device out of the await configuration state. + +#### Inventory endpoint + +* Endpoint: `GET /v1/inventory` +* Query parameters: + * `id`: enrollment ID. multiple supported. + +Queries the inventory subsystem to retrieve previously saved inventory data. Inventory key-value data is returned in a JSON object (map) for for each `id` parameter specified. + +### Engine + +As mentioned in the [README](../README.md) the workflow *engine* is the component that does the heavy lifting of abstracting the MDM command sending and response receiving to provide the workflows with a consistent and easy to use API. It acts as the glue between workflows and MDM servers. + +There are a few knobs in the server for the engine: namely the flags `-worker-interval`, `-step-timeout`, and `-repush-interval` documented above. Largely, though, the engine is driven by workflows enqueuing steps and the MDM server sending events. That said the main API endpoints for working with the engine are going to be the Workflow Start endpoint and the Event Subscription endpoints — also documented above. These are the ways ways you kick-off workflows in NanoCMD. 
+ +## Subsystems + +While they are alluded to in the APIs above and workflows below it is worth calling out the *subsystems* themselves. Largely they provide storage backing for their domain specific data as well as the raw HTTP API handlers. + +### Command Plan subsystem + +The command plan subsystem provides storage backends for command plans. This supports the subsystem's HTTP APIs and of course the actual workflow for retrieving the configurations. + +### FileVault subsystem + +The FileVault storage subsystem supports two main duties. First the keypair generation, storage, and decryption that allows for devices to encrypt FileVault Pre-Shared Keys (PSKs) to the subsystem-provided public keys. Secondly the FileVault subsystem includes an adapter to the *inventory* subsystem for escrowing (storing) and retrieving PSKs. Effectively this means FileVault escrowed PSKs are stored directly on the enrollment inventory record. + +### Profile subsystem + +The profile subsystem provides storage backends for user-named Apple Configuration profiles. This supports the subsystem's HTTP APIs and of course the actual workflow for installing and removing profiles. As well the FileVault workflow uses the profile subsystem for storage. + +### Inventory subsystem + +The inventory subsystem provides storage backends for "inventory" data — that is, metadata about MDM enrollments. This data is largely collected through the inventory workflow but also data is populated from other workflows such as the FileVault PSK mechanism. + +## Workflows + +Workflows are domain-specific, contained, and encapsulated MDM command sequence senders and processors. For a higher level review of workflows check out the [README](../README.md). For more information about the internals and implementation of workflows please read [the package documentation](../workflow/doc.go). + +### Command Plan Workflow + +* Workflow name: `io.micromdm.wf.cmdplan.v1` +* Start value/context: string value of command plan name. 
See also parameter expansion discussion below. + * Example: `my_cool_cmdplan` + +A command plan (or cmdplan) is a named structured list of operations to send to an enrollment. Each item roughly corresponds to an MDM command (such as for installing profiles or applications). An example might look like: + +```json +{ + "profile_names": [ + "test1" + ], + "manifest_urls": [], + "device_configured": true +} +``` + +In this example this command plan would send one `InstallProfile` command with the contents of the `test1` profile from the profile subsystem as well as try to send a `DeviceConfigured` command (assuming that it's appropriate for the enrollment at the time — i.e. at the initial Setup Assistant for an ADE enrollment). Command plans themselves are managed via NanoCMD's APIs. + +#### Parameter expansion + +There is a special parameter expansion mode that the command plan workflow supports when being started. Usually you provide the name of the command plan as the initial context/start value. However you can also provide a shell-like variable substitution based on the URL parameters that the MDM client is using (which is, ultimately, specified in the MDM enrollment profile). + +For example, this is the command plan name that might be configured: + +```sh +cmdplan_${group} +``` + +Then, if a client has this in their MDM enrollment profile: + +```xml +CheckInURL +https://mdm.example.com/mdm?group=staff +``` + +Then the command plan workflow will replace the `${group}` variable with the value of the parameter (in this example, `staff`) before using that as the command plan name. I.e. it will use `cmdplan_staff` to look up and use the command plan name. + +In this way you can have per-device or per-workgroup command plans at enrollment time. This will only work when triggering this workflow from an MDM event (like Enrollment). You'll need to specify the full command plan name when ad-hoc executing a command plan. 
+ +Finally this parameter expansion provides a fallback mechanism as well. Follow the parameter name with a colon (`:`) to specify a fallback. For example if `cmdplan_${group:fallback}` is specified and group is *not* in the MDM URL parameters then the expansion would be `cmdplan_fallback`. + +#### As compared to the Profile Workflow + +*Question:* Both the Command Plan workflow and the Profile Workflow (below) support installing profiles. Which should I use? + +In general the Command Plan workflow is meant for installing *sequences* of MDM commands. In particular there are some Configuration Profile payloads and MDM commands that must be sent while an enrollment (device) is in the Await Configuration state. The Command Plan workflow is well suited for this and in general is probably more suited for ad-hoc or event-driven invocation/starting. + +The Profile workflow, on the other hand, is meant more for managing state and if your goal is to *continually* make sure profiles are consistent on devices then the Profile workflow is probably a better choice. + +### FileVault Enable Workflow + +* Workflow name: `io.micromdm.wf.fvenable.v1` +* Start value/context: (n/a) + +The FileVault enable workflow does two primary things: first it sends a Configuration Profile to the device (containing the payloads for FileVault escrow, and deferred enablement, and a certificate for encryption). Then it polls the device with a `SecurityInfo` command waiting for the device (likely the end-user) to have enabled FileVault. Once this is done it escrows the FileVault PRK to the inventory system. The default polling is once a minute with a limit of 180 (in other words about 6 hours). + +Note that the profile template can be customized. You'll first need to export the profile by using the API endpoint then re-upload your changed profile to the profile store *with the same name as the workflow*. 
The system will query the profile store every time the workflow starts first and will fallback to the built-in profile template if it is missing. + +### FileVault Rotate Workflow + +* Workflow name: `io.micromdm.wf.fvrotate.v1` +* Start value/context: (n/a) + +The rotate FileVault workflow sends an MDM command to rotate the enrolled device's FileVault FDE Personal Recovery Key (PRK). It will retrieve the existing PRK from inventory subsystem in order to rotate the key. The new PRK will be escrowed back to inventory subsystem. + +### Inventory Workflow + +* Workflow name: `io.micromdm.wf.inventory.v1` +* Start value/context: (n/a) + +The inventory workflow sends `DeviceInformation` and `SecurityInfo` commands to the enrollment to collect information from the host and store it in the inventory subsystem. As well the inventory workflow updates the inventory for any other `SecurityInfo` command that happens to be sent by any other workflow (as this command has no input to make it context-dependent). + +### Profile Workflow + +* Workflow name: `io.micromdm.wf.profile.v1` +* Start value/context: comma-separated list of profile names. removals prefixed with a minus/dash (-) + * Example: `profile1,profile2,-profile3,profile4` + +The profile workflow manages Configuration Profile "state" on an enrollment for the set of provided profile names. The workflow checks the already-installed profile identifiers and UUIDs to make sure the profiles are current and if not (or they are missing) installs them. The list of profiles is specified as a comma-separated list of profile names already stored in the profile subsystem. You can also specify profiles to be removed by prefixing them with a minus/dash (-) character. 
+ +For example, this start/context value: + +``` +dock,munki,-uakel,pppc +``` + +Would try to make sure that the profiles with the names of `dock`, `munki`, and `pppc` in the profile subsystem are installed (if they are not already) while making sure the `uakel` profile is removed (if it is installed). diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 0000000..d965cdb --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,236 @@ +# NanoCMD Quick Start Guide + +This quickstart guide is intended to quickly get a functioning NanoCMD instance up and running and able to use some of the included workflows. + +## Requirements + +* A functioning NanoMDM or MicroMDM (v1.9.0 or later) server. + * You'll need to know the URLs of the command submission and APNs push API endpoints. + * For [NanoMDM](https://github.com/micromdm/nanomdm/blob/main/docs/operations-guide.md#enqueue) this is usally `/v1/enqueue/` and `/v1/push/` endpoints. + * For [MicroMDM](https://github.com/micromdm/micromdm/blob/main/docs/user-guide/api-and-webhooks.md#schedule-raw-commands-with-the-api) this is usually `/v1/commands` and `/push`. +* An enrolled macOS device in that MDM server. + * It doesn't have to be a macOS device but for the demo below it makes things easier. + * You'll need to know the device's [enrollment ID](https://github.com/micromdm/nanomdm/blob/main/docs/operations-guide.md#enrollment-ids) — usually its UDID. + +## Setup and start server + +### NanoCMD + +You'll need the NanoCMD server to start running it, of course. You can fetch it from the [NanoCMD GitHub releases page](https://github.com/micromdm/nanocmd/releases). You can also build it from source if you prefer but that's outside the scope of this document. + +Next you'll need to run it and point it at your MDM server. 
Here's an example invocation for running against a NanoMDM server:
+
+```sh
+./nanocmd-darwin-amd64 \
+  -api supersecret \
+  -enqueue-api supersecretNano \
+  -enqueue-url 'http://[::1]:9000/v1/enqueue/' \
+  -push-url 'http://[::1]:9000/v1/push/' \
+  -debug
+```
+
+You can review the [operations guide](../docs/operations-guide.md) for the full command-line flags but we'll briefly review them here:
+
+* `-api` configures the API password for NanoCMD.
+* `-enqueue-url` is the URL that commands are submitted to NanoMDM.
+* `-enqueue-api` is the API password for your NanoMDM command enqueue API.
+* `-push-url` is the URL that APNs pushes are submitted to NanoMDM.
+* `-debug` turns on additional debug logging.
+
+If we wanted to run it against MicroMDM that might look like this:
+
+```sh
+./nanocmd-darwin-amd64 \
+  -api supersecret \
+  -enqueue-api supersecretMicro \
+  -enqueue-url 'http://[::1]:8080/v1/commands/' \
+  -push-url 'http://[::1]:8080/push/' \
+  -micromdm \
+  -debug
+```
+
+Note the changed URLs and the additional flag:
+
+* `-micromdm` turns on the ability to talk to MicroMDM servers.
+
+With either server the operation of NanoCMD should be the same. Once we start NanoCMD you'll see some output. One of the lines should look similar to this:
+
+```sh
+ts=2023-05-30T15:01:46-07:00 level=info msg=starting server listen=:9003 caller=main.go:159
+```
+
+Indicating to us that the NanoCMD server started and is listening on port 9003.
+
+### NanoMDM (or MicroMDM)
+
+NanoMDM (or MicroMDM) will need to be pointed "back" at NanoCMD's webhook URL handler. For NanoMDM you'll need to use [the `-webhook-url` flag](https://github.com/micromdm/nanomdm/blob/main/docs/operations-guide.md#-webhook-url-string) when starting NanoMDM. For MicroMDM you'll need to use [the `-command-webhook-url` flag](https://github.com/micromdm/micromdm/blob/main/docs/user-guide/api-and-webhooks.md#configure-a-webhook-url-to-process-device-events).
For example, if you started NanoMDM like this:
+
+```sh
+./nanomdm-darwin-amd64 -ca ca.pem -api supersecretNano -debug
+```
+
+You'll need to point it at NanoCMD like so:
+
+```sh
+./nanomdm-darwin-amd64 -ca ca.pem -api supersecretNano -debug -webhook-url 'http://[::1]:9003/webhook'
+```
+
+Similar for MicroMDM.
+
+Good, you should now have your MDM and NanoCMD pointed at each other. Let's have some fun with it!
+
+## First workflow: inventory
+
+For the rest of this guide, let's assume our device's enrollment ID is `FF269FDC-7A93-5F12-A4B7-09923F0D1F7F`. Also in many places the JSON output may look nice and formatted — I've taken the liberty of running it through `jq .` just so it's easier to read here. You're welcome to do that, too, but it may make errors harder to troubleshoot with the `curl` calls.
+
+Let's check if there is any inventory data already. There shouldn't be, but let's make sure:
+
+```sh
+$ curl -u nanocmd:supersecret 'http://[::1]:9003/v1/inventory?id=FF269FDC-7A93-5F12-A4B7-09923F0D1F7F'
+{}
+```
+
+Nothing returned, that's what we expected. Now, let's start the inventory workflow for this ID:
+
+```sh
+$ curl -u nanocmd:supersecret -X POST 'http://[::1]:9003/v1/workflow/io.micromdm.wf.inventory.v1/start?id=FF269FDC-7A93-5F12-A4B7-09923F0D1F7F'
+{"instance_id":"d4f8c9c4-a8ef-4dd9-99e0-38dca53e60f4"}
+```
+
+You can see we returned an instance ID from starting this workflow. I'm sure you also saw *a lot* of output in the NanoCMD logs with all of the debug logging we enabled.
The most important one we're looking for, perhaps, is this one:
+
+```
+ts=2023-05-30T15:26:29-07:00 level=debug service=engine trace_id=547000a1cebaac11 command_uuid=c146d3d5-0d75-4b28-8793-ad93e24f43f3 id=FF269FDC-7A93-5F12-A4B7-09923F0D1F7F engine_command=true request_type=SecurityInfo command_completed=true step_completed=true workflow_name=io.micromdm.wf.inventory.v1 instance_id=d4f8c9c4-a8ef-4dd9-99e0-38dca53e60f4 msg=completed workflow step caller=engine.go:371
+```
+
+Which indicates that the step for this instance ID completed for this enrollment ID (`step_completed=true`). This means that all commands that were enqueued as part of the initial step for this workflow completed.
+
+So, let's check our inventory using the same query we issued above:
+
+```sh
+$ curl -u nanocmd:supersecret 'http://[::1]:9003/v1/inventory?id=FF269FDC-7A93-5F12-A4B7-09923F0D1F7F' | jq .
+{
+  "FF269FDC-7A93-5F12-A4B7-09923F0D1F7F": {
+    "apple_silicon": true,
+    "build_version": "22E261",
+    "device_name": "Laika’s Virtual Machine",
+    "ethernet_mac": "76:f1:14:93:fc:00",
+    "fde_enabled": false,
+    "has_battery": false,
+    "last_source": "DeviceInformation",
+    "model": "VirtualMac2,1",
+    "model_name": "Virtual Machine",
+    "modified": "2023-05-30T15:26:29.157322-07:00",
+    "os_version": "13.3.1",
+    "serial_number": "ZRMXJQTTFX",
+    "sip_enabled": true,
+    "supports_lom": false
+  }
+}
+```
+
+Ah, that's better. We can see that we populated a bunch of properties for this device from both `DeviceInformation` and `SecurityInfo` MDM commands. This data is persisted in the inventory subsystem storage for this device and is available whether the device is online or not.
+
+We can run this workflow any time we want to update the inventory stored here. Attributes will get overwritten from the newer command responses.
+
+## Second workflow: profiles
+
+With MicroMDM or NanoMDM it's pretty easy to individually install (or remove) profiles, of course. You just send the relevant commands.
However what if we want to get a bit more... stateful, or even (gasp) idempotent? NanoCMD's profile workflow may be able to help. It can install or remove profiles based on the profiles already installed by querying them first. Let's give it a try. + +First, we need to upload a profile. I like to use a simple Dock profile that changes the Dock orientation on macOS to the left because it's gives instant visual feedback when it gets installed. [Here is an example](https://gist.github.com/jessepeterson/27d39e8cc4d7ed81773b0a5e2cdc01f5) that I've call `dockleft.mobileconfig`. + +Before we upload, let's check if we have this profile in NanoCMD already: + +```sh +$ curl -u nanocmd:supersecret 'http://[::1]:9003/v1/profiles?name=dockleft' +{ + "error": "profile not found for dockleft: profile not found" +} +``` + +Okay, not uploaded yet. As we expected. Let's upload it! + +```sh +$ curl -u nanocmd:supersecret -w "%{http_code}\n" -T ~/Desktop/dockleft.mobileconfig 'http://[::1]:9003/v1/profile/dockleft' +204 +``` + +Here we uploaded the `dockleft.mobileconfig` on my desktop to the `/v1/profile/dockleft` URL — the 204 (No Content) status update is expected. The last part of that URL is the name of profile in the profile subsystem storage. Now, if we query our profiles like we did before: + +```sh +$ curl -u nanocmd:supersecret 'http://[::1]:9003/v1/profiles?name=dockleft' | jq . +{ + "dockleft": { + "identifier": "com.example.dockleft", + "uuid": "D0C38014-4DBB-4F19-A23F-2768FA2246AE" + } +} +``` + +We can see that our "dockleft" profile is uploaded with a specific identifier and UUID which was in the profile. We can also retrieve this profile from the store as well: + +```sh +$ curl -u nanocmd:supersecret 'http://[::1]:9003/v1/profile/dockleft' | head -5 + + + + + PayloadContent +``` + +Which is our raw profile contents. + +Okay, so we have a profile in the store, how do we get it to install? By using our workflow, of course! 
Let's kickoff the profile workflow specifying this profile name to install: + +```sh +$ curl -u nanocmd:supersecret -X POST 'http://[::1]:9003/v1/workflow/io.micromdm.wf.profile.v1/start?id=FF269FDC-7A93-5F12-A4B7-09923F0D1F7F&context=dockleft' +{"instance_id":"3929e3a7-8ae9-473c-96ba-1938df998b4a"} +``` + +Notice the `context=` parameter at the end there. If all went to plan you should have seen the Dock disappear and reappear on the left hand side of the screen on macOS. + +More to the point though the workflow queried the installed profiles to determine if it needed to install this profile, found that it did, and sent that MDM command. We can see this in the logs with lines like `request_type=ProfileList command_completed=true` + +If we run that exact workflow again the device should query the profiles and find that, because its identifier and UUID have not changed, it won't need to be installed. And indeed that's what happens, telling us in the logs: `msg=no profiles to install or remove after profile list`. + +Now, what if we want to remove that profile? No problem. Prefix the profile name with a minus/dash (-) sign like so: + +```sh +$ curl -u nanocmd:supersecret -X POST 'http://[::1]:9003/v1/workflow/io.micromdm.wf.profile.v1/start?id=FF269FDC-7A93-5F12-A4B7-09923F0D1F7F&context=-dockleft' +{"instance_id":"3929e3a7-8ae9-473c-96ba-1938df998b4a"} +``` + +The profile will then be removed from the system. We see lines in the logs like: `request_type=ProfileList command_completed=true step_completed=true`. Similarly to the install case, if we run this exact command again, it won't try to remove the profile (because it isn't installed). + +Now, that's fun for single profiles. But the workflow supports multiple installs and removals, too. If you've uploaded a number of profiles you can specify them all by supplying them in the context separated by commas. 
For example:
+
+```
+dockleft,munki,-uakel,pppc
+```
+
+The profile workflow will work out what those profile identifiers are (from the profile subsystem storage), query the device for its list of profiles, and determine what MDM commands need to happen to make the installed profiles match the specified state — including, as above, taking no action if the device already has the correct matching set of profiles.
+
+Combining the fact that you can start a workflow for multiple enrollment IDs you can have an invocation like this:
+
+```sh
+$ curl -u nanocmd:supersecret -X POST 'http://[::1]:9003/v1/workflow/io.micromdm.wf.profile.v1/start?id=DEV1&id=DEV2&id=DEV3&context=dockleft,munki,-uakel,pppc'
+{"instance_id":"3929e3a7-8ae9-473c-96ba-1938df998b4a"}
+```
+
+Which would manage the state of those four profiles (`dockleft`, `munki`, `uakel`, and `pppc`) on those three devices (`DEV1`, `DEV2`, and `DEV3`) and be smart about only installing (or removing) profiles on the devices that need to be.
+
+As well if you upload a different version of the profile (and its UUID changes) then you don't need to change your workflow invocation. It will actively query the profile store to figure out the newest version and issue the install profile command just for that change.
+
+## Next steps
+
+Those are two example workflows. Here are a few ideas on where to proceed next:
+
+* Read the [Operations Guide](../docs/operations-guide.md) for more details on configuration, troubleshooting, etc.
+* Try other workflows! See the operations guide for documentation.
+  * Command Plans — groups of MDM commands intended for installation
+  * FileVault enable and rotate — enables deferred FileVault, polls device to escrow the PRK, and can rotate PRKs.
+* Configure Event Subscriptions to e.g. start workflows on device enrollment. See the operations guide for documentation.
+* Configure a proper deployment + * Behind HTTPS/proxies + * Behind firewalls or in a private cloud/VPC + * In a container environment like Docker, Kubernetes, etc. or even just running as a service with systemctl. diff --git a/engine/convert.go b/engine/convert.go new file mode 100644 index 0000000..4337b95 --- /dev/null +++ b/engine/convert.go @@ -0,0 +1,187 @@ +package engine + +import ( + "errors" + "fmt" + "time" + + "github.com/groob/plist" + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/workflow" +) + +type newContextValuer interface { + NewContextValue(string) workflow.ContextMarshaler +} + +// workflowStepStartFromEngine creates a new workflow start step from raw engine input. +func workflowStepStartFromEngine(instanceID string, newCtx newContextValuer, rawContext []byte, ids []string, e *workflow.Event, mCtx *workflow.MDMContext) (*workflow.StepStart, error) { + ss := &workflow.StepStart{ + StepContext: workflow.StepContext{ + InstanceID: instanceID, + Name: "", // workflow step name is blank when starting + Context: newCtx.NewContextValue(""), + }, + IDs: ids, + Event: e, + } + if mCtx != nil { + ss.MDMContext = *mCtx + } + if ss.Context != nil && len(rawContext) > 0 { + if err := ss.Context.UnmarshalBinary(rawContext); err != nil { + return ss, fmt.Errorf("unmarshal context: %w", err) + } + } + return ss, nil +} + +// storageCommandRawFromWorkflowCommand converts a workflow command to a storage command. 
+func storageCommandRawFromWorkflowCommand(cmd interface{}) (*storage.StepCommandRaw, error) { + if cmd == nil { + return nil, errors.New("invalid command (nil)") + } + // make sure we're able to extract the generic set of command attributes + genCmder, ok := cmd.(mdmcommands.GenericCommander) + if !ok { + return nil, errors.New("invalid command type") + } + // extract them + genCmd := genCmder.GenericCommand() + if genCmd == nil { + return nil, errors.New("invalid command (nil generic command)") + } + // some sanity tests + if genCmd.CommandUUID == "" { + return nil, errors.New("empty command uuid") + } + if genCmd.Command.RequestType == "" { + return nil, errors.New("empty request type") + } + // marshal into plist XML + rawCmd, err := plist.Marshal(cmd) + if err != nil { + return nil, fmt.Errorf("marshal command: %w", err) + } + // construct our storage step command + stepCmd := &storage.StepCommandRaw{ + CommandUUID: genCmd.CommandUUID, + RequestType: genCmd.Command.RequestType, + Command: rawCmd, + } + return stepCmd, nil +} + +// storageStepEnqueuingWithConfigFromWorkflowStepEnqueueing converts a workflow step (for enqueueing) to a storage step. +func storageStepEnqueuingWithConfigFromWorkflowStepEnqueueing(n workflow.Namer, defaultTimeout time.Time, se *workflow.StepEnqueueing) (*storage.StepEnqueuingWithConfig, error) { + ss := &storage.StepEnqueuingWithConfig{ + StepEnqueueing: storage.StepEnqueueing{ + StepContext: storage.StepContext{ + InstanceID: se.InstanceID, + Name: se.Name, + WorkflowName: n.Name(), + }, + IDs: se.IDs, + }, + Timeout: se.Timeout, + NotUntil: se.NotUntil, + } + if ss.Timeout.IsZero() { + ss.Timeout = defaultTimeout + } + if se.Context != nil { + var err error + ss.Context, err = se.Context.MarshalBinary() + if err != nil { + return ss, fmt.Errorf("marshal context: %w", err) + } + } + for _, cmd := range se.Commands { + sc, err := storageCommandRawFromWorkflowCommand(cmd) + if err != nil { + // TODO: more error context? 
(rt, uuid) + return ss, fmt.Errorf("converting command: %w", err) + } + ss.Commands = append(ss.Commands, *sc) + } + return ss, nil +} + +// storageStepCommandFromRawResponse converts a raw response into a StepCommandResult. +func storageStepCommandFromRawResponse(reqType string, rawResp []byte) (*storage.StepCommandResult, interface{}, error) { + response, err := workflowCommandResponseFromRawResponse(reqType, rawResp) + if err != nil { + return nil, response, fmt.Errorf("converting response: %w", err) + } + genResper, ok := response.(mdmcommands.GenericResponser) + if !ok { + // this would be odd as the response comes from the mdmcommands module + return nil, response, errors.New("invalid response type") + } + genResp := genResper.GetGenericResponse() + if genResp == nil { + return nil, response, errors.New("invalid response (nil generic response)") + } + sc := &storage.StepCommandResult{ + CommandUUID: genResp.CommandUUID, + RequestType: reqType, + ResultReport: rawResp, + Completed: genResp.Status != "" && genResp.Status != "NotNow", + } + return sc, response, nil +} + +// workflowCommandResponseFromRawResponse converts a raw XML plist of a command response to a workflow response. +func workflowCommandResponseFromRawResponse(reqType string, rawResp []byte) (interface{}, error) { + resp := mdmcommands.NewResponse(reqType) + if resp == nil { + return nil, fmt.Errorf("no response for request type: %s", reqType) + } + err := plist.Unmarshal(rawResp, resp) + if err != nil { + return resp, fmt.Errorf("unmarshal response: %w", err) + } + return resp, nil +} + +// workflowStepResultFromStorageStep converts a storage step into a workflow step result. +// As a special optimization we accept a uuid and resp to short-circuit an already- +// unmarshalled workflow step command to be used. +// If ignoreEmptyResp is set then we do not require completed commands and skip +// parsing empty responses. 
+func workflowStepResultFromStorageStepResult(ss *storage.StepResult, newCtx newContextValuer, ignoreEmptyResp bool, uuid string, spResp interface{}) (*workflow.StepResult, error) { + if len(ss.IDs) != 1 { + // results (MDM command responses) can only be for a single ID + return nil, errors.New("incorrect id count") + } + sr := &workflow.StepResult{ + StepContext: workflow.StepContext{ + InstanceID: ss.InstanceID, + Name: ss.Name, + Context: newCtx.NewContextValue(ss.Name), + }, + ID: ss.IDs[0], + } + if sr.Context != nil && len(ss.Context) > 0 { + if err := sr.Context.UnmarshalBinary(ss.Context); err != nil { + return sr, fmt.Errorf("unmarshal context: %w", err) + } + } + for _, cmd := range ss.Commands { + if cmd.CommandUUID == uuid { + sr.CommandResults = append(sr.CommandResults, spResp) + continue + } + if ignoreEmptyResp && len(cmd.ResultReport) < 1 { + continue + } + resp, err := workflowCommandResponseFromRawResponse(cmd.RequestType, cmd.ResultReport) + if err != nil { + // TODO: more error context? 
(rt, uuid) + return sr, fmt.Errorf("converting response: %w", err) + } + sr.CommandResults = append(sr.CommandResults, resp) + } + return sr, nil +} diff --git a/engine/convert_test.go b/engine/convert_test.go new file mode 100644 index 0000000..394b582 --- /dev/null +++ b/engine/convert_test.go @@ -0,0 +1,126 @@ +package engine + +import ( + "bytes" + "os" + "reflect" + "testing" + + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +type testContextMarshaler struct{} + +func (cm *testContextMarshaler) NewContextValue(stepName string) workflow.ContextMarshaler { + if stepName == "" { + return new(workflow.StringContext) + } + return nil +} + +func TestConvertWorkflowStepStartFromEngine(t *testing.T) { + instID := "AAABBB111222" + + newCtxMarshaler := &testContextMarshaler{} + + inputIDs := []string{"a", "b", "c"} + + ss, err := workflowStepStartFromEngine(instID, newCtxMarshaler, []byte("hello"), inputIDs, nil, nil) + if err != nil { + t.Fatal(err) + } + + stepCtx, ok := ss.Context.(*workflow.StringContext) + if !ok { + t.Fatal("incorrect context type") + } + + if have, want := string(*stepCtx), "hello"; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + if have, want := ss.InstanceID, instID; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + if have, want := ss.Name, ""; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + if !reflect.DeepEqual(ss.IDs, inputIDs) { + t.Error("IDs do not match") + } +} + +func TestStorageStepCommandFromRawResponse(t *testing.T) { + pBytes, err := os.ReadFile("testdata/devinfo.plist") + if err != nil { + t.Fatal(err) + } + sc, response, err := storageStepCommandFromRawResponse("DeviceInformation", pBytes) + if err != nil { + t.Fatal(err) + } + if sc == nil { + t.Fatal("nil StepCommand") + } + if response == nil { + t.Fatal("nil response") + } + devInfo, ok := 
response.(*mdmcommands.DeviceInformationResponse) + if !ok { + t.Fatal("incorrect command type") + } + if have, want := sc.CommandUUID, "DevInfo001"; have != want { + t.Fatalf("have=%v, want=%v", have, want) + } + var udid string + if devInfo.UDID != nil { + udid = *devInfo.UDID + } + if have, want := udid, "UDID001"; have != want { + t.Fatalf("have=%v, want=%v", have, want) + } +} + +// TestConvertNilStorageCommandRawFromWorkflowCommand tests for a +// regression of a nil check/fix in the mdmcommands module. +func TestConvertNilStorageCommandRawFromWorkflowCommand(t *testing.T) { + var c *mdmcommands.InstallProfileCommand + _, err := storageCommandRawFromWorkflowCommand(c) + if err == nil { + t.Fatal("want error, have nil") + } +} + +func TestConvertStorageCommandRawFromWorkflowCommand(t *testing.T) { + _, err := storageCommandRawFromWorkflowCommand(&struct{}{}) + if err == nil { + t.Fatal("want error, have nil") + } + + ider := uuid.NewStaticIDs("ABCUUID") + cmd := mdmcommands.NewSecurityInfoCommand(ider.ID()) + sc, err := storageCommandRawFromWorkflowCommand(cmd) + if err != nil { + t.Fatal(err) + } + if have, want := sc.RequestType, cmd.Command.RequestType; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + if have, want := sc.CommandUUID, cmd.CommandUUID; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + if have, want := cmd.CommandUUID, "ABCUUID"; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + secInfoGenPlist, err := os.ReadFile("testdata/secinfo.gen.plist") + if err != nil { + t.Fatal(err) + } + if have, want := sc.Command, secInfoGenPlist; !bytes.Equal(have, want) { + t.Errorf("have: %v, want: %v", string(have), string(want)) + } +} diff --git a/engine/engine.go b/engine/engine.go new file mode 100644 index 0000000..ee8cfa5 --- /dev/null +++ b/engine/engine.go @@ -0,0 +1,499 @@ +// Package engine implements the NanoCMD workflow engine. 
+package engine + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/mdm" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +var ( + ErrNoSuchWorkflow = errors.New("no such workflow") + ErrNoIDs = errors.New("no IDs") +) + +func NewErrNoSuchWorkflow(name string) error { + return fmt.Errorf("%w: %s", ErrNoSuchWorkflow, name) +} + +// RawEnqueuer sends raw Plist commands to enrollment IDs. +type RawEnqueuer interface { + Enqueue(ctx context.Context, ids []string, rawCmd []byte) error +} + +// PushEnqueuer sends raw commands and APNs pushes to enrollment IDs. +type PushEnqueuer interface { + RawEnqueuer + Push(ctx context.Context, ids []string) error +} + +// Enqueuer sends raw Plist commands to enrollment IDs and relays multi-command capability. +type Enqueuer interface { + RawEnqueuer + SupportsMultiCommands() bool +} + +// DefaultTimeout is the default workflow step timeout. +// A workflow's configured timeout will override this default and a +// step's enqueued timeout will override that. +const DefaultTimeout = time.Hour * 24 * 3 + +// Engine coordinates workflows with MDM servers. +type Engine struct { + workflowsMu sync.RWMutex + workflows map[string]workflow.Workflow + allResps map[string][]string // map of MDM command Request Types to slice of workflow names + + storage storage.Storage + enqueuer Enqueuer + eventStorage storage.ReadEventSubscriptionStorage + + logger log.Logger + ider uuid.IDer + + defaultTimeout time.Duration +} + +// Options configure the engine. +type Option func(*Engine) + +// WithLogger sets the engine logger. +func WithLogger(logger log.Logger) Option { + return func(e *Engine) { + e.logger = logger + } +} + +// WithDefaultTimeout configures the engine for a default workflow step timeout. 
+func WithDefaultTimeout(timeout time.Duration) Option { + return func(e *Engine) { + e.defaultTimeout = timeout + } +} + +// WithEventStorage turns on the event dispatch and configures the storage. +func WithEventStorage(evStorage storage.ReadEventSubscriptionStorage) Option { + return func(e *Engine) { + e.eventStorage = evStorage + } +} + +// New creates a new NanoCMD engine with default configurations. +func New(storage storage.Storage, enqueuer Enqueuer, opts ...Option) *Engine { + engine := &Engine{ + workflows: make(map[string]workflow.Workflow), + allResps: make(map[string][]string), + storage: storage, + enqueuer: enqueuer, + logger: log.NopLogger, + ider: uuid.NewUUID(), + defaultTimeout: DefaultTimeout, + } + for _, opt := range opts { + opt(engine) + } + return engine +} + +// diff returns the difference between a and b +// That is: the items not in both slices. +func diff(a, b []string) (r []string) { + seen := make(map[string]int) + for _, v := range a { + seen[v]++ + } + for _, v := range b { + seen[v]-- + } + for k, v := range seen { + if v != 0 { + r = append(r, k) + } + } + return +} + +// StartWorkflow starts a new workflow instance for workflow name. 
+func (e *Engine) StartWorkflow(ctx context.Context, name string, context []byte, ids []string, ev *workflow.Event, mdmCtx *workflow.MDMContext) (string, error) { + // retrieve our workflow and check validity + w := e.Workflow(name) + if w == nil { + return "", NewErrNoSuchWorkflow(name) + } + + logger := ctxlog.Logger(ctx, e.logger).With(logkeys.WorkflowName, name) + + if cfg := w.Config(); cfg == nil || cfg.Exclusivity == workflow.Exclusive { + // check if our ids have any outstanding workflows running + wRunningIDs, err := e.storage.RetrieveOutstandingWorkflowStatus(ctx, name, ids) + if err != nil { + return "", fmt.Errorf("retrieving outstanding status: %w", err) + } + if len(wRunningIDs) > 0 { + ct := len(ids) + ids = diff(ids, wRunningIDs) // replace our ids with the set of NON-outstanding ids + if len(ids) < 1 { + // if all IDs are already running, then return an error + return "", fmt.Errorf("workflow already started on %d (of %d) ids", len(wRunningIDs), ct) + } else { + logger.Debug( + logkeys.Message, fmt.Sprintf("workflow already started on %d (of %d) ids", len(wRunningIDs), ct), + logkeys.GenericCount, len(ids), + ) + } + } + } + + var startIDs [][]string + if e.enqueuer.SupportsMultiCommands() { + startIDs = [][]string{ids} + } else { + // if we do not support multi-targeted commands then we need + // to break apart the initial multi-target IDs. the primary + // reason for this is workflows should be agnostic about + // whether they can target multi-ids or not. in this way a + // workflow can simply generate a unique UUID for each of its + // commands regardless of the underlying support. 
+ for _, id := range ids { + startIDs = append(startIDs, []string{id}) + } + } + + // create a new instance ID + instanceID := e.ider.ID() + + var retErr error // accumulate and return the last start error + for _, startID := range startIDs { + // check that we have enrollment IDs to start in our IDs + if len(startID) < 1 { + logger.Info(logkeys.Error, ErrNoIDs) + continue + } + + // create a workflow start step + ss, err := workflowStepStartFromEngine(instanceID, w, context, ids, ev, mdmCtx) + if err != nil { + return instanceID, fmt.Errorf("converting step start: %w", err) + } + if err = w.Start(ctx, ss); err != nil { + return instanceID, fmt.Errorf("staring workflow: %w", err) + } + logger.Debug( + logkeys.InstanceID, instanceID, + logkeys.Message, "starting workflow", + logkeys.FirstEnrollmentID, startID[0], + logkeys.GenericCount, len(startID), + ) + } + + return instanceID, retErr +} + +// stepDefaultTimeout returns either the engine or workflow default step timeout. +func (e *Engine) stepDefaultTimeout(workflowName string) (defaultTimeout time.Time) { + if e.defaultTimeout > 0 { + defaultTimeout = time.Now().Add(e.defaultTimeout) + } + w := e.Workflow(workflowName) + if w == nil { + return + } + cfg := w.Config() + if cfg == nil { + return + } + if cfg.Timeout > 0 { + defaultTimeout = time.Now().Add(cfg.Timeout) + } + return +} + +// EnqueueStep stores the step and enqueues the commands to the MDM server. 
+func (e *Engine) EnqueueStep(ctx context.Context, n workflow.Namer, se *workflow.StepEnqueueing) error { + ss, err := storageStepEnqueuingWithConfigFromWorkflowStepEnqueueing(n, e.stepDefaultTimeout(n.Name()), se) + if err != nil { + return fmt.Errorf("converting workflow step: %w", err) + } + + if err = e.storage.StoreStep(ctx, ss, time.Now()); err != nil { + return fmt.Errorf("storing step: %w", err) + } + + if ss.NotUntil.IsZero() { + // if we are not delaying the steps, then send them now + for _, cmd := range ss.Commands { + if err = e.enqueuer.Enqueue(ctx, ss.IDs, cmd.Command); err != nil { + return fmt.Errorf("enqueueing step: %w", err) + } + } + } + + ctxlog.Logger(ctx, e.logger).Debug( + logkeys.Message, "enqueued step", + logkeys.InstanceID, ss.InstanceID, + logkeys.GenericCount, len(ss.IDs), + logkeys.FirstEnrollmentID, ss.IDs[0], + logkeys.WorkflowName, ss.WorkflowName, + logkeys.StepName, ss.Name, + "command_count", len(ss.Commands), + ) + + return nil +} + +// dispatchAllCommandResponseRequestTypes sends the "AllCommandResponse" to subscribed workflows. 
+func (e *Engine) dispatchAllCommandResponseRequestTypes(ctx context.Context, reqType string, id string, response interface{}, mdmCtx *workflow.MDMContext) error { + logger := ctxlog.Logger(ctx, e.logger).With( + "request_type", reqType, + logkeys.EnrollmentID, id, + ) + ev := &workflow.Event{ + EventFlag: workflow.EventAllCommandResponse, + EventData: response, + } + var wg sync.WaitGroup + for _, w := range e.allRespWorkflows(reqType) { + wg.Add(1) + go func(w workflow.Workflow) { + defer wg.Done() + err := w.Event(ctx, ev, id, mdmCtx) + if err != nil { + logger.Info( + logkeys.Message, "workflow all command response", + logkeys.WorkflowName, w.Name(), + logkeys.Error, err, + ) + } + }(w) + } + wg.Wait() + return nil +} + +func logAndError(err error, logger log.Logger, msg string) error { + logger.Info( + logkeys.Message, msg, + logkeys.Error, err, + ) + return fmt.Errorf("%s: %w", msg, err) +} + +// MDMCommandResponseEvent receives MDM command responses. +func (e *Engine) MDMCommandResponseEvent(ctx context.Context, id string, uuid string, raw []byte, mdmContext *workflow.MDMContext) error { + logger := ctxlog.Logger(ctx, e.logger).With( + logkeys.CommandUUID, uuid, + logkeys.EnrollmentID, id, + ) + + // see if this is a engine-"tracked" MDM command and get its metadata if so + reqType, ok, err := e.storage.RetrieveCommandRequestType(ctx, id, uuid) + if err != nil { + return logAndError(err, logger, "retreive command request type") + } + logger = logger.With("engine_command", ok) + + if !ok { + // we didn't find this command UUID + // probably did not originate with the engine + logger.Debug() + return nil + } + + logger = logger.With(logkeys.RequestType, reqType) + + // convert raw response to a storage raw response + sc, response, err := storageStepCommandFromRawResponse(reqType, raw) + if err != nil { + return logAndError(err, logger, "convert response") + } + logger = logger.With("command_completed", sc.Completed) + + var wg sync.WaitGroup + defer wg.Wait() 
// we have a context so make sure we block + wg.Add(1) + go func() { + defer wg.Done() + if err := e.dispatchAllCommandResponseRequestTypes(ctx, reqType, id, response, mdmContext); err != nil { + logger.Info( + logkeys.Message, "dispatching all command response types", + logkeys.Error, err, + ) + } + }() + + // store our command response and get the completed storage step result + ssr, err := e.storage.StoreCommandResponseAndRetrieveCompletedStep(ctx, id, sc) + if err != nil { + return logAndError(err, logger, "store command retrieve completed") + } + logger = logger.With("step_completed", ssr != nil) + + if ssr == nil { + logger.Debug() + // return if there was no completed step; nothing more to do. + return nil + } + + logger = logger.With( + logkeys.WorkflowName, ssr.WorkflowName, + logkeys.InstanceID, ssr.InstanceID, + ) + + w := e.Workflow(ssr.WorkflowName) + if w == nil { + return logAndError(NewErrNoSuchWorkflow(ssr.WorkflowName), logger, "retrieving workflow") + } + + // create a workflow step result for handing off to a workflow + stepResult, err := workflowStepResultFromStorageStepResult(ssr, w, false, uuid, response) + if err != nil { + return logAndError(err, logger, "converting storage step") + } + + if mdmContext != nil { + stepResult.MDMContext = *mdmContext + } + + // let our workflow know that we have completed the step + if err = w.StepCompleted(ctx, stepResult); err != nil { + return logAndError(err, logger, "completing workflow step") + } + logger.Debug(logkeys.Message, "completed workflow step") + return nil +} + +// dispatchEvents dispatches MDM check-in events. +// this includes event subscriptions (user configured) and workflow +// configurations. 
+func (e *Engine) dispatchEvents(ctx context.Context, id string, ev *workflow.Event, mdmCtx *workflow.MDMContext) error { + logger := ctxlog.Logger(ctx, e.logger).With( + "event", ev.EventFlag, + logkeys.EnrollmentID, id, + ) + var wg sync.WaitGroup + if e.eventStorage != nil { + subs, err := e.eventStorage.RetrieveEventSubscriptionsByEvent(ctx, ev.EventFlag) + if err != nil { + logger.Info( + logkeys.Message, "retrieving event subscriptions", + logkeys.Error, err, + ) + } else { + for _, sub := range subs { + wg.Add(1) + go func(es *storage.EventSubscription) { + defer wg.Done() + if instanceID, err := e.StartWorkflow(ctx, es.Workflow, []byte(es.Context), []string{id}, ev, mdmCtx); err != nil { + logger.Info( + logkeys.Message, "start workflow", + logkeys.WorkflowName, es.Workflow, + logkeys.InstanceID, instanceID, + logkeys.Error, err, + ) + } else { + logger.Debug( + logkeys.Message, "started workflow", + logkeys.WorkflowName, es.Workflow, + logkeys.InstanceID, instanceID, + ) + } + }(sub) + } + } + } + for _, w := range e.eventWorkflows(ev.EventFlag) { + wg.Add(1) + go func(w workflow.Workflow) { + defer wg.Done() + if err := w.Event(ctx, ev, id, mdmCtx); err != nil { + logger.Info( + logkeys.Message, "workflow event", + logkeys.WorkflowName, w.Name(), + logkeys.Error, err, + ) + } else { + logger.Debug( + logkeys.Message, "workflow event", + logkeys.WorkflowName, w.Name(), + ) + } + }(w) + } + wg.Wait() + return nil +} + +// MDMCheckinEvent receives MDM checkin messages. 
// MDMCheckinEvent receives MDM checkin messages.
// The concrete type of checkin selects which workflow events are
// dispatched; unrecognized types dispatch nothing. Authenticate and
// CheckOut additionally cancel any outstanding steps for the enrollment.
// Dispatch errors are logged, not returned.
func (e *Engine) MDMCheckinEvent(ctx context.Context, id string, checkin interface{}, mdmContext *workflow.MDMContext) error {
	logger := ctxlog.Logger(ctx, e.logger).With(logkeys.EnrollmentID, id)
	cancelSteps := false
	var events []*workflow.Event
	switch v := checkin.(type) {
	case *mdm.Authenticate:
		// a new (re-)enrollment is starting
		cancelSteps = true
		events = []*workflow.Event{{
			EventFlag: workflow.EventAuthenticate,
			EventData: v,
		}}
	case *mdm.TokenUpdate:
		events = []*workflow.Event{{
			EventFlag: workflow.EventTokenUpdate,
			EventData: v,
		}, {
			// from a pure token update we can't tell if an enrollment
			// happened. so we default to sending that event, too.
			// even if this is a supplementary intra-enrollment token
			// update.
			EventFlag: workflow.EventEnrollment,
			EventData: v,
		}}
	case *mdm.TokenUpdateEnrolling:
		events = []*workflow.Event{{
			EventFlag: workflow.EventTokenUpdate,
			EventData: v.TokenUpdate,
		}}
		if v.Enrolling {
			// with this type we *can* tell if we're enrolling or not.
			// so only dispatch that event if, truly, we're enrolling.
			events = append(events, &workflow.Event{
				EventFlag: workflow.EventEnrollment,
				EventData: v.TokenUpdate,
			})
		}
	case *mdm.CheckOut:
		cancelSteps = true
		events = []*workflow.Event{{
			EventFlag: workflow.EventCheckOut,
			EventData: v,
		}}
	}
	if cancelSteps {
		// we cancel all steps for an enrollment upon re-enrollment
		// or checkout. this will allow us to enqueue workflows again.
		// otherwise any outstanding workflow instances would block
		// new ones being executed due to exclusivity.
		if err := e.storage.CancelSteps(ctx, id, ""); err != nil {
			return logAndError(err, logger, "checkin event: cancel steps")
		}
	}
	for _, event := range events {
		if err := e.dispatchEvents(ctx, id, event, mdmContext); err != nil {
			logger.Info(
				logkeys.Message, "checkin event: dispatch events",
				"event", event.EventFlag,
				logkeys.Error, err,
			)
		}
	}
	return nil
}
diff --git a/engine/http/engine.go b/engine/http/engine.go
new file mode 100644
index 0000000..b237850
--- /dev/null
+++ b/engine/http/engine.go
// Package http contains HTTP handlers that work with the NanoCMD engine.
package http

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"

	"github.com/alexedwards/flow"
	"github.com/micromdm/nanocmd/http/api"
	"github.com/micromdm/nanocmd/log"
	"github.com/micromdm/nanocmd/log/ctxlog"
	"github.com/micromdm/nanocmd/log/logkeys"
	"github.com/micromdm/nanocmd/workflow"
)

var (
	// ErrNoIDs is returned when a request provides no enrollment IDs.
	ErrNoIDs = errors.New("no IDs provided")
	// ErrNoStarter is returned when no workflow starter was configured.
	ErrNoStarter = errors.New("missing workflow starter")
)

// WorkflowStarter starts a named workflow for a set of enrollment IDs.
type WorkflowStarter interface {
	StartWorkflow(ctx context.Context, name string, context []byte, ids []string, e *workflow.Event, mdmCtx *workflow.MDMContext) (string, error)
}

// StartWorkflowHandler creates a HandlerFunc that starts a workflow.
+func StartWorkflowHandler(starter WorkflowStarter, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + ids := r.URL.Query()["id"] + if len(ids) < 1 { + logger.Info(logkeys.Message, "parameters", logkeys.Error, ErrNoIDs) + api.JSONError(w, ErrNoIDs, http.StatusBadRequest) + return + } + + name := flow.Param(r.Context(), "name") + logger = logger.With( + logkeys.FirstEnrollmentID, ids[0], + logkeys.WorkflowName, name, + ) + if starter == nil { + logger.Info(logkeys.Message, "starting workflow", logkeys.Error, ErrNoStarter) + api.JSONError(w, ErrNoStarter, 0) + return + } + + logger.Debug(logkeys.Message, "starting workflow") + instanceID, err := starter.StartWorkflow( + r.Context(), + name, + []byte(r.URL.Query().Get("context")), + ids, + nil, + nil, + ) + if err != nil { + logger.Info(logkeys.Message, "starting workflow", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + + jsonResp := &struct { + InstanceID string `json:"instance_id"` + }{InstanceID: instanceID} + if err = json.NewEncoder(w).Encode(jsonResp); err != nil { + logger.Info(logkeys.Message, "encoding json response", logkeys.Error, err) + } + } +} diff --git a/engine/http/event.go b/engine/http/event.go new file mode 100644 index 0000000..e9eda6e --- /dev/null +++ b/engine/http/event.go @@ -0,0 +1,112 @@ +package http + +import ( + "encoding/json" + "errors" + "net/http" + + "github.com/alexedwards/flow" + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/http/api" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" +) + +var ( + ErrMissingStore = errors.New("missing store") + ErrNoName = errors.New("missing name parameter") + ErrWorkflowNotRegistered = errors.New("workflow not registered") +) + +// GetHandler retrieves and returns JSON of the named event subscription. 
+func GetHandler(store storage.ReadEventSubscriptionStorage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + if store == nil { + logger.Info(logkeys.Error, ErrMissingStore) + api.JSONError(w, ErrMissingStore, 0) + return + } + + name := flow.Param(r.Context(), "name") + if name == "" { + logger.Info(logkeys.Message, "parameters", logkeys.Error, ErrNoName) + api.JSONError(w, ErrNoName, http.StatusBadRequest) + return + } + + logger = logger.With("name", name) + es, err := store.RetrieveEventSubscriptions(r.Context(), []string{name}) + if err != nil { + logger.Info(logkeys.Message, "retrieve event subscription", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + + logger.Debug( + logkeys.Message, "retrieved event subscription", + logkeys.GenericCount, len(es), + ) + w.Header().Set("Content-Type", "application/json") + if err = json.NewEncoder(w).Encode(es[name]); err != nil { + logger.Info(logkeys.Message, "encoding json to body", logkeys.Error, err) + return + } + } +} + +type WorkflowNameChecker interface { + WorkflowRegistered(name string) bool +} + +// PutHandler stores JSON of the named event subscription. 
+func PutHandler(store storage.EventSubscriptionStorage, chk WorkflowNameChecker, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + if store == nil { + logger.Info(logkeys.Error, ErrMissingStore) + api.JSONError(w, ErrMissingStore, 0) + return + } + + name := flow.Param(r.Context(), "name") + if name == "" { + logger.Info(logkeys.Message, "parameters", logkeys.Error, ErrNoName) + api.JSONError(w, ErrNoName, http.StatusBadRequest) + return + } + + logger = logger.With("name", name) + es := new(storage.EventSubscription) + err := json.NewDecoder(r.Body).Decode(es) + if err != nil { + logger.Info(logkeys.Message, "decoding body", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + + logger = logger.With(logkeys.WorkflowName, es.Workflow) + + if err = es.Validate(); err != nil { + logger.Info(logkeys.Message, "validating event subscription", logkeys.Error, err) + api.JSONError(w, err, http.StatusBadRequest) + return + } + + if !chk.WorkflowRegistered(es.Workflow) { + logger.Info(logkeys.Message, "checking workflow name", logkeys.Error, ErrWorkflowNotRegistered) + api.JSONError(w, ErrWorkflowNotRegistered, http.StatusBadRequest) + return + } + + if err = store.StoreEventSubscription(r.Context(), name, es); err != nil { + logger.Info(logkeys.Message, "storing event subscription", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + + logger.Debug(logkeys.Message, "stored event subscription") + w.WriteHeader(http.StatusNoContent) + } +} diff --git a/engine/reg.go b/engine/reg.go new file mode 100644 index 0000000..1034cb5 --- /dev/null +++ b/engine/reg.go @@ -0,0 +1,101 @@ +package engine + +import "github.com/micromdm/nanocmd/workflow" + +func in(s []string, i string) int { + for j, v := range s { + if v == i { + return j + } + } + return -1 +} + +func (e *Engine) registerAllResp(name string, allRespReqTypes []string) { + for _, reqType := range allRespReqTypes 
{ + regNames := e.allResps[reqType] + if in(regNames, name) == -1 { + regNames = append(regNames, name) + e.allResps[reqType] = regNames + } + } +} + +// RegisterWorkflow associates w with the engine by name. +func (e *Engine) RegisterWorkflow(w workflow.Workflow) error { + e.workflowsMu.Lock() + defer e.workflowsMu.Unlock() + e.workflows[w.Name()] = w + if cfg := w.Config(); cfg != nil { + e.registerAllResp(w.Name(), cfg.AllCommandResponseRequestTypes) + } + e.logger.Debug("msg", "registered workflow", "name", w.Name()) + return nil +} + +func (e *Engine) unregisterAllResp(name string) { + for k, v := range e.allResps { + pos := in(v, name) + if pos >= 0 && len(v) > pos { + e.allResps[k] = append(v[0:pos], v[pos+1:]...) + } + } +} + +func (e *Engine) allRespWorkflows(reqType string) (workflows []workflow.Workflow) { + e.workflowsMu.RLock() + defer e.workflowsMu.RUnlock() + for _, name := range e.allResps[reqType] { + w := e.Workflow(name) + if w != nil { + workflows = append(workflows, w) + } + } + return +} + +// UnregisterWorkflow dissociates the named workflow from the engine by name. +func (e *Engine) UnregisterWorkflow(name string) error { + e.workflowsMu.Lock() + defer e.workflowsMu.Unlock() + if _, ok := e.workflows[name]; ok { + delete(e.workflows, name) + e.unregisterAllResp(name) + e.logger.Debug("msg", "unregistered workflow", "name", name) + } else { + e.logger.Info( + "msg", "unregistered workflow", + "name", name, + "err", "workflow name not found", + ) + } + return nil +} + +// Workflow returns the registered workflow by name. +func (e *Engine) Workflow(name string) workflow.Workflow { + e.workflowsMu.RLock() + defer e.workflowsMu.RUnlock() + return e.workflows[name] +} + +// WorkflowRegistered returns true if the workflow name is registered. 
func (e *Engine) WorkflowRegistered(name string) bool {
	e.workflowsMu.RLock()
	defer e.workflowsMu.RUnlock()
	_, ok := e.workflows[name]
	return ok
}

// eventWorkflows returns workflows that are configured to receive the ev event.
func (e *Engine) eventWorkflows(ev workflow.EventFlag) (workflows []workflow.Workflow) {
	e.workflowsMu.RLock()
	defer e.workflowsMu.RUnlock()
	for _, w := range e.workflows {
		// EventFlag is a bitmask; test whether cfg.Events includes ev
		if cfg := w.Config(); cfg != nil && ev&cfg.Events > 0 {
			// w "subscribes" to ev; add w to the list
			workflows = append(workflows, w)
		}
	}
	return
}
diff --git a/engine/storage/diskv/diskv.go b/engine/storage/diskv/diskv.go
new file mode 100644
index 0000000..d0ac672
--- /dev/null
+++ b/engine/storage/diskv/diskv.go
// Package diskv implements an engine storage backend using the diskv key-value store.
package diskv

import (
	"path/filepath"

	"github.com/micromdm/nanocmd/engine/storage/kv"
	"github.com/micromdm/nanocmd/utils/kv/kvdiskv"
	"github.com/micromdm/nanocmd/utils/uuid"
	"github.com/peterbourgon/diskv/v3"
)

// Diskv is a diskv-backed engine storage backend.
type Diskv struct {
	*kv.KV
}

// New creates a diskv-backed engine storage backend rooted at path.
// Three separate diskv stores are created under path: step, idcmd, and
// eventsubs — matching the three buckets kv.New expects.
func New(path string) *Diskv {
	flatTransform := func(s string) []string { return []string{} }
	return &Diskv{KV: kv.New(
		kvdiskv.NewBucket(diskv.New(diskv.Options{
			BasePath:     filepath.Join(path, "engine", "step"),
			Transform:    flatTransform,
			CacheSizeMax: 1024 * 1024,
		})),
		kvdiskv.NewBucket(diskv.New(diskv.Options{
			BasePath:     filepath.Join(path, "engine", "idcmd"),
			Transform:    flatTransform,
			CacheSizeMax: 1024 * 1024,
		})),
		kvdiskv.NewBucket(diskv.New(diskv.Options{
			BasePath:     filepath.Join(path, "engine", "eventsubs"),
			Transform:    flatTransform,
			CacheSizeMax: 1024 * 1024,
		})),
		uuid.NewUUID(),
	)}
}
diff --git a/engine/storage/diskv/diskv_test.go b/engine/storage/diskv/diskv_test.go
new file mode 100644
index 0000000..29927b8
--- /dev/null
+++ b/engine/storage/diskv/diskv_test.go
package diskv

import (
	"os"
	"testing"

	"github.com/micromdm/nanocmd/engine/storage"
	"github.com/micromdm/nanocmd/engine/storage/test"
)

func TestDiskvStorage(t *testing.T) {
	test.TestEngineStorage(t, func() storage.AllStorage { return New("teststor") })
	os.RemoveAll("teststor")
}
diff --git a/engine/storage/inmem/inmem.go b/engine/storage/inmem/inmem.go
new file mode 100644
index 0000000..af74429
--- /dev/null
+++ b/engine/storage/inmem/inmem.go
// Package inmem implements an engine storage backend using a map-based key-value store.
package inmem

import (
	"github.com/micromdm/nanocmd/engine/storage/kv"
	"github.com/micromdm/nanocmd/utils/kv/kvmap"
	"github.com/micromdm/nanocmd/utils/uuid"
)

// InMem is an in-memory engine storage backend.
+type InMem struct { + *kv.KV +} + +func New() *InMem { + return &InMem{KV: kv.New( + kvmap.NewBucket(), + kvmap.NewBucket(), + kvmap.NewBucket(), + uuid.NewUUID(), + )} +} diff --git a/engine/storage/inmem/inmem_test.go b/engine/storage/inmem/inmem_test.go new file mode 100644 index 0000000..fce694a --- /dev/null +++ b/engine/storage/inmem/inmem_test.go @@ -0,0 +1,12 @@ +package inmem + +import ( + "testing" + + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/engine/storage/test" +) + +func TestDiskvStorage(t *testing.T) { + test.TestEngineStorage(t, func() storage.AllStorage { return New() }) +} diff --git a/engine/storage/kv/event.go b/engine/storage/kv/event.go new file mode 100644 index 0000000..8d0f6c4 --- /dev/null +++ b/engine/storage/kv/event.go @@ -0,0 +1,149 @@ +package kv + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/utils/kv" + "github.com/micromdm/nanocmd/workflow" +) + +const ( + keySfxEventFlag = ".flag" // contains a strconv integer + keySfxEventWorkflow = ".name" + keySfxEventContext = ".ctx" +) + +type kvEventSubscription struct { + *storage.EventSubscription +} + +func (es *kvEventSubscription) set(ctx context.Context, b kv.Bucket, name string) error { + if es == nil || name == "" { + return errors.New("invalid") + } + err := es.Validate() + if err != nil { + return fmt.Errorf("validating: %w", err) + } + esMap := map[string][]byte{ + name + keySfxEventWorkflow: []byte(es.Workflow), + name + keySfxEventFlag: []byte(strconv.Itoa(int(workflow.EventFlagForString(es.Event)))), + } + if len(es.Context) > 0 { + esMap[name+keySfxEventContext] = []byte(es.Context) + } + return kv.SetMap(ctx, b, esMap) +} + +func (es *kvEventSubscription) get(ctx context.Context, b kv.Bucket, name string) error { + if es == nil || name == "" { + return errors.New("invalid") + } + if es.EventSubscription == nil { + es.EventSubscription = 
new(storage.EventSubscription) + } + esMap, err := kv.GetMap(ctx, b, []string{ + name + keySfxEventWorkflow, + name + keySfxEventFlag, + }) + if err != nil { + return err + } + es.Workflow = string(esMap[name+keySfxEventWorkflow]) + eventFlag, err := strconv.Atoi(string(esMap[name+keySfxEventFlag])) + if err != nil { + return fmt.Errorf("getting event flag: %w", err) + } + es.Event = workflow.EventFlag(eventFlag).String() + if ok, err := b.Has(ctx, name+keySfxEventContext); err != nil { + return fmt.Errorf("checking event context: %w", err) + } else if !ok { + return nil + } + if ctxBytes, err := b.Get(ctx, name+keySfxEventContext); err != nil { + return fmt.Errorf("getting event context: %w", err) + } else { + es.Context = string(ctxBytes) + } + return nil +} + +func (s *KV) RetrieveEventSubscriptions(ctx context.Context, names []string) (map[string]*storage.EventSubscription, error) { + if len(names) < 1 { + return nil, errors.New("no names specified") + } + ret := make(map[string]*storage.EventSubscription) + for _, name := range names { + wrapped := new(kvEventSubscription) + if err := wrapped.get(ctx, s.eventStore, name); err != nil { + return ret, fmt.Errorf("getting event subscription record for %s: %w", name, err) + } + ret[name] = wrapped.EventSubscription + } + return ret, nil +} + +func kvFindEventSubNamesByEvent(ctx context.Context, b kv.TraversingBucket, f workflow.EventFlag) ([]string, error) { + var names []string + + // this.. is not very efficient. perhaps it would be better to + // make a specific bucket/index for this. 
+ for k := range b.Keys(nil) { + if !strings.HasSuffix(k, keySfxEventFlag) { + continue + } + flagBytes, err := b.Get(ctx, k) + if err != nil { + return nil, err + } + eventFlag, err := strconv.Atoi(string(flagBytes)) + if err != nil { + continue + } + if eventFlag != int(f) { + continue + } + names = append(names, k[:len(k)-len(keySfxEventFlag)]) + } + return names, nil +} + +func (s *KV) RetrieveEventSubscriptionsByEvent(ctx context.Context, f workflow.EventFlag) ([]*storage.EventSubscription, error) { + if f < 1 { + return nil, errors.New("invalid event flag") + } + names, err := kvFindEventSubNamesByEvent(ctx, s.eventStore, f) + if err != nil { + return nil, fmt.Errorf("finding event subscriptions: %w", err) + } + var ret []*storage.EventSubscription + for _, name := range names { + es := new(kvEventSubscription) + if err = es.get(ctx, s.eventStore, name); err != nil { + return ret, fmt.Errorf("getting event subscription for %s: %w", name, err) + } + ret = append(ret, es.EventSubscription) + } + return ret, nil +} + +func (s *KV) StoreEventSubscription(ctx context.Context, name string, es *storage.EventSubscription) error { + wrapped := &kvEventSubscription{EventSubscription: es} + if err := wrapped.set(ctx, s.eventStore, name); err != nil { + return fmt.Errorf("setting event subscription record for %s: %w", name, err) + } + return nil +} + +func (s *KV) DeleteEventSubscription(ctx context.Context, name string) error { + return kvDeleteKeysIfExists(ctx, s.eventStore, []string{ + name + keySfxEventFlag, + name + keySfxEventWorkflow, + name + keySfxEventContext, + }) +} diff --git a/engine/storage/kv/kv.go b/engine/storage/kv/kv.go new file mode 100644 index 0000000..12cc565 --- /dev/null +++ b/engine/storage/kv/kv.go @@ -0,0 +1,262 @@ +// Package kv implements a workflow engine storage backend using a key-value interface. 
package kv

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/micromdm/nanocmd/engine/storage"
	"github.com/micromdm/nanocmd/utils/kv"
	"github.com/micromdm/nanocmd/utils/uuid"
)

// KV is a workflow engine storage backend using a key-value interface.
type KV struct {
	mu         sync.RWMutex        // serializes all storage operations
	stepStore  kv.TraversingBucket // step records keyed by step ID
	idCmdStore kv.TraversingBucket // per-enrollment command records
	eventStore kv.TraversingBucket // event subscription records
	ider       uuid.IDer           // generates unique step IDs
}

// New creates a new key-value workflow engine storage backend.
func New(stepStore kv.TraversingBucket, idCmdStore kv.TraversingBucket, eventStore kv.TraversingBucket, ider uuid.IDer) *KV {
	return &KV{
		stepStore:  stepStore,
		idCmdStore: idCmdStore,
		eventStore: eventStore,
		ider:       ider,
	}
}

// RetrieveCommandRequestType implements the storage interface method.
// It returns the request type of the command cmdUUID for enrollment id
// and whether the engine is tracking that command at all.
func (s *KV) RetrieveCommandRequestType(ctx context.Context, id string, cmdUUID string) (string, bool, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// first check if we have a valid command
	if ok, err := kvIDCmdExists(ctx, s.idCmdStore, id, cmdUUID); err != nil {
		return "", false, fmt.Errorf("checking command exists for %s: %w", cmdUUID, err)
	} else if !ok {
		return "", false, nil
	}
	// then return the request type
	return kvGetIDCmdReqType(ctx, s.idCmdStore, id, cmdUUID)
}

// StoreCommandResponseAndRetrieveCompletedStep implements the storage interface method.
// StoreCommandResponseAndRetrieveCompletedStep records the command
// response in sc for enrollment id. If that response completes every
// command of its step for this id, the completed step result is
// returned (and the per-id command records cleaned up); otherwise it
// returns (nil, nil).
func (s *KV) StoreCommandResponseAndRetrieveCompletedStep(ctx context.Context, id string, sc *storage.StepCommandResult) (*storage.StepResult, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// check to make sure this command actually exists
	if ok, err := kvIDCmdExists(ctx, s.idCmdStore, id, sc.CommandUUID); err != nil {
		return nil, fmt.Errorf("checking command exists for %s: %w", sc.CommandUUID, err)
	} else if !ok {
		// command must exist for us to try to update a response to it
		return nil, fmt.Errorf("command not found: %s", sc.CommandUUID)
	}

	// update our command response data
	// TODO: we may not need to write the result if we've finished the command
	// to place this after the next !sc.Completed check
	if err := kvSetIDCmdUpdate(ctx, s.idCmdStore, id, sc); err != nil {
		return nil, fmt.Errorf("setting command result: %w", err)
	}

	if !sc.Completed {
		// if 'this' command itself is not complete, then the step
		// can't be completed.
		// return incomplete after we've already recorded its result.
		return nil, nil
	}
	// 'this' command is complete, let's now check to see if the step is
	// complete for this id.

	// retrieve the step ID for the command UUID for this id
	stepID, err := kvGetIDCmdStepID(ctx, s.idCmdStore, id, sc.CommandUUID)
	if err != nil {
		return nil, fmt.Errorf("getting step ID for %s: %w", sc.CommandUUID, err)
	}

	// retrieve the command UUIDs of this step
	stepCmdUUIDs, err := kvGetStepCmds(ctx, s.stepStore, stepID)
	if err != nil {
		return nil, fmt.Errorf("reading step commands: %w", err)
	}

	var commands []storage.StepCommandResult
	for _, stepCmdUUID := range stepCmdUUIDs {
		// assume complete only because if 'this' command was not
		// complete itself we shouldn't even get this far
		stepCmdComplete := true
		if stepCmdUUID != sc.CommandUUID {
			stepCmdComplete, err = kvIDCmdIsComplete(ctx, s.idCmdStore, id, stepCmdUUID)
			if err != nil {
				return nil, fmt.Errorf("checking complete status for %s: %w", stepCmdUUID, err)
			}
		}
		if !stepCmdComplete {
			// if any of our commands aren't yet finished then return as incomplete
			return nil, nil
		}

		// start assembling our storage command result
		stepCommandResult := storage.StepCommandResult{
			CommandUUID: stepCmdUUID,
			Completed:   stepCmdComplete,
		}

		if stepCmdUUID == sc.CommandUUID {
			// use 'this' command's response rather than looking it up
			stepCommandResult.RequestType = sc.RequestType
			stepCommandResult.ResultReport = sc.ResultReport
		} else {
			stepCommandResult.ResultReport, err = kvGetIDCmdResult(ctx, s.idCmdStore, id, stepCmdUUID)
			if err != nil {
				return nil, fmt.Errorf("getting result for %s: %w", stepCmdUUID, err)
			}
		}

		if stepCommandResult.RequestType == "" {
			stepCommandResult.RequestType, _, err = kvGetIDCmdReqType(ctx, s.idCmdStore, id, stepCmdUUID)
			if err != nil {
				return nil, fmt.Errorf("getting request type for %s: %w", stepCmdUUID, err)
			}
		}

		commands = append(commands, stepCommandResult)
	}

	step, err := kvGetStepResult(ctx, s.stepStore, stepID)
	if err != nil {
		return step, fmt.Errorf("retrieving step result: %w", err)
	}
	step.IDs = []string{id}
	step.Commands = commands

	// delete all id-command records
	for _, stepCmdUUID := range stepCmdUUIDs {
		if err = kvDeleteIDCmd(ctx, s.idCmdStore, id, stepCmdUUID); err != nil {
			return step, fmt.Errorf("deleting command %s: %w", stepCmdUUID, err)
		}
	}

	err = kvDeleteStepIfAllIDsComplete(ctx, s.stepStore, s.idCmdStore, stepID, stepCmdUUIDs)
	if err != nil {
		return step, fmt.Errorf("step deletion: %w", err)
	}

	return step, nil
}

// StoreStep implements the storage interface method.
// It records the step under a newly generated step ID and explodes a
// command record per (command, enrollment ID) pair. pushTime is part of
// the storage interface but unused here. For delayed (NotUntil) steps,
// the raw command is additionally stored keyed by the step ID itself so
// the worker can enqueue it later.
func (s *KV) StoreStep(ctx context.Context, step *storage.StepEnqueuingWithConfig, pushTime time.Time) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// fabricate a unique ID to track this unique step
	stepID := s.ider.ID()

	err := kvSetStep(ctx, s.stepStore, stepID, step)
	if err != nil {
		return fmt.Errorf("setting step record: %w", err)
	}

	var lastPush time.Time
	if step.NotUntil.IsZero() {
		// assume that these commands have been pushed if they haven't
		// explicitly been delayed
		lastPush = time.Now()
	}

	// explode each command to be tracked for each id
	for _, sc := range step.Commands {
		if !step.NotUntil.IsZero() {
			// write the command with an enrollment ID of the stepID
			if err = kvSetIDCmd(ctx, s.idCmdStore, stepID, stepID, &sc, time.Time{}, true); err != nil {
				return fmt.Errorf("writing step not until command records: %w", err)
			}
		}
		for _, id := range step.IDs {
			if err = kvSetIDCmd(ctx, s.idCmdStore, stepID, id, &sc, lastPush, false); err != nil {
				return fmt.Errorf("writing command records: %w", err)
			}
		}
	}

	return nil
}

// RetrieveOutstandingWorkflowStatus implements the storage interface method.
// RetrieveOutstandingWorkflowStatus returns the subset of ids that
// still have uncompleted commands for steps of workflowName — i.e. the
// enrollments for which the workflow is still outstanding.
func (s *KV) RetrieveOutstandingWorkflowStatus(ctx context.Context, workflowName string, ids []string) ([]string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	stepIDs, err := kvFindWorkflowStepsWithIDs(ctx, s.stepStore, workflowName, ids)
	if err != nil {
		return nil, fmt.Errorf("finding workflow steps: %w", err)
	}

	// now that we have workflow steps of interest (that is, that were enqueued to us)
	// let's make sure we have uncompleted steps for our ids.

	// set of ids found to have at least one uncompleted command
	idAcc := make(map[string]struct{})

	for _, stepID := range stepIDs {
		cmdUUIDs, err := kvGetStepCmds(ctx, s.stepStore, stepID)
		if err != nil {
			return nil, fmt.Errorf("getting step commands for %s: %w", stepID, err)
		}
	foundInStep:
		for _, id := range ids {
			if _, ok := idAcc[id]; ok {
				// already known outstanding; skip further checks
				continue
			}
			for _, cmdUUID := range cmdUUIDs {
				if ok, err := kvIDCmdExists(ctx, s.idCmdStore, id, cmdUUID); err != nil {
					return nil, fmt.Errorf("checking command exists for %s: %w", cmdUUID, err)
				} else if !ok {
					// command does not exist for this id, perhaps already completed (and deleted?)
					continue
				}
				if ok, err := kvIDCmdIsComplete(ctx, s.idCmdStore, id, cmdUUID); err != nil {
					return nil, fmt.Errorf("getting command complete status for %s: %w", cmdUUID, err)
				} else if !ok {
					// an uncompleted command: this id is outstanding;
					// move on to the next id for this step
					idAcc[id] = struct{}{}
					continue foundInStep
				}
			}
		}
	}

	outstandingIDs := make([]string, 0, len(idAcc))
	for id := range idAcc {
		outstandingIDs = append(outstandingIDs, id)
	}

	return outstandingIDs, nil
}

// CancelSteps implements the storage interface method.
// CancelSteps deletes the command and step records of the (optionally
// workflowName-filtered) steps enqueued for enrollment id.
func (s *KV) CancelSteps(ctx context.Context, id, workflowName string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	stepIDs, err := kvFindWorkflowStepsWithIDs(ctx, s.stepStore, workflowName, []string{id})
	if err != nil {
		return fmt.Errorf("finding workflow steps: %w", err)
	}
	for _, stepID := range stepIDs {
		cmdUUIDs, err := kvGetStepCmds(ctx, s.stepStore, stepID)
		if err != nil {
			return fmt.Errorf("getting step commands for %s: %w", stepID, err)
		}
		for _, cmdUUID := range cmdUUIDs {
			if err = kvDeleteIDCmd(ctx, s.idCmdStore, id, cmdUUID); err != nil {
				return fmt.Errorf("deleting commands for %s: %w", cmdUUID, err)
			}
		}
		if err = kvDeleteStep(ctx, s.stepStore, stepID); err != nil {
			return fmt.Errorf("deleting step for %s: %w", stepID, err)
		}
	}
	return nil
}
diff --git a/engine/storage/kv/prim.go b/engine/storage/kv/prim.go
new file mode 100644
index 0000000..722cb3e
--- /dev/null
+++ b/engine/storage/kv/prim.go
package kv

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/micromdm/nanocmd/engine/storage"
	"github.com/micromdm/nanocmd/utils/kv"
)

const (
	kvStringSep = "," // using a comma is probably dangerous

	// step bucket
	keySfxStepMeta     = ".meta"     // marshalled step metadata
	keySfxStepIDs      = ".ids"      // marshalled step enrollment identifiers
	keySfxStepCtx      = ".ctx"      // step context
	keySfxStepCmds     = ".cmds"     // marshalled command UUIDs
	keySfxStepNotUntil = ".notuntil" // step NotUntil time
	keySfxStepTimeout  = ".timeout"  // step Timeout time

	// id-command bucket
	keySfxCmdStepID   = ".step"     // associated step ID
	keySfxCmdReqType  = ".reqtype"  // MDM command request type
	keySfxCmdRaw      = ".raw"      // raw MDM command plist
	keySfxCmdLastPush = ".lastpush" // MDM command last push time
	keySfxCmdResult   = ".result"   // raw MDM command result report plist
	keySfxCmdComplete = ".done"     // command is complete indicator
	keySfxCmdID       = ".id"       // enrollment ID for this command
)

// keySfxCmdKeys lists every id-command record suffix (for bulk deletes).
var keySfxCmdKeys = []string{
	keySfxCmdStepID,  // should always exist
	keySfxCmdReqType, // should always exist
	keySfxCmdRaw,
	keySfxCmdLastPush, // should always exist
	keySfxCmdResult,
	keySfxCmdComplete,
	keySfxCmdID,
}

// keySfxStepKeys lists every step record suffix (for bulk deletes).
var keySfxStepKeys = []string{
	keySfxStepMeta, // should always exist
	keySfxStepIDs,  // should always exist
	keySfxStepCtx,
	keySfxStepCmds, // should always exist
	keySfxStepNotUntil,
	keySfxStepTimeout,
}

// marshalStrings serializes s by joining with kvStringSep.
func marshalStrings(s []string) []byte {
	return []byte(strings.Join(s, kvStringSep))
}

// unmarshalStrings reverses marshalStrings.
func unmarshalStrings(b []byte) []string {
	return strings.Split(string(b), kvStringSep)
}

// kvSetStep writes step to b.
func kvSetStep(ctx context.Context, b kv.Bucket, stepID string, step *storage.StepEnqueuingWithConfig) error {
	// check if the step is valid
	err := step.Validate()
	if err != nil {
		return fmt.Errorf("validating step: %w", err)
	}

	var notUntilBytes []byte
	if !step.NotUntil.IsZero() {
		notUntilBytes, err = step.NotUntil.MarshalText()
		if err != nil {
			return fmt.Errorf("marshal not until time: %w", err)
		}
	}

	var timeoutBytes []byte
	if !step.Timeout.IsZero() {
		timeoutBytes, err = step.Timeout.MarshalText()
		if err != nil {
			return fmt.Errorf("marshal timeout time: %w", err)
		}
	}

	// gather our command UUIDs
	var cmdUUIDs []string
	for _, cmd := range step.Commands {
		cmdUUIDs = append(cmdUUIDs, cmd.CommandUUID)
	}

	// begin writing our step data
	sr := map[string][]byte{
		stepID + keySfxStepMeta: marshalStrings([]string{step.InstanceID, step.WorkflowName, step.Name}),
		stepID + keySfxStepIDs:  marshalStrings(step.IDs),
		stepID + keySfxStepCmds: marshalStrings(cmdUUIDs),
	}
	if len(step.Context) > 0 {
		sr[stepID+keySfxStepCtx] = step.Context
	}
	if len(notUntilBytes) > 0 {
		sr[stepID+keySfxStepNotUntil] = notUntilBytes
	}
	if len(timeoutBytes) > 0 {
		sr[stepID+keySfxStepTimeout] = timeoutBytes
	}
	if err = kv.SetMap(ctx, b, sr); err != nil {
		return fmt.Errorf("writing step records: %w", err)
	}

	return nil
}

// kvGetStepCmds retrieve step command UUIDs.
func kvGetStepCmds(ctx context.Context, b kv.Bucket, stepID string) ([]string, error) {
	cmdUUIDBytes, err := b.Get(ctx, stepID+keySfxStepCmds)
	return unmarshalStrings(cmdUUIDBytes), err
}

// kvGetStepIDs returns the enrollment IDs a step is enqueued for.
func kvGetStepIDs(ctx context.Context, b kv.Bucket, stepID string) ([]string, error) {
	stepEnrIDs, err := b.Get(ctx, stepID+keySfxStepIDs)
	return unmarshalStrings(stepEnrIDs), err
}

// kvGetStepResult creates and populates a step result from a stored step.
// Note: the step's IDs and Commands are not populated here; callers fill
// those in themselves.
func kvGetStepResult(ctx context.Context, b kv.Bucket, stepID string) (*storage.StepResult, error) {
	step := new(storage.StepResult)

	// populate all the metadata of the step
	metaBytes, err := b.Get(ctx, stepID+keySfxStepMeta)
	if err != nil {
		return step, fmt.Errorf("getting step meta: %w", err)
	}
	s := unmarshalStrings(metaBytes)
	if len(s) != 3 {
		return step, errors.New("invalid step metadata length")
	}
	step.InstanceID = s[0]
	step.WorkflowName = s[1]
	step.Name = s[2]

	// fetch the (optional) step context
	if ok, err := b.Has(ctx, stepID+keySfxStepCtx); err != nil {
		return step, fmt.Errorf("checking context: %w", err)
	} else if ok {
		if step.Context, err = b.Get(ctx, stepID+keySfxStepCtx); err != nil {
			return step, fmt.Errorf("reading context: %w", err)
		}
	}

	return step, nil
}

// kvGetIDCmdReqType returns the request type of the command cmdUUID for
// enrollment id. The bool mirrors the storage interface "found" flag
// and is always true when err is nil.
func kvGetIDCmdReqType(ctx context.Context, b kv.Bucket, id, cmdUUID string) (string, bool, error) {
	// then, return get the request type
	val, err := b.Get(ctx, id+cmdUUID+keySfxCmdReqType)
	if err != nil {
		return "", false, fmt.Errorf("getting request type for %s: %w", cmdUUID, err)
	}
	return string(val), true, nil
}

// kvSetIDCmd writes sc to b.
+func kvSetIDCmd(ctx context.Context, b kv.Bucket, stepID, id string, sc *storage.StepCommandRaw, lastPush time.Time, saveRaw bool) error { + cr := map[string][]byte{ + id + sc.CommandUUID + keySfxCmdStepID: []byte(stepID), + id + sc.CommandUUID + keySfxCmdReqType: []byte(sc.RequestType), + id + sc.CommandUUID + keySfxCmdID: []byte(id), + } + if saveRaw && len(sc.Command) > 0 { + cr[id+sc.CommandUUID+keySfxCmdRaw] = sc.Command + } + + var err error + cr[id+sc.CommandUUID+keySfxCmdLastPush], err = lastPush.MarshalText() + if err != nil { + return fmt.Errorf("marshal last push time: %w", err) + } + + if err = kv.SetMap(ctx, b, cr); err != nil { + return fmt.Errorf("writing command records: %w", err) + } + + return nil +} + +// kvIDCmdExists checks to see if a command UUID exists for an enrollment ID. +func kvIDCmdExists(ctx context.Context, b kv.Bucket, id, cmdUUID string) (bool, error) { + return b.Has(ctx, id+cmdUUID+keySfxCmdStepID) +} + +// kvIDCmdIsComplete checks to see if a command UUID is complete for an enrollment ID. +func kvIDCmdIsComplete(ctx context.Context, b kv.Bucket, id, cmdUUID string) (bool, error) { + return b.Has(ctx, id+cmdUUID+keySfxCmdComplete) +} + +// kvGetIDCmdResult retrieves the previuosly saved command result (likely from kvSetIDCmdUpdate()) +func kvGetIDCmdResult(ctx context.Context, b kv.Bucket, id, cmdUUID string) ([]byte, error) { + return b.Get(ctx, id+cmdUUID+keySfxCmdResult) +} + +// kvSetIDCmdUpdate updates the command in sc with the result data. +func kvSetIDCmdUpdate(ctx context.Context, b kv.Bucket, id string, sc *storage.StepCommandResult) error { + cr := map[string][]byte{ + id + sc.CommandUUID + keySfxCmdResult: sc.ResultReport, + } + if sc.Completed { + cr[id+sc.CommandUUID+keySfxCmdComplete] = []byte{'1'} + } + return kv.SetMap(ctx, b, cr) +} + +// kvGetIDCmdStepID tries to read the step ID of a command. 
+func kvGetIDCmdStepID(ctx context.Context, b kv.Bucket, id, cmdUUID string) (string, error) { + stepIDBytes, err := b.Get(ctx, id+cmdUUID+keySfxCmdStepID) + return string(stepIDBytes), err +} + +func kvDeleteKeysIfExists(ctx context.Context, b kv.Bucket, keys []string) error { + for _, k := range keys { + ok, err := b.Has(ctx, k) + if err != nil { + return fmt.Errorf("checking key %s: %w", k, err) + } else if !ok { + continue + } + if err = b.Delete(ctx, k); err != nil { + return fmt.Errorf("delete key %s: %w", k, err) + } + } + return nil +} + +// kvDeleteIDCmd deletes all keys for a command queued for id. +func kvDeleteIDCmd(ctx context.Context, b kv.Bucket, id, cmdUUID string) error { + var keys []string + for _, k := range keySfxCmdKeys { + keys = append(keys, id+cmdUUID+k) + } + return kvDeleteKeysIfExists(ctx, b, keys) +} + +// kvDeleteStep deletes all keys for a step. +func kvDeleteStep(ctx context.Context, b kv.Bucket, stepID string) error { + var keys []string + for _, k := range keySfxStepKeys { + keys = append(keys, stepID+k) + } + return kvDeleteKeysIfExists(ctx, b, keys) +} + +// kvDeleteStepNotUntil deletes a step's NotUntil key +func kvDeleteStepNotUntil(ctx context.Context, b kv.Bucket, stepID string) error { + return kvDeleteKeysIfExists(ctx, b, []string{stepID + keySfxStepNotUntil}) +} + +// kvFindWorkflowStepsWithIDs finds specific workflow steps (step IDs) for specific enrollment IDs. +func kvFindWorkflowStepsWithIDs(ctx context.Context, b kv.TraversingBucket, name string, ids []string) ([]string, error) { + var stepIDs []string + + // this.. is not very efficient. perhaps it would be better to + // make a specific bucket/index for this. 
+start: + for k := range b.Keys(nil) { + if !strings.HasSuffix(k, keySfxStepMeta) { + continue + } + metaBytes, err := b.Get(ctx, k) + if err != nil { + return nil, fmt.Errorf("getting step meta for %s: %w", k, err) + } + stepID := k[:len(k)-len(keySfxStepMeta)] + if unmarshalStrings(metaBytes)[1] != name { + continue + } + stepEnrIDs, err := kvGetStepIDs(ctx, b, stepID) + if err != nil { + return nil, fmt.Errorf("getting step ids for %s: %w", k, err) + } + for _, stepEnrID := range stepEnrIDs { + for _, id := range ids { + if stepEnrID != id { + continue + } + // found that this step (stepID) contains our enrollment ID + stepIDs = append(stepIDs, stepID) + continue start + } + + } + } + + return stepIDs, nil +} + +func kvDeleteStepIfAllIDsComplete(ctx context.Context, b kv.Bucket, cb kv.Bucket, stepID string, cmdUUIDs []string) error { + stepEnrIDs, err := kvGetStepIDs(ctx, b, stepID) + if err != nil { + return fmt.Errorf("getting step IDs for step %s: %w", stepID, err) + } + var cmdsToDelete []struct { + id string + uuid string + } + for _, id := range stepEnrIDs { + for _, cmdUUID := range cmdUUIDs { + ok, err := kvIDCmdExists(ctx, cb, id, cmdUUID) + if err != nil { + return fmt.Errorf("checking command exists for %s: %w", cmdUUID, err) + } else if !ok { + // command doesn't exist. could be deleted already. 
+ continue + } + if ok, err = kvIDCmdIsComplete(ctx, cb, id, cmdUUID); err != nil { + return fmt.Errorf("checking command complete for %s: %w", cmdUUID, err) + } else if !ok { + // step is not complete, bail now + return nil + } + // if we got here then this command still exists but is completed + cmdsToDelete = append(cmdsToDelete, struct { + id string + uuid string + }{ + id: id, + uuid: cmdUUID, + }) + } + } + + // delete any commands + for _, cmd := range cmdsToDelete { + if err = kvDeleteIDCmd(ctx, cb, cmd.id, cmd.uuid); err != nil { + return fmt.Errorf("deleting command for %s: %w", cmd.uuid, err) + } + } + + // delete step + if err = kvDeleteStep(ctx, b, stepID); err != nil { + return fmt.Errorf("deleting step for %s: %w", stepID, err) + } + + return nil +} diff --git a/engine/storage/kv/worker.go b/engine/storage/kv/worker.go new file mode 100644 index 0000000..10e3890 --- /dev/null +++ b/engine/storage/kv/worker.go @@ -0,0 +1,108 @@ +package kv + +import ( + "context" + "fmt" + "time" + + "github.com/micromdm/nanocmd/engine/storage" +) + +// RetrieveStepsToEnqueue implements the storage interface method. 
+func (s *KV) RetrieveStepsToEnqueue(ctx context.Context, pushTime time.Time) ([]*storage.StepEnqueueing, error) { + s.mu.Lock() + defer s.mu.Unlock() + stepIDs, err := kvFindNotUntilStepsWithIDs(ctx, s.stepStore) + if err != nil { + return nil, fmt.Errorf("finding not util steps: %w", err) + } + + var ret []*storage.StepEnqueueing + + for _, stepID := range stepIDs { + se, err := kvGetStepEnqueueing(ctx, s.stepStore, s.idCmdStore, stepID) + if err != nil { + return nil, fmt.Errorf("getting step enqueueing for %s: %w", stepID, err) + } + if err = kvDeleteStepNotUntil(ctx, s.stepStore, stepID); err != nil { + return nil, fmt.Errorf("deleting step not until for %s: %w", stepID, err) + } + // reset our push times + for _, id := range se.IDs { + for _, cmd := range se.Commands { + if err = kvSetIDCmdLastPush(ctx, s.idCmdStore, id, cmd.CommandUUID, pushTime); err != nil { + return nil, fmt.Errorf("setting last push for %s: %w", cmd.CommandUUID, err) + } + } + } + ret = append(ret, se) + } + + return ret, nil +} + +// RetrieveTimedOutSteps implements the storage interface method. 
+func (s *KV) RetrieveTimedOutSteps(ctx context.Context) ([]*storage.StepResult, error) { + s.mu.Lock() + defer s.mu.Unlock() + stepIDs, err := kvFindTimedOutStepIDs(ctx, s.stepStore) + if err != nil { + return nil, fmt.Errorf("finding timeout steps: %w", err) + } + + var steps []*storage.StepResult + + for _, stepID := range stepIDs { + step, err := kvGetStepResult(ctx, s.stepStore, stepID) + if err != nil { + return nil, fmt.Errorf("retrieving step result: %w", err) + } + stepEnrIDs, err := kvGetStepIDs(ctx, s.stepStore, stepID) + if err != nil { + return nil, fmt.Errorf("retrieving step enrollment IDs: %w", err) + } + stepCmdUUIDs, err := kvGetStepCmds(ctx, s.stepStore, stepID) + if err != nil { + return nil, fmt.Errorf("retrieving step enrollment IDs: %w", err) + } + + for _, id := range stepEnrIDs { + // make a per-id copy of our step for workflow processing + step2 := *step + step2.IDs = []string{id} + + for _, stepCmdUUID := range stepCmdUUIDs { + result, err := kvGetIDCmdStepResult(ctx, s.idCmdStore, id, stepCmdUUID, false) + if err != nil { + return nil, fmt.Errorf("retrieving command result for %s: %w", stepCmdUUID, err) + } + if result != nil { + step2.Commands = append(step2.Commands, *result) + + // clear out the step commands + if err = kvDeleteIDCmd(ctx, s.idCmdStore, id, stepCmdUUID); err != nil { + return nil, fmt.Errorf("retrieving command result for %s: %w", stepCmdUUID, err) + } + } + } + + if len(step2.Commands) > 0 { + steps = append(steps, &step2) + } + } + + // clear out the step + if err = kvDeleteStep(ctx, s.stepStore, stepID); err != nil { + return nil, fmt.Errorf("deleting step for %s: %w", stepID, err) + } + } + + return steps, nil +} + +// RetrieveAndMarkRePushed implements the storage interface method. 
+func (s *KV) RetrieveAndMarkRePushed(ctx context.Context, ifBefore time.Time, setTo time.Time) ([]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + return kvFindCommandsToRePush(ctx, s.idCmdStore, ifBefore, setTo) +} diff --git a/engine/storage/kv/worker_prim.go b/engine/storage/kv/worker_prim.go new file mode 100644 index 0000000..a170eaf --- /dev/null +++ b/engine/storage/kv/worker_prim.go @@ -0,0 +1,225 @@ +package kv + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/utils/kv" +) + +func kvFindNotUntilStepsWithIDs(ctx context.Context, b kv.TraversingBucket) ([]string, error) { + var stepIDs []string + + now := time.Now() + + // this.. is not very efficient. perhaps it would be better to + // make a specific bucket/index for this. + for k := range b.Keys(nil) { + if !strings.HasSuffix(k, keySfxStepNotUntil) { + continue + } + notUntilBytes, err := b.Get(ctx, k) + if err != nil { + return nil, fmt.Errorf("getting step meta for %s: %w", k, err) + } + var notUntil time.Time + if err = notUntil.UnmarshalText(notUntilBytes); err != nil { + return nil, fmt.Errorf("unmarshal not until time: %w", err) + } + if notUntil.After(now) || notUntil.IsZero() { + continue + } + stepID := k[:len(k)-len(keySfxStepNotUntil)] + // found that this step (stepID) contains our enrollment ID + stepIDs = append(stepIDs, stepID) + } + + return stepIDs, nil +} + +func kvGetStepEnqueueing(ctx context.Context, b kv.Bucket, cb kv.Bucket, stepID string) (*storage.StepEnqueueing, error) { + step := new(storage.StepEnqueueing) + + // populate all the metadata of the step + metaBytes, err := b.Get(ctx, stepID+keySfxStepMeta) + if err != nil { + return step, fmt.Errorf("getting step meta: %w", err) + } + s := unmarshalStrings(metaBytes) + if len(s) != 3 { + return step, errors.New("invalid step metadata length") + } + step.InstanceID = s[0] + step.WorkflowName = s[1] + step.Name = s[2] + + // get 
enrollment IDs + stepEnrIDsBytes, err := b.Get(ctx, stepID+keySfxStepIDs) + if err != nil { + return step, fmt.Errorf("getting step enrollment ID for %s: %w", stepID, err) + } + step.IDs = unmarshalStrings(stepEnrIDsBytes) + + // retrieve the list of commands + stepCmdUUIDs, err := kvGetStepCmds(ctx, b, stepID) + if err != nil { + return step, fmt.Errorf("getting step commands for %s: %w", stepCmdUUIDs, err) + } + + for _, stepCmdUUID := range stepCmdUUIDs { + sc, err := kvGetIDCmdRaw(ctx, cb, stepID, stepCmdUUID) + if err != nil { + return nil, fmt.Errorf("getting command queue data for %s: %w", stepCmdUUID, err) + } + step.Commands = append(step.Commands, *sc) + if err = kvDeleteIDCmd(ctx, cb, stepID, stepCmdUUID); err != nil { + return nil, fmt.Errorf("deleting command for %s: %w", stepID, err) + } + } + + // no longer consider this for for future NotUntils + if err = b.Delete(ctx, stepID+keySfxStepNotUntil); err != nil { + return nil, fmt.Errorf("deleting not now for %s: %w", stepID, err) + } + + return step, nil +} + +func kvGetIDCmdRaw(ctx context.Context, b kv.Bucket, id, cmdUUID string) (*storage.StepCommandRaw, error) { + cr, err := kv.GetMap(ctx, b, []string{ + id + cmdUUID + keySfxCmdReqType, + id + cmdUUID + keySfxCmdRaw, + }) + if err != nil { + return nil, err + } + return &storage.StepCommandRaw{ + CommandUUID: cmdUUID, + RequestType: string(cr[id+cmdUUID+keySfxCmdReqType]), + Command: cr[id+cmdUUID+keySfxCmdRaw], + }, nil +} + +func kvFindTimedOutStepIDs(ctx context.Context, b kv.TraversingBucket) ([]string, error) { + var stepIDs []string + + now := time.Now() + + // this.. is not very efficient. perhaps it would be better to + // make a specific bucket/index for this. 
+ for k := range b.Keys(nil) { + if !strings.HasSuffix(k, keySfxStepTimeout) { + continue + } + timeoutBytes, err := b.Get(ctx, k) + if err != nil { + return nil, fmt.Errorf("getting step meta for %s: %w", k, err) + } + var timeout time.Time + if err = timeout.UnmarshalText(timeoutBytes); err != nil { + return nil, fmt.Errorf("unmarshal not until time: %w", err) + } + if timeout.After(now) || timeout.IsZero() { + continue + } + stepID := k[:len(k)-len(keySfxStepTimeout)] + // found that this step (stepID) contains our enrollment ID + stepIDs = append(stepIDs, stepID) + } + + return stepIDs, nil +} + +func kvGetIDCmdStepResult(ctx context.Context, b kv.Bucket, id, cmdUUID string, noCheckExists bool) (*storage.StepCommandResult, error) { + if !noCheckExists { + ok, err := kvIDCmdExists(ctx, b, id, cmdUUID) + if err != nil { + return nil, fmt.Errorf("checking command exists for %s: %w", cmdUUID, err) + } else if !ok { + return nil, nil + } + } + result := &storage.StepCommandResult{ + CommandUUID: cmdUUID, + ResultReport: []byte{}, + } + var err error + result.Completed, err = kvIDCmdIsComplete(ctx, b, id, cmdUUID) + if err != nil { + return nil, fmt.Errorf("checking command completed for %s: %w", cmdUUID, err) + } + result.RequestType, _, err = kvGetIDCmdReqType(ctx, b, id, cmdUUID) + if err != nil { + return nil, fmt.Errorf("getting command req type for %s: %w", cmdUUID, err) + } + if ok, err := b.Has(ctx, id+cmdUUID+keySfxCmdResult); err != nil { + return nil, fmt.Errorf("checking result exists for %s: %w", cmdUUID, err) + } else if ok { + result.ResultReport, err = kvGetIDCmdResult(ctx, b, id, cmdUUID) + if err != nil { + return nil, fmt.Errorf("getting command result for %s: %w", cmdUUID, err) + } + } + return result, err +} + +func kvFindCommandsToRePush(ctx context.Context, b kv.TraversingBucket, ifBefore time.Time, setTo time.Time) ([]string, error) { + var ids []string + + resetLastPushes := make(map[string][]byte) + + // this.. is not very efficient. 
perhaps it would be better to + // make a specific bucket/index for this. + for k := range b.Keys(nil) { + if !strings.HasSuffix(k, keySfxCmdLastPush) { + continue + } + lastPushBytes, err := b.Get(ctx, k) + if err != nil { + return nil, fmt.Errorf("getting step meta for %s: %w", k, err) + } + var lastPush time.Time + if err = lastPush.UnmarshalText(lastPushBytes); err != nil { + return nil, fmt.Errorf("unmarshal not until time: %w", err) + } + + if lastPush.IsZero() || lastPush.After(ifBefore) { + continue + } + + idCmd := k[:len(k)-len(keySfxCmdLastPush)] + // lookup the enrollment ID of this command + idBytes, err := b.Get(ctx, idCmd+keySfxCmdID) + if err != nil { + return nil, fmt.Errorf("getting command for %s: %w", idCmd, err) + } + id := string(idBytes) + + // reset the last push time + if lastPushBytes, err = setTo.MarshalText(); err != nil { + return nil, fmt.Errorf("marshal now: %w", err) + } + resetLastPushes[k] = lastPushBytes + + // found that this step (stepID) contains our enrollment ID + ids = append(ids, id) + } + + if err := kv.SetMap(ctx, b, resetLastPushes); err != nil { + return ids, fmt.Errorf("resetting pushes: %w", err) + } + + return ids, nil +} + +func kvSetIDCmdLastPush(ctx context.Context, b kv.Bucket, id, cmdUUID string, lastPush time.Time) error { + lastPushBytes, err := lastPush.MarshalText() + if err != nil { + return fmt.Errorf("marshal last push: %w", err) + } + return b.Set(ctx, id+cmdUUID+keySfxCmdLastPush, lastPushBytes) +} diff --git a/engine/storage/storage.go b/engine/storage/storage.go new file mode 100644 index 0000000..4d6cc51 --- /dev/null +++ b/engine/storage/storage.go @@ -0,0 +1,231 @@ +// Package storage defines types and primitives for workflow engine storage backends. +package storage + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/micromdm/nanocmd/workflow" +) + +var ( + // ErrInvalidStorageStep is returned when validating storage steps. 
	ErrEmptyStorageStep = errors.New("empty storage step")

	// ErrEmptyStepContext is returned when a step context is nil.
	ErrEmptyStepContext = errors.New("invalid step context")

	// ErrMissingWorkflowName is returned when a step has no workflow name.
	ErrMissingWorkflowName = errors.New("missing workflow name")
	// ErrMissingInstanceID is returned when a step has no instance ID.
	ErrMissingInstanceID = errors.New("missing instance id")
	// ErrMissingIDs is returned when a step has no enrollment IDs.
	ErrMissingIDs = errors.New("missing IDs")
	// ErrMissingCommands is returned when a step has no commands.
	ErrMissingCommands = errors.New("missing commands")
)

// StepContext is common contextual information for steps.
// An approximately serialized form of a workflow step.
type StepContext struct {
	WorkflowName string // workflow name. used for routing back to the workflow via the engine's step registry.
	InstanceID   string // unique ID of this 'instance' of a workflow.
	Name         string // workflow step name. defined and used by the workflow.
	Context      []byte // workflow step context (in raw marshaled binary form).
}

// Validate checks for missing values.
func (sc *StepContext) Validate() error {
	if sc == nil {
		return ErrEmptyStepContext
	}
	if sc.WorkflowName == "" {
		return ErrMissingWorkflowName
	}
	if sc.InstanceID == "" {
		return ErrMissingInstanceID
	}
	return nil
}

// StepCommandResult is the result of MDM commands from enrollments.
// An approximately serialized form of a workflow step command response.
type StepCommandResult struct {
	CommandUUID  string
	RequestType  string
	ResultReport []byte // raw XML plist result of MDM command
	Completed    bool   // whether this specific command did *not* have a NotNow status
}

// StepCommandRaw is a raw command, its UUID, and request type.
// An approximately serialized form of a workflow step command.
type StepCommandRaw struct {
	CommandUUID string
	RequestType string

	// raw XML plist of MDM command
	// Note that in the case of a step enqueuing a command is considered
	// enqueued with the MDM server if its NotUntil.IsZero() returns
	// true.
	Command []byte
}

// StepEnqueueing is a step for storage that is to be enqueued.
// Ostensibly used to enqueue the commands to MDM and log metadata.
// An approximately serialized form of a workflow step.
type StepEnqueueing struct {
	StepContext
	IDs      []string
	Commands []StepCommandRaw
}

// Validate checks for missing values.
func (se *StepEnqueueing) Validate() error {
	if se == nil {
		return ErrEmptyStorageStep
	}
	if err := se.StepContext.Validate(); err != nil {
		return fmt.Errorf("storage step context invalid: %w", err)
	}
	if len(se.IDs) < 1 {
		return ErrMissingIDs
	}
	if len(se.Commands) < 1 {
		return ErrMissingCommands
	}
	return nil
}

// StepEnqueuingWithConfig is for enqueuing a step with additional configuration.
// An approximately serialized form of a workflow step enqueueing.
type StepEnqueuingWithConfig struct {
	StepEnqueueing

	// wait until after this time to enqueue step commands
	// note that this has implications for the storage backends.
	// if a NotUntil time is set then the raw commands need to be saved
	// (so that they can be enqueued to the MDM server later). if a
	// NotUntil time is not set then the raw commands can be discarded
	// as we assume that they've been delivered. after a NotUntil time
	// has past then a storage backend can get rid of the raw commands
	// in order to save space.
	NotUntil time.Time

	Timeout time.Time // step times out if not complete by this time
}

// Validate checks for missing values.
func (se *StepEnqueuingWithConfig) Validate() error {
	if se == nil {
		return ErrEmptyStorageStep
	}
	return se.StepEnqueueing.Validate()
}

// StepResult represent the results of all of a step's MDM commands.
// An approximately serialized form of a workflow step result.
type StepResult struct {
	StepContext
	IDs      []string
	Commands []StepCommandResult
}

// Storage is the primary interface for workflow engine backend storage implementations.
type Storage interface {
	// RetrieveCommandRequestType retrieves a command request type given id and uuid.
	// This effectively tells the engine whether the provided command UUID
	// originated from a workflow enqueueing or not (i.e. whether processing should
	// continue).
	RetrieveCommandRequestType(ctx context.Context, id string, uuid string) (string, bool, error)

	// StoreCommandResponseAndRetrieveCompletedStep stores a command response and returns the completed step for the id.
	// The completed step will be nil if sc does not complete the step.
	// Implementations will need to lookup the step that sc (the Command UUID
	// and Request Type) belongs to. The provided Result Report and Completed
	// values should determine whether this step is completed or not (depending
	// on other pending commands for this id).
	//
	// Any retrieved completed step is assumed to be permanently deleted from storage.
	StoreCommandResponseAndRetrieveCompletedStep(ctx context.Context, id string, sc *StepCommandResult) (*StepResult, error)

	// StoreStep stores a step and its commands for later state tracking.
	// Depending on whether a command was enqueued immediately (NotUntil) the
	// implementation may discard the raw command Plist bytes.
	StoreStep(context.Context, *StepEnqueuingWithConfig, time.Time) error

	// RetrieveOutstandingWorkflowStatus finds enrollment IDs with an outstanding workflow step from a given set.
	RetrieveOutstandingWorkflowStatus(ctx context.Context, workflowName string, ids []string) (outstandingIDs []string, err error)

	// CancelSteps cancels workflow steps for id.
	// If workflowName is empty then implementations should cancel all
	// workflow steps for the id. "NotUntil" (future) workflow steps
	// should also be canceled.
	CancelSteps(ctx context.Context, id, workflowName string) error
}

// WorkerStorage is used by the workflow engine worker for async (scheduled) actions.
type WorkerStorage interface {
	// RetrieveStepsToEnqueue fetches steps to be enqueued that were enqueued "later" with NotUntil.
	// These steps-for-enqueueing will be enqueued to the MDM server for the IDs.
	// Returned steps may not be per-ID and may target multiple IDs (depending
	// on how the workflow originally enqueued them).
	//
	// Any retrieved step is assumed to be permanently marked as enqueued and
	// will not be retrieved again with this method. As such the raw command
	// bytes can be discarded by the implementation (to e.g. save space).
	RetrieveStepsToEnqueue(ctx context.Context, pushTime time.Time) ([]*StepEnqueueing, error)

	// RetrieveTimedOutSteps fetches steps that have timed out.
	// These steps will be delivered to their workflows as timed out.
	//
	// Any retrieved completed step is assumed to be permanently deleted from storage.
	RetrieveTimedOutSteps(ctx context.Context) ([]*StepResult, error)

	// RetrieveAndMarkRePushed retrieves a set of IDs that need to have APNs re-pushes sent.
	// Marks those IDs as having been pushed to now.
	//
	// Any retrieved IDs are assumed to have been successfully APNs pushed to and will be marked so at pushTime.
	RetrieveAndMarkRePushed(ctx context.Context, ifBefore time.Time, pushTime time.Time) ([]string, error)
}

// AllStorage is the complete engine storage interface: both the primary
// engine storage and the worker storage.
type AllStorage interface {
	Storage
	WorkerStorage
}

// EventSubscription is a user-configured subscription for starting workflows with optional context.
type EventSubscription struct {
	Event    string `json:"event"`
	Workflow string `json:"workflow"`
	Context  string `json:"context,omitempty"`
}

var (
	// ErrEmptyEventSubscription is returned when validating a nil event subscription.
	ErrEmptyEventSubscription = errors.New("empty event subscription")
	// ErrMissingEvent is returned when an event subscription has no event type.
	ErrMissingEvent = errors.New("missing event type")
)

// Validate checks for missing or invalid values.
func (es *EventSubscription) Validate() error {
	if es == nil {
		return ErrEmptyEventSubscription
	}
	if es.Event == "" {
		return ErrMissingEvent
	}
	if !workflow.EventFlagForString(es.Event).Valid() {
		return fmt.Errorf("invalid event type: %s", es.Event)
	}
	if es.Workflow == "" {
		return ErrMissingWorkflowName
	}
	return nil
}

// ReadEventSubscriptionStorage describes storage backends that can retrieve and query event subscriptions.
type ReadEventSubscriptionStorage interface {
	RetrieveEventSubscriptions(ctx context.Context, names []string) (map[string]*EventSubscription, error)
	RetrieveEventSubscriptionsByEvent(ctx context.Context, f workflow.EventFlag) ([]*EventSubscription, error)
}

// EventSubscriptionStorage describes storage backends that can also write and delete event subscriptions.
type EventSubscriptionStorage interface {
	ReadEventSubscriptionStorage
	StoreEventSubscription(ctx context.Context, name string, es *EventSubscription) error
	DeleteEventSubscription(ctx context.Context, name string) error
}
diff --git a/engine/storage/test/test.go b/engine/storage/test/test.go
new file mode 100644
index 0000000..260ffae
--- /dev/null
+++ b/engine/storage/test/test.go
@@ -0,0 +1,446 @@
package test

import (
	"bytes"
	"context"
	"testing"
	"time"

	"github.com/micromdm/nanocmd/engine/storage"
)

// stepCmdWithUUID tries to find the StepCommand having uuid within step.
func stepCmdWithUUID(step *storage.StepResult, uuid string) (storage.StepCommandResult, bool) {
	for _, cmd := range step.Commands {
		if cmd.CommandUUID == uuid {
			return cmd, true
		}
	}
	return storage.StepCommandResult{}, false
}

// TestEngineStorage runs the full engine storage test suite against the
// storage implementations returned by newStorage.
func TestEngineStorage(t *testing.T, newStorage func() storage.AllStorage) {
	s := newStorage()
	mainTest(t, s)

	t.Run("testEngineStorageNotUntil", func(t *testing.T) {
		testEngineStorageNotUntil(t, s)
	})

	t.Run("testEngineStepTimeout", func(t *testing.T) {
		testEngineStepTimeout(t, s)
	})

	t.Run("testRepush", func(t *testing.T) {
		// fresh storage: repush scans all commands' last-push times
		testRepush(t, newStorage())
	})

	t.Run("testOutstanding", func(t *testing.T) {
		testOutstanding(t, s)
	})
}

// mainTest exercises the core store-step / store-response / complete-step
// lifecycle with a table of steps, each followed by a table of command
// responses and their expected outcomes.
func mainTest(t *testing.T, s storage.AllStorage) {
	ctx := context.Background()

	// responseTest describes a single command response delivered to storage
	// and the assertions to make about its effect.
	type responseTest struct {
		testName          string
		resp              *storage.StepCommandResult
		shouldBeCompleted bool   // expect the response to complete the whole step
		shouldError       bool   // expect StoreCommandResponseAndRetrieveCompletedStep to error
		reqType           string // expected stored request type
		skipReqType       bool
		id                string // enrollment id
		skipCmdLen        bool
		skipByteCompare   bool
	}

	fakeID := "456DFB"

	// yikes! bit of a beast, these tests
	for _, tStep := range []struct {
		testName    string
		step        *storage.StepEnqueuingWithConfig
		shouldError bool
		respSteps   []responseTest
	}{
		{
			"nil_step",
			nil,
			true,
			nil,
		},
		{
			"nil_id",
			&storage.StepEnqueuingWithConfig{},
			true,
			nil,
		},
		{
			"missing_command_ReportResults",
			&storage.StepEnqueuingWithConfig{
				StepEnqueueing: storage.StepEnqueueing{
					IDs: []string{fakeID},
					StepContext: storage.StepContext{
						WorkflowName: "workflow.name.test1",
						InstanceID:   "A",
					},
					Commands: []storage.StepCommandRaw{
						{
							CommandUUID: "UUID-1",
							RequestType: "DeviceInformation",
						},
					},
				},
			},
			false,
			[]responseTest{{
				testName: "missing_ReportResults",
				resp: &storage.StepCommandResult{
					CommandUUID: "UUID-1",
					Completed:   true,
					// missing ReportResults (should error)
				},
				shouldBeCompleted: false,
				shouldError:       true,
			}},
		},
		{
			"normal_test1_command_multi_id",
			&storage.StepEnqueuingWithConfig{
				StepEnqueueing: storage.StepEnqueueing{
					IDs: []string{"AAA111", "BBB222"},
					StepContext: storage.StepContext{
						WorkflowName: "workflow.name.test1",
						InstanceID:   "B",
					},
					Commands: []storage.StepCommandRaw{
						{
							CommandUUID: "UUID-2",
							RequestType: "DeviceInformation",
						},
					},
				},
			},
			false,
			[]responseTest{
				{
					testName: "UUID-2-testResp1",
					resp: &storage.StepCommandResult{
						CommandUUID:  "UUID-2",
						Completed:    true,
						ResultReport: []byte("Resp1-UUID-1"),
					},
					shouldBeCompleted: true,
					shouldError:       false,
					reqType:           "DeviceInformation",
					id:                "AAA111",
				},
				{
					testName: "UUID-2-testResp1-2nd",
					resp: &storage.StepCommandResult{
						CommandUUID:  "UUID-2",
						Completed:    true,
						ResultReport: []byte("Resp2-UUID-1"),
					},
					shouldBeCompleted: true,
					shouldError:       false,
					reqType:           "DeviceInformation",
					id:                "BBB222",
				},
				{
					// should fail (duplicate response for same id)
					testName: "UUID-2-testResp1-3rd-dup",
					resp: &storage.StepCommandResult{
						CommandUUID:  "UUID-2",
						Completed:    true,
						ResultReport: []byte("Resp2-UUID-1"),
					},
					shouldBeCompleted: false,
					shouldError:       true,
					id:                "BBB222",
					skipByteCompare:   false,
				},
			},
		},
		{
			"normal_test1_command_fail_resp",
			&storage.StepEnqueuingWithConfig{
				StepEnqueueing: storage.StepEnqueueing{
					IDs: []string{fakeID},
					StepContext: storage.StepContext{
						WorkflowName: "workflow.name.test1",
						InstanceID:   "C",
					},
					Commands: []storage.StepCommandRaw{
						{
							CommandUUID: "UUID-3",
							RequestType: "DeviceInformation",
						},
					},
				},
			},
			false,
			[]responseTest{
				{
					testName: "UUID-1-testResp1",
					resp: &storage.StepCommandResult{
						CommandUUID:  "UUID-NotFound",
						Completed:    true,
						ResultReport: []byte("UUID-NotFound"),
					},
					shouldBeCompleted: false,
					shouldError:       true,
					id:                fakeID,
				},
			},
		},
		{
			"multi-command-single-id",
			&storage.StepEnqueuingWithConfig{
				StepEnqueueing: storage.StepEnqueueing{
					IDs: []string{"AAA111"},
					StepContext: storage.StepContext{
						WorkflowName: "workflow.name.test1",
						InstanceID:   "D",
					},
					Commands: []storage.StepCommandRaw{
						{
							CommandUUID: "X-UUID-1",
							RequestType: "DeviceInformation",
						},
						{
							CommandUUID: "X-UUID-2",
							RequestType: "SecurityInfo",
						},
					},
				},
			},
			false,
			[]responseTest{
				{
					testName: "resp1",
					resp: &storage.StepCommandResult{
						CommandUUID:  "X-UUID-1",
						Completed:    true,
						ResultReport: []byte("Resp1-UUID-1"),
					},
					shouldBeCompleted: false,
					shouldError:       false,
					reqType:           "DeviceInformation",
					id:                "AAA111",
				},
				{
					testName: "resp2",
					resp: &storage.StepCommandResult{
						CommandUUID:  "X-UUID-2",
						Completed:    true,
						ResultReport: []byte("Resp2-UUID-2"),
					},
					shouldBeCompleted: true,
					shouldError:       false,
					reqType:           "SecurityInfo",
					id:                "AAA111",
				},
			},
		},

		{
			// this caused a crash in the inmem storage
			"multi-command-same-uuid",
			&storage.StepEnqueuingWithConfig{
				StepEnqueueing: storage.StepEnqueueing{
					IDs: []string{"CCC222"},
					StepContext: storage.StepContext{
						WorkflowName: "workflow.name.test1",
						InstanceID:   "E",
					},
					Commands: []storage.StepCommandRaw{
						{
							CommandUUID: "UUID-1",
							RequestType: "DeviceInformation",
						},
						{
							CommandUUID: "UUID-1",
							RequestType: "SecurityInfo",
						},
					},
				},
			},
			false,
			[]responseTest{
				{
					testName: "resp1",
					resp: &storage.StepCommandResult{
						CommandUUID:  "UUID-1",
						Completed:    true,
						ResultReport: []byte("UUID-1"),
					},
					shouldBeCompleted: true,
					shouldError:       false,
					skipReqType:       true,
					skipCmdLen:        true,
					id:                "CCC222",
				},
				{
					testName: "resp2",
					resp: &storage.StepCommandResult{
						CommandUUID:  "UUID-1",
						Completed:    true,
						ResultReport: []byte("UUID-1"),
					},
					shouldBeCompleted: false,
					shouldError:       true,
					skipReqType:       true,
					id:                "CCC222",
				},
			},
		},
	} {
		t.Run("step-"+tStep.testName, func(t *testing.T) {
			err := s.StoreStep(ctx, tStep.step, time.Now())
			if tStep.shouldError && err == nil {
				t.Fatalf("StoreStep: expected error; step=%v", tStep.step)
			} else if !tStep.shouldError && err != nil {
				t.Fatalf("StoreStep: expected no error; step=%v err=%v", tStep.step, err)
			}

			for _, tRespStep := range tStep.respSteps {
				t.Run("cmd-resp-"+tRespStep.testName, func(t *testing.T) {
					reqType, _, err := s.RetrieveCommandRequestType(ctx, tRespStep.id, tRespStep.resp.CommandUUID)
					if err != nil {
						t.Fatalf("err looking up request type for uuid=%s: %v", tRespStep.resp.CommandUUID, err)
					}

					if have, want := reqType, tRespStep.reqType; !tRespStep.skipReqType && have != want {
						t.Errorf("request type does not match; have: %s, want: %s", have, want)
					}

					completedStep, err := s.StoreCommandResponseAndRetrieveCompletedStep(ctx, tRespStep.id, tRespStep.resp)

					if tRespStep.shouldError && err == nil {
						t.Errorf("StoreCommandResponseAndRetrieveCompletedStep: expected error for resp; command=%v", tRespStep.resp)
					} else if !tRespStep.shouldError && err != nil {
						t.Fatalf("StoreCommandResponseAndRetrieveCompletedStep: expected no error for resp; command=%v, err=%v", tRespStep.resp, err)
					}

					if tRespStep.shouldBeCompleted && completedStep == nil {
						t.Errorf("StoreCommandResponseAndRetrieveCompletedStep: found incomplete, expected complete; command=%v", tRespStep.resp)
					} else if !tRespStep.shouldBeCompleted && completedStep != nil {
						t.Errorf("StoreCommandResponseAndRetrieveCompletedStep: found complete, expected incomplete; command=%v completed=%v", tRespStep.resp, completedStep)
					}

					if completedStep != nil {
						if want, have := len(tStep.step.Commands), len(completedStep.Commands); !tRespStep.skipCmdLen && have != want {
							t.Errorf("mismatch of returned commands; have %d, wanted %d", have, want)
						}

						for _, eCmd := range tStep.respSteps {
							sc, ok := stepCmdWithUUID(completedStep, eCmd.resp.CommandUUID)
							if !ok {
								t.Errorf("command uuid not found: uuid=%v", eCmd.resp.CommandUUID)
							} else {
								// NOTE(review): this compares bytes only when
								// skipByteCompare is TRUE, which reads as
								// inverted relative to the field name —
								// confirm whether `!eCmd.skipByteCompare`
								// was intended.
								if have, want := sc.ResultReport, eCmd.resp.ResultReport; eCmd.skipByteCompare && bytes.Compare(have, want) != 0 {
									t.Errorf("command result raw does not match: have=%s, want=%s", string(have), string(want))
								}

							}

						}
					}
				})
			}

			if tStep.step != nil && len(tStep.step.IDs) > 0 {
				// clear just this workflow
				err = s.CancelSteps(ctx, tStep.step.IDs[0], tStep.step.WorkflowName)
				if err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}

// testOutstanding verifies RetrieveOutstandingWorkflowStatus reports
// enrollment IDs that still have an outstanding step.
func testOutstanding(t *testing.T, s storage.AllStorage) {
	ctx := context.Background()

	enq := &storage.StepEnqueuingWithConfig{
		StepEnqueueing: storage.StepEnqueueing{
			IDs: []string{"EnrollmentID-4", "EnrollmentID-5"},
			StepContext: storage.StepContext{
				WorkflowName: "workflow.name.test1",
				InstanceID:   "InstanceID-1",
			},
			Commands: []storage.StepCommandRaw{
				{
					CommandUUID: "UUID-1",
					RequestType: "DeviceInformation",
					Command:     []byte("Command-1"),
				},
			},
		},
	}

	err := s.StoreStep(ctx, enq, time.Now())
	if err != nil {
		t.Fatal(err)
	}

	outstandingIDs, err := s.RetrieveOutstandingWorkflowStatus(ctx, enq.WorkflowName,
enq.IDs) + if err != nil { + t.Fatal(err) + } + + if have, want := len(outstandingIDs), 2; have != want { + t.Errorf("have: %v, want: %v: %v", have, want, outstandingIDs) + } + + // complete one of the commands + _, err = s.StoreCommandResponseAndRetrieveCompletedStep(ctx, enq.IDs[0], &storage.StepCommandResult{ + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + ResultReport: []byte("Result-1"), + Completed: true, + }) + if err != nil { + t.Fatal(err) + } + + outstandingIDs, err = s.RetrieveOutstandingWorkflowStatus(ctx, enq.WorkflowName, enq.IDs) + if err != nil { + t.Fatal(err) + } + + if have, want := len(outstandingIDs), 1; have != want { + t.Fatalf("have: %v, want: %v: %v", have, want, outstandingIDs) + } + + err = s.CancelSteps(ctx, outstandingIDs[0], "invalid.workflow.name") + if err != nil { + t.Fatal(err) + } + + outstandingIDs, err = s.RetrieveOutstandingWorkflowStatus(ctx, enq.WorkflowName, []string{outstandingIDs[0]}) + if err != nil { + t.Fatal(err) + } + + if have, want := len(outstandingIDs), 1; have != want { + t.Fatalf("have: %v, want: %v: %v", have, want, outstandingIDs) + } + + err = s.CancelSteps(ctx, outstandingIDs[0], enq.WorkflowName) + if err != nil { + t.Fatal(err) + } + + outstandingIDs, err = s.RetrieveOutstandingWorkflowStatus(ctx, enq.WorkflowName, []string{outstandingIDs[0]}) + if err != nil { + t.Fatal(err) + } + + if have, want := len(outstandingIDs), 0; have != want { + t.Errorf("have: %v, want: %v: %v", have, want, outstandingIDs) + } +} diff --git a/engine/storage/test/worker.go b/engine/storage/test/worker.go new file mode 100644 index 0000000..16c342e --- /dev/null +++ b/engine/storage/test/worker.go @@ -0,0 +1,333 @@ +package test + +import ( + "context" + "reflect" + "sort" + "testing" + "time" + + "github.com/micromdm/nanocmd/engine/storage" +) + +func testEngineStorageNotUntil(t *testing.T, s storage.AllStorage) { + ctx := context.Background() + + for _, test := range []struct { + testName string + steps 
[]*storage.StepEnqueuingWithConfig + + stepsWanted int + stepsWanted2 int + reqTypeWanted string + }{ + { + testName: "in-the-past", + steps: []*storage.StepEnqueuingWithConfig{ + { + StepEnqueueing: storage.StepEnqueueing{ + IDs: []string{"EnrollmentID-1", "EnrollmentID-2"}, + StepContext: storage.StepContext{ + WorkflowName: "workflow.name.test1", + InstanceID: "InstanceID-1", + }, + Commands: []storage.StepCommandRaw{ + { + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + Command: []byte("Command-1"), + }, + }, + }, + NotUntil: time.Now().Add(-time.Minute), // by setting NotUntil we should register this + }, + }, + stepsWanted: 1, + stepsWanted2: 0, + reqTypeWanted: "DeviceInformation", + }, + } { + t.Run(test.testName, func(t *testing.T) { + for _, step := range test.steps { + err := s.StoreStep(ctx, step, time.Now()) + if err != nil { + t.Fatal(err) + } + } + + steps, err := s.RetrieveStepsToEnqueue(ctx, time.Now()) + if err != nil { + t.Fatal(err) + } + + if have, want := len(steps), test.stepsWanted; have != want { + t.Fatalf("expected steps: have %v, want %v", have, want) + } + + if len(steps) >= 1 && test.reqTypeWanted != "" && len(steps[0].Commands) < 1 { + t.Error("request type wanted, but no commands returned") + } + + if len(steps) >= 1 && len(steps[0].Commands) >= 1 { + if have, want := steps[0].Commands[0].RequestType, test.reqTypeWanted; have != want { + t.Errorf("expected request type: have %v, want %v", have, want) + } + } + + for _, step := range steps { + for _, cmd := range step.Commands { + if len(cmd.Command) < 1 { + t.Errorf("no command bytes for NotUntil command?") + } + } + // regression test + if len(step.IDs) < 1 { + t.Error("no IDs for step") + } + } + + steps, err = s.RetrieveStepsToEnqueue(ctx, time.Now()) + if err != nil { + t.Fatal(err) + } + + if have, want := len(steps), test.stepsWanted2; have != want { + t.Fatalf("expected steps (2nd): have %v, want %v", have, want) + } + }) + } +} + +func testEngineStepTimeout(t 
*testing.T, s storage.AllStorage) { + ctx := context.Background() + + type response struct { + id string + sc storage.StepCommandResult + completed bool + } + + for _, test := range []struct { + testName string + steps []*storage.StepEnqueuingWithConfig + resps []response + + stepsWanted int + stepsWanted2 int + stepsWanted3 int + reqTypeWanted string + }{ + { + testName: "timeout-test-1", + steps: []*storage.StepEnqueuingWithConfig{ + { + StepEnqueueing: storage.StepEnqueueing{ + IDs: []string{"EnrollmentID-1", "EnrollmentID-2", "EnrollmentID-3"}, + StepContext: storage.StepContext{ + WorkflowName: "workflow.name.test1", + InstanceID: "InstanceID-1", + }, + Commands: []storage.StepCommandRaw{ + { + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + }, + }, + }, + Timeout: time.Now().Add(-time.Minute), + }, + }, + resps: []response{ + { + id: "EnrollmentID-1", + sc: storage.StepCommandResult{ + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + ResultReport: []byte("Command-1"), + Completed: true, + }, + completed: true, + }, + }, + stepsWanted: 2, + stepsWanted2: 0, + reqTypeWanted: "DeviceInformation", + }, + } { + t.Run(test.testName, func(t *testing.T) { + for _, step := range test.steps { + err := s.StoreStep(ctx, step, time.Now()) + if err != nil { + t.Fatal(err) + } + } + + for _, response := range test.resps { + step, err := s.StoreCommandResponseAndRetrieveCompletedStep(ctx, response.id, &response.sc) + if err != nil { + t.Fatal(err) + } + if want, have := response.completed, step != nil; have != want { + t.Errorf("mismatched completed; have: %v, want: %v", have, want) + } + } + + steps, err := s.RetrieveTimedOutSteps(ctx) + if err != nil { + t.Fatal(err) + } + + if have, want := len(steps), test.stepsWanted; have != want { + t.Fatalf("expected steps: have: %v, want: %v", have, want) + } + + for _, step := range steps { + if step.WorkflowName == "" { + t.Error("empty workflow name") + } + } + + if len(steps) >= 1 && test.reqTypeWanted 
!= "" && len(steps[0].Commands) < 1 { + t.Error("request type wanted, but no commands returned") + } + + if len(steps) >= 1 && len(steps[0].Commands) >= 1 { + if have, want := steps[0].Commands[0].RequestType, test.reqTypeWanted; have != want { + t.Errorf("expected request type: have: %v, want: %v", have, want) + } + } + + steps, err = s.RetrieveTimedOutSteps(ctx) + if err != nil { + t.Fatal(err) + } + + if have, want := len(steps), test.stepsWanted2; have != want { + t.Fatalf("expected steps (2nd): have %v, want %v", have, want) + } + }) + } +} + +func testRepush(t *testing.T, s storage.AllStorage) { + ctx := context.Background() + enq := &storage.StepEnqueuingWithConfig{ + StepEnqueueing: storage.StepEnqueueing{ + IDs: []string{"EnrollmentID-1", "EnrollmentID-2"}, + StepContext: storage.StepContext{ + WorkflowName: "workflow.name.test1", + InstanceID: "InstanceID-1", + }, + Commands: []storage.StepCommandRaw{ + { + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + Command: []byte("Command-1"), + }, + }, + }, + // NotUntil: not setting NotUntil to sure these are simulated to be sent pushes "now" + } + + now := time.Now() + + err := s.StoreStep(ctx, enq, now) + if err != nil { + t.Fatal(err) + } + + // complete one of the commands + _, err = s.StoreCommandResponseAndRetrieveCompletedStep(ctx, enq.IDs[0], &storage.StepCommandResult{ + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + ResultReport: []byte("Result-1"), + Completed: true, + }) + if err != nil { + t.Fatal(err) + } + + ifBefore := now.Add(time.Second) + now = ifBefore.Add(time.Second) + + ids, err := s.RetrieveAndMarkRePushed(ctx, ifBefore, now) + if err != nil { + t.Fatal(err) + } + if have, want := ids, []string{enq.IDs[1]}; !reflect.DeepEqual(have, want) { + t.Errorf("have: %v, want: %v", have, want) + } + + ids, err = s.RetrieveAndMarkRePushed(ctx, ifBefore, now) + if err != nil { + t.Fatal(err) + } + + if have, want := len(ids), 0; have != want { + t.Errorf("have: %v, want: %v: 
%v", have, want, ids) + } + + enq2 := &storage.StepEnqueuingWithConfig{ + StepEnqueueing: storage.StepEnqueueing{ + IDs: []string{"EnrollmentID-3"}, + StepContext: storage.StepContext{ + WorkflowName: "workflow.name.test1", + InstanceID: "InstanceID-1", + }, + Commands: []storage.StepCommandRaw{ + { + CommandUUID: "UUID-1", + RequestType: "DeviceInformation", + Command: []byte("Command-1"), + }, + }, + }, + NotUntil: time.Now().Add(-time.Minute), + } + + err = s.StoreStep(ctx, enq2, now) + if err != nil { + t.Fatal(err) + } + + ids, err = s.RetrieveAndMarkRePushed(ctx, ifBefore, now) + if err != nil { + t.Fatal(err) + } + + // should still be zero because we haven't enqueued anything (NotUntil on last command) + // and our dates have not changed + if have, want := len(ids), 0; have != want { + t.Errorf("have: %v, want: %v: %v", have, want, ids) + } + + // "enqueue" our NotUntils command in enq2 + _, err = s.RetrieveStepsToEnqueue(ctx, now) + if err != nil { + t.Fatal(err) + } + + ifBefore = now.Add(time.Second) + now = ifBefore.Add(time.Second) + + ids, err = s.RetrieveAndMarkRePushed(ctx, ifBefore, now) + if err != nil { + t.Fatal(err) + } + + find := []string{enq.IDs[1], enq2.IDs[0]} + sort.Strings(ids) + sort.Strings(find) + if have, want := ids, find; !reflect.DeepEqual(have, want) { + t.Errorf("have: %v, want: %v", have, want) + } + + ids, err = s.RetrieveAndMarkRePushed(ctx, ifBefore, now) + if err != nil { + t.Fatal(err) + } + + if have, want := len(ids), 0; have != want { + t.Errorf("have: %v, want: %v: %v", have, want, ids) + } +} diff --git a/engine/testdata/devinfo.plist b/engine/testdata/devinfo.plist new file mode 100644 index 0000000..b9bd1db --- /dev/null +++ b/engine/testdata/devinfo.plist @@ -0,0 +1,27 @@ + + + + + CommandUUID + DevInfo001 + QueryResponses + + BuildVersion + 19H12 + DeviceName + My iPhone + Model + MN9H2LL + ModelName + iPhone + OSVersion + 15.7 + SerialNumber + BNPTFLNZHG7K + + Status + Acknowledged + UDID + UDID001 + + diff --git 
a/engine/testdata/secinfo.gen.plist b/engine/testdata/secinfo.gen.plist new file mode 100644 index 0000000..ce77369 --- /dev/null +++ b/engine/testdata/secinfo.gen.plist @@ -0,0 +1,3 @@ + + +CommandRequestTypeSecurityInfoCommandUUIDABCUUID diff --git a/engine/worker.go b/engine/worker.go new file mode 100644 index 0000000..6116eb8 --- /dev/null +++ b/engine/worker.go @@ -0,0 +1,209 @@ +package engine + +import ( + "context" + "fmt" + "time" + + "github.com/micromdm/nanocmd/engine/storage" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/workflow" +) + +const DefaultDuration = time.Minute * 5 +const DefaultRePushDuration = time.Hour * 24 + +type WorkflowFinder interface { + Workflow(name string) workflow.Workflow +} + +// Worker polls storage backends for timed events on an interval. +// Examples include step timeouts, delayed steps (NotUntil), and +// re-pushes. +type Worker struct { + wff WorkflowFinder + storage storage.WorkerStorage + enqueuer PushEnqueuer + logger log.Logger + + // duration is the interval at which the worker will wake up to + // continue polling the storage backend for data to take action on. + duration time.Duration + + // repushDuration is how long MDM commands should go without any + // response seen before we send an APNs to the enrollment ID. + repushDuration time.Duration +} + +type WorkerOption func(w *Worker) + +func WithWorkerLogger(logger log.Logger) WorkerOption { + return func(w *Worker) { + w.logger = logger + } +} + +// WithWorkerDuration configures the polling interval for the worker. +func WithWorkerDuration(d time.Duration) WorkerOption { + return func(w *Worker) { + w.duration = d + } +} + +// WithWorkerRePushDuration configures when enrollments should be sent APNs pushes. +// This is the duration an enrollment ID has not received a response for +// an MDM command. 
+func WithWorkerRePushDuration(d time.Duration) WorkerOption {
+	return func(w *Worker) {
+		w.repushDuration = d
+	}
+}
+
+// NewWorker creates a new Worker polling storage for timed engine events.
+// The workflow finder, storage backend, and enqueuer are required.
+// Defaults (overridable via opts): a no-op logger, a DefaultDuration poll
+// interval, and a DefaultRePushDuration re-push threshold.
+func NewWorker(wff WorkflowFinder, storage storage.WorkerStorage, enqueuer PushEnqueuer, opts ...WorkerOption) *Worker {
+	w := &Worker{
+		wff:      wff,
+		storage:  storage,
+		enqueuer: enqueuer,
+		logger:   log.NopLogger,
+		duration: DefaultDuration,
+
+		repushDuration: DefaultRePushDuration,
+	}
+	for _, opt := range opts {
+		opt(w)
+	}
+	return w
+}
+
+// RunOnce runs the processes of the worker and logs errors.
+// Phases run in order: delayed step enqueueings, then step timeouts,
+// then re-pushes. An error in one phase aborts the remaining phases.
+// Re-push processing is skipped entirely when repushDuration is not
+// positive (i.e. re-pushing disabled).
+func (w *Worker) RunOnce(ctx context.Context) error {
+	err := w.processEnqueuings(ctx)
+	if err != nil {
+		return logAndError(err, w.logger, "processing enqueueings")
+	}
+	if err = w.processTimeouts(ctx); err != nil {
+		return logAndError(err, w.logger, "processing timeouts")
+	}
+	if w.repushDuration > 0 {
+		if err = w.processRePushes(ctx); err != nil {
+			return logAndError(err, w.logger, "processing repushes")
+		}
+	}
+	return nil
+}
+
+// Run starts and runs the worker forever on an interval.
+func (w *Worker) Run(ctx context.Context) error {
+	w.logger.Debug(logkeys.Message, "starting worker", "duration", w.duration)
+
+	ticker := time.NewTicker(w.duration)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			// RunOnce's error is dropped here: RunOnce already logs its
+			// errors via logAndError and the loop should keep running
+			// across transient failures.
+			w.RunOnce(ctx)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+}
+
+// processEnqueuings retrieves steps whose NotUntil time has arrived
+// and enqueues each of their raw MDM commands to the step's enrollment
+// IDs. Per-command enqueue errors are logged but do not abort the batch.
+func (w *Worker) processEnqueuings(ctx context.Context) error {
+	steps, err := w.storage.RetrieveStepsToEnqueue(ctx, time.Now())
+	if err != nil {
+		return fmt.Errorf("retrieving steps to enqueue: %w", err)
+	}
+
+	for _, step := range steps {
+		// NOTE(review): step.IDs[0] panics if a step comes back with no
+		// IDs; the test suite has a regression check for non-empty IDs,
+		// but a defensive guard here may be warranted — confirm.
+		stepLogger := w.logger.With(
+			logkeys.Message, "enqueueing command",
+			logkeys.InstanceID, step.InstanceID,
+			logkeys.WorkflowName, step.WorkflowName,
+			logkeys.StepName, step.Name,
+			logkeys.GenericCount, len(step.IDs),
+			logkeys.FirstEnrollmentID, step.IDs[0],
+		)
+		if len(step.Commands) < 1 {
+			// log (with no extra key-values) that a step with zero
+			// commands was retrieved; nothing will be enqueued below.
+			stepLogger.Info()
+		}
+
+		for _, cmd := range step.Commands {
+			logger := stepLogger.With(
+				logkeys.CommandUUID, cmd.CommandUUID,
+				logkeys.RequestType, cmd.RequestType,
+			)
+			err := w.enqueuer.Enqueue(ctx, step.IDs, cmd.Command)
+			if err != nil {
+				logger.Info(logkeys.Error, err)
+			} else {
+				logger.Debug()
+			}
+		}
+	}
+	return nil
+}
+
+// processTimeouts retrieves steps whose timeout has passed and notifies
+// each step's workflow via its StepTimeout handler. Per-step errors are
+// logged and the remaining steps are still processed.
+func (w *Worker) processTimeouts(ctx context.Context) error {
+	steps, err := w.storage.RetrieveTimedOutSteps(ctx)
+	if err != nil {
+		return fmt.Errorf("retrieving timed-out steps: %w", err)
+	}
+
+	for _, step := range steps {
+		stepLogger := w.logger.With(
+			logkeys.Message, "step timeout",
+			logkeys.InstanceID, step.InstanceID,
+			logkeys.WorkflowName, step.WorkflowName,
+			logkeys.StepName, step.Name,
+		)
+		if len(step.IDs) != 1 {
+			// step timeouts need to be per-enrollment ID
+			stepLogger.Info(logkeys.Error, "invalid count of step IDs")
+			continue
+		}
+		stepLogger = stepLogger.With(logkeys.EnrollmentID, step.IDs[0])
+		// NOTE(review): this deliberately shadows the *Worker receiver
+		// w with the looked-up workflow; calls below (w.StepTimeout)
+		// are on the workflow, not the Worker. A distinct name (e.g.
+		// wf) would be clearer.
+		w := w.wff.Workflow(step.WorkflowName)
+		if w == nil {
+			stepLogger.Info(logkeys.Error, NewErrNoSuchWorkflow(step.WorkflowName))
+			continue
+		}
+
+		// convert the storage step result to a workflow step result
+		stepResult, err := workflowStepResultFromStorageStepResult(step, w, true, "", nil)
+		if err != nil {
+			stepLogger.Info(logkeys.Error, err)
+			continue
+		}
+
+		// send the timeout notification
+		if err = w.StepTimeout(ctx, stepResult); err != nil {
+			stepLogger.Info(logkeys.Error, err)
+		} else {
+			stepLogger.Debug()
+		}
+	}
+	return nil
+}
+
+// processRePushes finds enrollment IDs with commands unanswered for at
+// least repushDuration, marks them as re-pushed, and sends them an APNs
+// push in one batch.
+func (w *Worker) processRePushes(ctx context.Context) error {
+	ids, err := w.storage.RetrieveAndMarkRePushed(ctx, time.Now().Add(-w.repushDuration), time.Now())
+	if err != nil {
+		return fmt.Errorf("retrieving repush ids: %w", err)
+	}
+	if len(ids) < 1 {
+		return nil
+	}
+	logger := w.logger.With(
+		logkeys.FirstEnrollmentID, ids[0],
+		logkeys.GenericCount, len(ids),
+	)
+	if err = w.enqueuer.Push(ctx, ids); err != nil {
+		return logAndError(err, logger, "sending push")
+	}
+	logger.Debug(
+		logkeys.Message, "processed repushes",
+	)
+	return nil
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..e9432d8
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,16 @@
+module github.com/micromdm/nanocmd
+
+go 1.19
+
+require (
+	github.com/alexedwards/flow v0.0.0-20220806114457-cf11be9e0e03
+	github.com/google/uuid v1.3.0
+	github.com/groob/plist v0.0.0-20220217120414-63fa881b19a5
+	github.com/jessepeterson/mdmcommands v0.0.0-20230517161100-c5ca4128e1e3
+	github.com/peterbourgon/diskv/v3 v3.0.1
+	go.mozilla.org/pkcs7 v0.0.0-00010101000000-000000000000
+)
+
+require github.com/google/btree v1.1.2 // indirect
+
+replace go.mozilla.org/pkcs7 => github.com/smallstep/pkcs7 v0.0.0-20230302202335-4c094085c948
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..48ae558
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,15 @@
+github.com/alexedwards/flow v0.0.0-20220806114457-cf11be9e0e03 h1:r07xZN3ENBWdxGuU/feCsnpsgHJ7+3uLm7cq9S0sqoI=
+github.com/alexedwards/flow v0.0.0-20220806114457-cf11be9e0e03/go.mod h1:1rjOQiOqQlmMdUMuvlJFjldqTnE/tQULE7qPIu4aq3U=
+github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/groob/plist v0.0.0-20220217120414-63fa881b19a5 h1:saaSiB25B1wgaxrshQhurfPKUGJ4It3OxNJUy0rdOjU= +github.com/groob/plist v0.0.0-20220217120414-63fa881b19a5/go.mod h1:itkABA+w2cw7x5nYUS/pLRef6ludkZKOigbROmCTaFw= +github.com/jessepeterson/mdmcommands v0.0.0-20230517161100-c5ca4128e1e3 h1:J825aym3sjbzht2K6m4Bd8HPkvV0lbyM/G2ZjGHuDaU= +github.com/jessepeterson/mdmcommands v0.0.0-20230517161100-c5ca4128e1e3/go.mod h1:EHxwKfMUtf7wNjF19BQQ/XCOvh62vbOXTggS9guNVxY= +github.com/peterbourgon/diskv/v3 v3.0.1 h1:x06SQA46+PKIUftmEujdwSEpIx8kR+M9eLYsUxeYveU= +github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o= +github.com/smallstep/pkcs7 v0.0.0-20230302202335-4c094085c948 h1:/80FqDt6pzL9clNW8G2IsRAzKGNAuzsEs7g1Y5oaM/Y= +github.com/smallstep/pkcs7 v0.0.0-20230302202335-4c094085c948/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= diff --git a/http/api/api.go b/http/api/api.go new file mode 100644 index 0000000..bc93f76 --- /dev/null +++ b/http/api/api.go @@ -0,0 +1,19 @@ +package api + +import ( + "encoding/json" + "net/http" +) + +// JSONError encodes err as JSON to w. 
+func JSONError(w http.ResponseWriter, err error, statusCode int) {
+	jsonErr := &struct {
+		Err string `json:"error"`
+	}{Err: err.Error()}
+	w.Header().Set("Content-type", "application/json")
+	// a non-positive status code falls back to 500 Internal Server Error.
+	if statusCode < 1 {
+		statusCode = http.StatusInternalServerError
+	}
+	w.WriteHeader(statusCode)
+	// NOTE(review): the Encode error is ignored; headers are already
+	// written at this point so there is little recourse, but it could
+	// be logged — confirm intent.
+	json.NewEncoder(w).Encode(jsonErr)
+}
diff --git a/http/http.go b/http/http.go
new file mode 100644
index 0000000..1416fc2
--- /dev/null
+++ b/http/http.go
@@ -0,0 +1,91 @@
+// Package http includes handlers and utilities.
+package http
+
+import (
+	"bytes"
+	"context"
+	"crypto/subtle"
+	"io"
+	"net"
+	"net/http"
+
+	"github.com/micromdm/nanocmd/log"
+	"github.com/micromdm/nanocmd/log/ctxlog"
+)
+
+// ReadAllAndReplaceBody reads all of r.Body and replaces it with a new byte buffer.
+// This lets middleware inspect the body while downstream handlers can
+// still read it normally.
+func ReadAllAndReplaceBody(r *http.Request) ([]byte, error) {
+	b, err := io.ReadAll(r.Body)
+	if err != nil {
+		return b, err
+	}
+	defer r.Body.Close()
+	r.Body = io.NopCloser(bytes.NewBuffer(b))
+	return b, nil
+}
+
+// BasicAuthMiddleware is a simple HTTP plain authentication middleware.
+// Requests failing authentication get a WWW-Authenticate challenge for
+// realm and a 401 Unauthorized response.
+// NOTE(review): subtle.ConstantTimeCompare returns 0 immediately when
+// lengths differ, so credential length can leak via timing; hashing
+// both sides before comparing would avoid that — confirm if relevant
+// to the threat model here.
+func BasicAuthMiddleware(next http.Handler, username, password, realm string) http.HandlerFunc {
+	uBytes := []byte(username)
+	pBytes := []byte(password)
+	return func(w http.ResponseWriter, r *http.Request) {
+		u, p, ok := r.BasicAuth()
+		if !ok || subtle.ConstantTimeCompare([]byte(u), uBytes) != 1 || subtle.ConstantTimeCompare([]byte(p), pBytes) != 1 {
+			w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`)
+			http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+			return
+		}
+		next.ServeHTTP(w, r)
+	}
+}
+
+// VersionHandler returns a simple JSON response from a version string.
+func VersionHandler(version string) http.HandlerFunc {
+	// NOTE(review): version is interpolated into the JSON body without
+	// escaping; fine for trusted build-time strings, but verify it is
+	// never user-supplied.
+	bodyBytes := []byte(`{"version":"` + version + `"}`)
+	return func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		w.Write(bodyBytes)
+	}
+}
+
+// ctxKeyTraceID is the context key under which the request trace ID is stored.
+type ctxKeyTraceID struct{}
+
+// TraceLoggingMiddleware sets up a trace ID in the request context and
+// logs HTTP requests. When traceID is non-nil its result is stored on
+// the context and emitted as "trace_id" by ctxlog-aware loggers.
+func TraceLoggingMiddleware(next http.Handler, logger log.Logger, traceID func(*http.Request) string) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
+		if traceID != nil {
+			ctx = context.WithValue(r.Context(), ctxKeyTraceID{}, traceID(r))
+			ctx = ctxlog.AddFunc(ctx, ctxlog.SimpleStringFunc("trace_id", ctxKeyTraceID{}))
+		}
+
+		// prefer just the host portion of the remote address; fall back
+		// to the raw RemoteAddr if it cannot be split.
+		host, _, err := net.SplitHostPort(r.RemoteAddr)
+		if err != nil {
+			host = r.RemoteAddr
+		}
+		logs := []interface{}{
+			"addr", host,
+			"method", r.Method,
+			"path", r.URL.Path,
+			"agent", r.UserAgent(),
+		}
+
+		if fwdedFor := r.Header.Get("X-Forwarded-For"); fwdedFor != "" {
+			logs = append(logs, "x_forwarded_for", fwdedFor)
+		}
+
+		ctxlog.Logger(ctx, logger).Info(logs...)
+
+		next.ServeHTTP(w, r.WithContext(ctx))
+	}
+}
+
+// DumpHandler outputs the body of the request to output.
+// Read and write errors are ignored: dumping is best-effort.
+func DumpHandler(next http.Handler, output io.Writer) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		body, _ := ReadAllAndReplaceBody(r)
+		output.Write(append(body, '\n'))
+		next.ServeHTTP(w, r)
+	}
+}
diff --git a/log/ctxlog/ctxlog.go b/log/ctxlog/ctxlog.go
new file mode 100644
index 0000000..c3dfc21
--- /dev/null
+++ b/log/ctxlog/ctxlog.go
@@ -0,0 +1,72 @@
+// Package ctxlog allows logging data stored with a context.
+package ctxlog
+
+import (
+	"context"
+	"sync"
+
+	"github.com/micromdm/nanocmd/log"
+)
+
+// CtxKVFunc creates logger key-value pairs from a context.
+// CtxKVFuncs should aim to be as efficient as possible—ideally only
+// doing the minimum to read context values and generate KV pairs.
Each
+// associated CtxKVFunc is called every time we adapt a logger with
+// Logger.
+type CtxKVFunc func(context.Context) []interface{}
+
+// ctxKeyFuncs is the context key for storing and retrieving
+// a funcs{} struct on a context.
+type ctxKeyFuncs struct{}
+
+// funcs holds the associated CtxKVFunc functions to run.
+type funcs struct {
+	sync.RWMutex
+	funcs []CtxKVFunc
+}
+
+// AddFunc associates a new CtxKVFunc function to a context.
+// NOTE(review): when a funcs struct already exists on ctx, the same
+// pointer is mutated and re-stored, so the appended function is also
+// visible to any other context sharing that struct (e.g. siblings
+// derived from the same parent) — confirm this sharing is intended.
+func AddFunc(ctx context.Context, f CtxKVFunc) context.Context {
+	if ctx == nil {
+		return ctx
+	}
+	ctxFuncs, ok := ctx.Value(ctxKeyFuncs{}).(*funcs)
+	if !ok || ctxFuncs == nil {
+		ctxFuncs = &funcs{}
+	}
+	ctxFuncs.Lock()
+	ctxFuncs.funcs = append(ctxFuncs.funcs, f)
+	ctxFuncs.Unlock()
+	return context.WithValue(ctx, ctxKeyFuncs{}, ctxFuncs)
+}
+
+// Logger runs the associated CtxKVFunc functions and returns a new
+// logger with the results. A nil ctx or a context without any
+// associated funcs returns logger unchanged.
+func Logger(ctx context.Context, logger log.Logger) log.Logger {
+	if ctx == nil {
+		return logger
+	}
+	ctxFuncs, ok := ctx.Value(ctxKeyFuncs{}).(*funcs)
+	if !ok || ctxFuncs == nil {
+		return logger
+	}
+	var acc []interface{}
+	ctxFuncs.RLock()
+	for _, f := range ctxFuncs.funcs {
+		acc = append(acc, f(ctx)...)
+	}
+	ctxFuncs.RUnlock()
+	return logger.With(acc...)
+}
+
+// SimpleStringFunc is a helper that generates a simple CtxKVFunc that
+// returns a key-value pair if found on the context.
+func SimpleStringFunc(logKey string, ctxKey interface{}) CtxKVFunc {
+	return func(ctx context.Context) (out []interface{}) {
+		// only emit the pair when the context holds a non-empty string.
+		v, _ := ctx.Value(ctxKey).(string)
+		if v != "" {
+			out = []interface{}{logKey, v}
+		}
+		return
+	}
+}
diff --git a/log/logger.go b/log/logger.go
new file mode 100644
index 0000000..2775e82
--- /dev/null
+++ b/log/logger.go
@@ -0,0 +1,17 @@
+package log
+
+// Package log is embedded (not imported) from:
+// https://github.com/jessepeterson/go-log
+
+// Logger is a generic logging interface to a structured, leveled, nest-able logger
+type Logger interface {
+	// Info logs using the info level
+	Info(...interface{})
+
+	// Debug logs using the debug level
+	Debug(...interface{})
+
+	// With nests the Logger
+	// Usually for adding logging context to a sub-logger
+	With(...interface{}) Logger
+}
diff --git a/log/logkeys/logkeys.go b/log/logkeys/logkeys.go
new file mode 100644
index 0000000..6ee10e7
--- /dev/null
+++ b/log/logkeys/logkeys.go
@@ -0,0 +1,25 @@
+// Package logkeys defines some static logging keys for consistent structured logging output.
+// Mostly exists as a mental aid when drafting log messages.
+package logkeys
+
+const (
+	Message = "msg"
+	Error   = "err"
+
+	// an MDM enrollment ID. i.e. a UDID, EnrollmentID, etc.
+	EnrollmentID = "id"
+
+	// in cases where we might need to log multiple enrollment IDs but only
+	// want to log the first (to avoid massive lists in logs).
+	FirstEnrollmentID = "id_first"
+
+	CommandUUID = "command_uuid"
+	RequestType = "request_type"
+
+	InstanceID   = "instance_id"
+	WorkflowName = "workflow_name"
+	StepName     = "step_name"
+
+	// a context-dependent numerical count/length of something
+	GenericCount = "count"
+)
diff --git a/log/nop.go b/log/nop.go
new file mode 100644
index 0000000..c0800c5
--- /dev/null
+++ b/log/nop.go
@@ -0,0 +1,21 @@
+package log
+
+// Package log is embedded (not imported) from:
+// https://github.com/jessepeterson/go-log
+
+// nopLogger does nothing
+type nopLogger struct{}
+
+// Info does nothing
+func (*nopLogger) Info(_ ...interface{}) {}
+
+// Debug does nothing
+func (*nopLogger) Debug(_ ...interface{}) {}
+
+// With returns (the same) logger
+func (logger *nopLogger) With(_ ...interface{}) Logger {
+	return logger
+}
+
+// NopLogger is a Logger that does nothing
+var NopLogger = &nopLogger{}
diff --git a/log/stdlogfmt/stdlog.go b/log/stdlogfmt/stdlog.go
new file mode 100644
index 0000000..9721ba7
--- /dev/null
+++ b/log/stdlogfmt/stdlog.go
@@ -0,0 +1,116 @@
+package stdlogfmt
+
+import (
+	"fmt"
+	stdlog "log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/micromdm/nanocmd/log"
+)
+
+// Logger wraps a standard library logger and adapts it to pkg/log.
+type Logger struct {
+	logger  *stdlog.Logger
+	context []interface{} // accumulated With() key-value pairs
+	debug   bool          // whether Debug() output is emitted
+	depth   int           // caller depth for file:line logging; 0 disables
+	ts      bool          // whether to prepend an RFC3339 timestamp
+}
+
+type Option func(*Logger)
+
+// WithLogger sets the Go standard logger to use.
+func WithLogger(logger *stdlog.Logger) Option {
+	return func(l *Logger) {
+		l.logger = logger
+	}
+}
+
+// WithDebug turns on debug logging.
+func WithDebug() Option {
+	return func(l *Logger) {
+		l.debug = true
+	}
+}
+
+// WithDebugFlag sets debug logging on or off.
+func WithDebugFlag(flag bool) Option {
+	return func(l *Logger) {
+		l.debug = flag
+	}
+}
+
+// WithCallerDepth sets the call depth of the logger for filename and line
+// logging. Set depth to 0 to disable filename and line logging.
+func WithCallerDepth(depth int) Option { + return func(l *Logger) { + l.depth = depth + } +} + +// WithoutTimestamp disables outputting an RFC3339 timestamp. +func WithoutTimestamp() Option { + return func(l *Logger) { + l.ts = false + } +} + +// New creates a new logger that adapts the Go standard log package to Logger. +func New(opts ...Option) *Logger { + l := &Logger{ + logger: stdlog.New(os.Stderr, "", 0), + depth: 1, + ts: true, + } + for _, opt := range opts { + opt(l) + } + return l +} + +func (l *Logger) print(args ...interface{}) { + if l.ts { + args = append([]interface{}{"ts", time.Now().Format(time.RFC3339)}, args...) + } + if l.depth > 0 { + _, filename, line, ok := runtime.Caller(l.depth + 1) + if ok { + caller := fmt.Sprintf("%s:%d", filepath.Base(filename), line) + args = append(args, "caller", caller) + } + } + f := strings.Repeat(" %s=%v", len(args)/2)[1:] + if len(args)%2 == 1 { + f += " UNKNOWN=%v" + } + l.logger.Printf(f, args...) +} + +// Info logs using the "info" level +func (l *Logger) Info(args ...interface{}) { + logs := []interface{}{"level", "info"} + logs = append(logs, l.context...) + logs = append(logs, args...) + l.print(logs...) +} + +// Info logs using the "debug" level +func (l *Logger) Debug(args ...interface{}) { + if l.debug { + logs := []interface{}{"level", "debug"} + logs = append(logs, l.context...) + logs = append(logs, args...) + l.print(logs...) + } +} + +// With creates a new logger using args as context +func (l *Logger) With(args ...interface{}) log.Logger { + l2 := *l + l2.context = append(l2.context, args...) + return &l2 +} diff --git a/mdm/foss/dump.go b/mdm/foss/dump.go new file mode 100644 index 0000000..4ca42b1 --- /dev/null +++ b/mdm/foss/dump.go @@ -0,0 +1,29 @@ +package foss + +import ( + "context" + "io" + + "github.com/micromdm/nanocmd/workflow" +) + +// MDMEventDumper is an MDM eventer middleware that dumps command responses to an output writer. 
+type MDMEventDumper struct {
+	next   MDMEventReceiver
+	output io.Writer
+}
+
+func NewMDMEventDumper(next MDMEventReceiver, output io.Writer) *MDMEventDumper {
+	return &MDMEventDumper{next: next, output: output}
+}
+
+// MDMCommandResponseEvent dumps the raw command response and processes the next eventer.
+func (d *MDMEventDumper) MDMCommandResponseEvent(ctx context.Context, id string, uuid string, raw []byte, mdmContext *workflow.MDMContext) error {
+	d.output.Write(append(raw, '\n'))
+	return d.next.MDMCommandResponseEvent(ctx, id, uuid, raw, mdmContext)
+}
+
+// MDMCheckinEvent processes the next eventer.
+func (d *MDMEventDumper) MDMCheckinEvent(ctx context.Context, id string, checkin interface{}, mdmContext *workflow.MDMContext) error {
+	return d.next.MDMCheckinEvent(ctx, id, checkin, mdmContext)
+}
diff --git a/mdm/foss/foss.go b/mdm/foss/foss.go
new file mode 100644
index 0000000..a0c65c0
--- /dev/null
+++ b/mdm/foss/foss.go
@@ -0,0 +1,257 @@
+// Package foss implements communication with Free/Open Source MDM servers.
+package foss
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/micromdm/nanocmd/log"
+	"github.com/micromdm/nanocmd/log/ctxlog"
+	"github.com/micromdm/nanocmd/log/logkeys"
+)
+
+var ErrNoIDsInIDChunk = errors.New("no ids in id chunk")
+
+// Doer executes an HTTP request.
+type Doer interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// FossMDM sends requests to Free/Open Source MDM servers for enqueueing MDM commands and sending APNs pushes.
+// Ostensibly this means NanoMDM and MicroMDM servers, but any server
+// that supports compatible API endpoints could work, too.
+type FossMDM struct {
+	logger log.Logger
+	client Doer
+
+	// maximum number of multi-targeted pushes or enqueueings supported.
+	// if set to one this effectively disables multi-command enqueueings.
+ max int + + user string // HTTP Basic username + apiKey string // HTTP Basic password + enqMethod string // HTTP method + + enqURL *url.URL // "base" URL for enqueueing commands + pushURL *url.URL // "base" URL for sending APNs pushes +} + +type Option func(*FossMDM) error + +func WithLogger(logger log.Logger) Option { + return func(m *FossMDM) error { + m.logger = logger + return nil + } +} + +// WithMicroMDM uses MicroMDM API conventions. +func WithMicroMDM() Option { + return func(m *FossMDM) error { + m.max = 1 + m.user = "micromdm" + m.enqMethod = http.MethodPost + return nil + } +} + +func prepURL(ref string) (*url.URL, error) { + if !strings.HasSuffix(ref, "/") { + // endpoints work by concatenating enrollment ID(s) to the base + // URLs as an additional "path." Make sure our URLs end with / + // to make this work properly. + ref += "/" + } + return url.Parse(ref) +} + +// WithPush configures sending APNs push requests to ref base URL. +func WithPush(ref string) Option { + return func(m *FossMDM) (err error) { + m.pushURL, err = prepURL(ref) + if err != nil { + err = fmt.Errorf("preparing push URL: %w", err) + } + return + } +} + +// 30 is a conservative estimate for a reasonable number of URL +// parameters in a request path considering typical limitations. +const defaultMaxIDs = 30 + +// NewFossMDM creates a new FossMDM. The enqueue and push URL "base" is +// specified with enqRef. By default we target NanoMDM conventions. 
+func NewFossMDM(enqRef, apiKey string, opts ...Option) (*FossMDM, error) { + m := &FossMDM{ + client: http.DefaultClient, + logger: log.NopLogger, + + max: defaultMaxIDs, + + user: "nanomdm", + apiKey: apiKey, + enqMethod: http.MethodPut, + } + var err error + m.enqURL, err = prepURL(enqRef) + if err != nil { + return m, fmt.Errorf("preparing enqueue URL: %w", err) + } + for _, opt := range opts { + err = opt(m) + if err != nil { + return m, fmt.Errorf("processing option: %w", err) + } + } + return m, nil +} + +// SupportsMultiCommands reports whether we support multi-targeted commands. +// These are commands that can be sent to multiple devices (i.e. using +// the same UUID). +func (m *FossMDM) SupportsMultiCommands() bool { + return m.max > 1 +} + +func concatURL(base *url.URL, ids []string) (string, error) { + if base == nil { + return "", errors.New("invalid base URL") + } + joinedIDs, err := url.Parse(strings.Join(ids, ",")) + if err != nil { + return "", err + } + return base.ResolveReference(joinedIDs).String(), nil +} + +func chunk(s []string, n int) (chunks [][]string) { + for i := 0; i < len(s); i += n { + end := i + n + if end > len(s) { + end = len(s) + } + chunks = append(chunks, s[i:end]) + } + return +} + +// Enqueue sends the HTTP request to enqueue rawCommand to ids on the MDM server. +func (m *FossMDM) Enqueue(ctx context.Context, ids []string, rawCommand []byte) error { + if m.max == 1 && len(ids) > 1 { + // err on the side of caution so that we don't try to enqueue + // the same command UUID onto different ids. + return errors.New("multiple ids not supported") + } + buf := bytes.NewBuffer(rawCommand) + logger := ctxlog.Logger(ctx, m.logger).With("request", "enqueue") + // TODO: perhaps parallelize? 
+ for _, idChunk := range chunk(ids, m.max) { + if len(idChunk) < 1 { + logger.Info(logkeys.Error, ErrNoIDsInIDChunk) + continue + } + idsLogger := logger.With( + logkeys.GenericCount, len(idChunk), + logkeys.FirstEnrollmentID, idChunk[0], + ) + ref, err := concatURL(m.enqURL, idChunk) + if err != nil { + idsLogger.Info( + logkeys.Message, "creating enqueue URL", + logkeys.Error, err, + ) + continue + } + req, err := http.NewRequestWithContext(ctx, m.enqMethod, ref, buf) + if err != nil { + idsLogger.Info( + logkeys.Message, "creating HTTP request", + logkeys.Error, err, + ) + continue + } + req.SetBasicAuth(m.user, m.apiKey) + resp, err := m.client.Do(req) + if err != nil { + idsLogger.Info( + logkeys.Message, "executing HTTP request", + logkeys.Error, err, + ) + continue + } + idsLogger.Debug( + logkeys.Message, "enqueue command", + "http_status_code", resp.StatusCode, + "http_status", resp.Status, + ) + if err = resp.Body.Close(); err != nil { + idsLogger.Info( + logkeys.Message, "closing body", + logkeys.Error, err, + ) + } + } + return nil +} + +// Push sends the HTTP request to send APNs pushes to ids on the MDM server. +func (m *FossMDM) Push(ctx context.Context, ids []string) error { + if m.pushURL == nil { + return errors.New("push not configured") + } + logger := ctxlog.Logger(ctx, m.logger).With("request", "push") + // TODO: perhaps parallelize? 
+ for _, idChunk := range chunk(ids, m.max) { + if len(idChunk) < 1 { + logger.Info(logkeys.Error, ErrNoIDsInIDChunk) + continue + } + idsLogger := logger.With( + logkeys.GenericCount, len(idChunk), + logkeys.FirstEnrollmentID, idChunk[0], + ) + ref, err := concatURL(m.pushURL, idChunk) + if err != nil { + idsLogger.Info( + logkeys.Message, "creating push URL", + logkeys.Error, err, + ) + continue + } + req, err := http.NewRequestWithContext(ctx, http.MethodGet, ref, nil) + if err != nil { + idsLogger.Info( + logkeys.Message, "creating HTTP request", + logkeys.Error, err, + ) + continue + } + req.SetBasicAuth(m.user, m.apiKey) + resp, err := m.client.Do(req) + if err != nil { + idsLogger.Info( + logkeys.Message, "executing HTTP request", + logkeys.Error, err, + ) + continue + } + idsLogger.Debug( + logkeys.Message, "push", + "http_status_code", resp.StatusCode, + "http_status", resp.Status, + ) + if err = resp.Body.Close(); err != nil { + idsLogger.Info( + logkeys.Message, "closing body", + logkeys.Error, err, + ) + } + } + return nil +} diff --git a/mdm/foss/process.go b/mdm/foss/process.go new file mode 100644 index 0000000..c5f151c --- /dev/null +++ b/mdm/foss/process.go @@ -0,0 +1,79 @@ +package foss + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/groob/plist" + "github.com/micromdm/nanocmd/mdm" + "github.com/micromdm/nanocmd/workflow" +) + +type MDMCommandResponseEventer interface { + MDMCommandResponseEvent(ctx context.Context, id string, uuid string, raw []byte, mdmContext *workflow.MDMContext) error +} + +type MDMCheckinEventer interface { + MDMCheckinEvent(ctx context.Context, id string, checkin interface{}, mdmContext *workflow.MDMContext) error +} + +type MDMEventReceiver interface { + MDMCommandResponseEventer + MDMCheckinEventer +} + +func idAndContext(udid, eid string, params map[string]string) (id string, mdmContext *workflow.MDMContext) { + id = udid + if id == "" { + id = eid + } + if len(params) > 0 { + mdmContext = 
&workflow.MDMContext{Params: params} + } + return +} + +func processAcknowledgeEvent(ctx context.Context, e *AcknowledgeEvent, ev MDMCommandResponseEventer) error { + if e == nil { + return errors.New("empty acknowledge event") + } + if e.Status == "Idle" || e.CommandUUID == "" { + return nil + } + id, mdmContext := idAndContext(e.UDID, e.EnrollmentID, e.Params) + return ev.MDMCommandResponseEvent(ctx, id, e.CommandUUID, e.RawPayload, mdmContext) +} + +func processCheckinEvent(ctx context.Context, topic string, e *CheckinEvent, ev MDMCheckinEventer) error { + if e == nil { + return errors.New("empty checkin event") + } + if !strings.HasPrefix(topic, "mdm.") { + // we're assuming the topic is just a prefixed MessageType + return errors.New("checkin topic incorrect prefix") + } + topic = topic[4:] + checkin := mdm.NewCheckinFromMessageType(topic) + if checkin == nil { + return fmt.Errorf("no checkin type for message type: %s", topic) + } + if err := plist.Unmarshal(e.RawPayload, checkin); err != nil { + return fmt.Errorf("unmarshal checkin: %w", err) + } + if tu, ok := checkin.(*mdm.TokenUpdate); ok && e.TokenUpdateTally != nil { + // wrap the token update to include our enrolling status + tue := &mdm.TokenUpdateEnrolling{TokenUpdate: tu} + if *e.TokenUpdateTally == 1 { + tue.Enrolling = true + } + if !tue.Valid() { + return fmt.Errorf("invalid token update wrapper") + } + // use the wrapped version + checkin = tue + } + id, mdmContext := idAndContext(e.UDID, e.EnrollmentID, e.Params) + return ev.MDMCheckinEvent(ctx, id, checkin, mdmContext) +} diff --git a/mdm/foss/testdata/tokenupdate.json b/mdm/foss/testdata/tokenupdate.json new file mode 100644 index 0000000..4247494 --- /dev/null +++ b/mdm/foss/testdata/tokenupdate.json @@ -0,0 +1,11 @@ +{ + "topic": "mdm.TokenUpdate", + "event_id": "", + "created_at": "2023-05-24T14:28:06.253316-07:00", + "checkin_event": { + "udid": "FF269FDC-7A93-5F12-A4B7-09923F0D1F7F", + "url_params": {}, + "raw_payload": 
"PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NUWVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VOIiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4wLmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+QXdhaXRpbmdDb25maWd1cmF0aW9uPC9rZXk+Cgk8ZmFsc2UvPgoJPGtleT5NZXNzYWdlVHlwZTwva2V5PgoJPHN0cmluZz5Ub2tlblVwZGF0ZTwvc3RyaW5nPgoJPGtleT5QdXNoTWFnaWM8L2tleT4KCTxzdHJpbmc+MDRFQUMzNTEtNTZFQS00RkZGLTg5QTctQzc2QjEzMDZGMzZBPC9zdHJpbmc+Cgk8a2V5PlRva2VuPC9rZXk+Cgk8ZGF0YT4KCXZLU0VNd0dhOUUzYktkUG1tb1dmYVN3cTNnUTdRcCtTQW5GeXJwa215YVE9Cgk8L2RhdGE+Cgk8a2V5PlRvcGljPC9rZXk+Cgk8c3RyaW5nPmNvbS5hcHBsZS5tZ210LkV4dGVybmFsLmUxYmQxZWFjLTEyMTctNGM4ZS04YTY3LWRkMTdkM2RkMzVkOTwvc3RyaW5nPgoJPGtleT5VRElEPC9rZXk+Cgk8c3RyaW5nPkZGMjY5RkRDLTdBOTMtNUYxMi1BNEI3LTA5OTIzRjBEMUY3Rjwvc3RyaW5nPgo8L2RpY3Q+CjwvcGxpc3Q+Cg==", + "token_update_tally": 1 + } +} diff --git a/mdm/foss/webhook.go b/mdm/foss/webhook.go new file mode 100644 index 0000000..3a469ca --- /dev/null +++ b/mdm/foss/webhook.go @@ -0,0 +1,95 @@ +package foss + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" +) + +type Event struct { + Topic string `json:"topic"` + EventID string `json:"event_id"` + CreatedAt time.Time `json:"created_at"` + + AcknowledgeEvent *AcknowledgeEvent `json:"acknowledge_event,omitempty"` + CheckinEvent *CheckinEvent `json:"checkin_event,omitempty"` +} + +type AcknowledgeEvent struct { + UDID string `json:"udid,omitempty"` + EnrollmentID string `json:"enrollment_id,omitempty"` + Status string `json:"status"` + CommandUUID string `json:"command_uuid,omitempty"` + Params map[string]string `json:"url_params,omitempty"` + RawPayload []byte `json:"raw_payload"` +} + +type CheckinEvent struct { + UDID string `json:"udid,omitempty"` + EnrollmentID string `json:"enrollment_id,omitempty"` + Params map[string]string `json:"url_params"` + RawPayload []byte 
`json:"raw_payload"`
+
+	// signals which tokenupdate this is to be able to tell whether this
+	// is the initial enrollment vs. a following tokenupdate
+	TokenUpdateTally *int `json:"token_update_tally,omitempty"`
+}
+
+// WebhookHandler parses the F/OSS MDM webhook callback for hand-off for further processing.
+func WebhookHandler(recv MDMEventReceiver, logger log.Logger) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		logger := ctxlog.Logger(r.Context(), logger)
+
+		event := new(Event)
+		if err := json.NewDecoder(r.Body).Decode(event); err != nil {
+			logger.Info(logkeys.Message, "decoding body", logkeys.Error, err)
+			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+			return
+		}
+
+		logger = logger.With(logsFromEvent(event)...)
+
+		if event.Topic == "mdm.Connect" {
+			if err := processAcknowledgeEvent(r.Context(), event.AcknowledgeEvent, recv); err != nil {
+				logger.Info(logkeys.Message, "process acknowledge event", logkeys.Error, err)
+				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+				return
+			}
+		} else {
+			if err := processCheckinEvent(r.Context(), event.Topic, event.CheckinEvent, recv); err != nil {
+				logger.Info(logkeys.Message, "process checkin event", logkeys.Error, err)
+				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+				return
+			}
+		}
+		logger.Debug(logkeys.Message, "webhook event")
+	}
+}
+
+func appendIfNotEmpty(slice *[]interface{}, key, value string) {
+	if value != "" {
+		*slice = append(*slice, key, value)
+	}
+}
+
+func logsFromEvent(e *Event) (logs []interface{}) {
+	if e == nil {
+		return
+	}
+	logs = []interface{}{"topic", e.Topic}
+	if e.AcknowledgeEvent != nil {
+		appendIfNotEmpty(&logs, "udid", e.AcknowledgeEvent.UDID)
+		appendIfNotEmpty(&logs, "enrollment_id", e.AcknowledgeEvent.EnrollmentID)
+		appendIfNotEmpty(&logs, "status", e.AcknowledgeEvent.Status)
+		appendIfNotEmpty(&logs,
logkeys.CommandUUID, e.AcknowledgeEvent.CommandUUID) + } else if e.CheckinEvent != nil { + appendIfNotEmpty(&logs, "udid", e.CheckinEvent.UDID) + appendIfNotEmpty(&logs, "enrollment_id", e.CheckinEvent.EnrollmentID) + } + + return +} diff --git a/mdm/foss/webhook_test.go b/mdm/foss/webhook_test.go new file mode 100644 index 0000000..c3902ca --- /dev/null +++ b/mdm/foss/webhook_test.go @@ -0,0 +1,103 @@ +package foss + +import ( + "context" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/mdm" + "github.com/micromdm/nanocmd/workflow" +) + +type event struct { + resp bool + id string + uuid string + raw []byte + chkin interface{} + ctx *workflow.MDMContext +} + +type eventRecorder struct { + events []event +} + +func (r *eventRecorder) MDMCommandResponseEvent(ctx context.Context, id string, uuid string, raw []byte, mdmContext *workflow.MDMContext) error { + r.events = append(r.events, event{ + resp: true, + id: id, + uuid: uuid, + raw: raw, + ctx: mdmContext, + }) + return nil +} + +func (r *eventRecorder) MDMCheckinEvent(ctx context.Context, id string, checkin interface{}, mdmContext *workflow.MDMContext) error { + r.events = append(r.events, event{ + resp: false, + id: id, + chkin: checkin, + ctx: mdmContext, + }) + return nil +} + +func TestWebhook(t *testing.T) { + eventRec := &eventRecorder{} + hf := WebhookHandler(eventRec, log.NopLogger) + + r, err := http.NewRequestWithContext(context.Background(), "GET", "/webhook", nil) + if err != nil { + t.Fatal(err) + } + + r.Body, err = os.Open("testdata/tokenupdate.json") + if err != nil { + t.Fatal(err) + } + defer r.Body.Close() + + recorder := httptest.NewRecorder() + + hf.ServeHTTP(recorder, r) + + if have, want := len(eventRec.events), 1; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + tEvent := eventRec.events[0] + + if have, want := tEvent.resp, false; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + 
udid := "FF269FDC-7A93-5F12-A4B7-09923F0D1F7F" + + if have, want := tEvent.id, udid; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + if have, want := tEvent.resp, false; have != want { + t.Errorf("have: %v, want: %v", have, want) + } + + if tEvent.ctx != nil { + t.Error("expected nil") + } + + tu, ok := tEvent.chkin.(*mdm.TokenUpdateEnrolling) + if ok && tu != nil { + if have, want := tu.Enrolling, true; ok && have != want { + t.Errorf("have: %v, want: %v", have, want) + } + if have, want := tu.UDID, udid; ok && have != want { + t.Errorf("have: %v, want: %v", have, want) + } + } else { + t.Error("incorrect type from parsed webhook") + } +} diff --git a/mdm/mdm.go b/mdm/mdm.go new file mode 100644 index 0000000..6601b4a --- /dev/null +++ b/mdm/mdm.go @@ -0,0 +1,84 @@ +// Package mdm defines types for the core MDM protocol. +package mdm + +// Checkin contains fields for MDM checkin messages. +type Checkin struct { + MessageType string +} + +// Enrollment contains various enrollment identifier fields. +type Enrollment struct { + UDID string `plist:",omitempty"` + UserID string `plist:",omitempty"` + UserShortName string `plist:",omitempty"` + UserLongName string `plist:",omitempty"` + EnrollmentID string `plist:",omitempty"` + EnrollmentUserID string `plist:",omitempty"` +} + +// Authenticate Checkin Message. MessageType field should be "Authenticate". +// See https://developer.apple.com/documentation/devicemanagement/authenticaterequest +type Authenticate struct { + Checkin + Enrollment + BuildVersion string `plist:",omitempty"` + DeviceName string + IMEI string `plist:",omitempty"` + MEID string `plist:",omitempty"` + Model string + ModelName string + OSVersion string `plist:",omitempty"` + ProductName string `plist:",omitempty"` + SerialNumber string `plist:",omitempty"` + Topic string +} + +// TokenUpdate Checkin Message. MessageType field should be "TokenUpdate". 
+// See https://developer.apple.com/documentation/devicemanagement/tokenupdaterequest +type TokenUpdate struct { + Checkin + Enrollment + AwaitingConfiguration bool `plist:",omitempty"` + MessageType string // supported value: TokenUpdate + NotOnConsole bool + PushMagic string + Token []byte + Topic string + UnlockToken []byte +} + +// TokenUpdateEnrolling is a wrapper around TokenUpdate that indicates a new enrollment. +type TokenUpdateEnrolling struct { + *TokenUpdate + Enrolling bool // if this is the very first TokenUpdate (i.e. enrolling) +} + +// Valid checks for nil pointers. +func (tue *TokenUpdateEnrolling) Valid() bool { + if tue == nil || tue.TokenUpdate == nil { + return false + } + return true +} + +// Checkout Checkin Message. MessageType field should be "CheckOut". +// See https://developer.apple.com/documentation/devicemanagement/checkoutrequest +type CheckOut struct { + Checkin + Enrollment + Topic string +} + +// NewCheckinFromMessageType creates a new checkin struct given a message type. +func NewCheckinFromMessageType(messageType string) interface{} { + switch messageType { + case "Authenticate": + return new(Authenticate) + case "TokenUpdate": + return new(TokenUpdate) + case "CheckOut": + return new(CheckOut) + default: + return nil + } +} diff --git a/subsystem/cmdplan/http/http.go b/subsystem/cmdplan/http/http.go new file mode 100644 index 0000000..7b35e68 --- /dev/null +++ b/subsystem/cmdplan/http/http.go @@ -0,0 +1,81 @@ +// Package http contains HTTP handlers for working with Command Plans. +package http + +import ( + "encoding/json" + "errors" + "net/http" + + "github.com/alexedwards/flow" + "github.com/micromdm/nanocmd/http/api" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/subsystem/cmdplan/storage" +) + +var ( + ErrNoName = errors.New("no name provided") +) + +// GetHandler returns an HTTP handler that fetches a command plan. 
+func GetHandler(store storage.ReadStorage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + name := flow.Param(r.Context(), "name") + if name == "" { + logger.Info(logkeys.Message, "name parameter", logkeys.Error, ErrNoName) + api.JSONError(w, ErrNoName, http.StatusBadRequest) + return + } + + logger = logger.With("name", name) + cmdPlan, err := store.RetrieveCMDPlan(r.Context(), name) + if err != nil { + logger.Info(logkeys.Message, "retrieve cmdplan", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + + logger.Debug( + logkeys.Message, "retrieved cmdplan", + logkeys.GenericCount, len(cmdPlan.ProfileNames), + ) + w.Header().Set("Content-Type", "application/json") + if err = json.NewEncoder(w).Encode(cmdPlan); err != nil { + logger.Info(logkeys.Message, "encoding json to body", logkeys.Error, err) + return + } + } +} + +// PutHandler returns an HTTP handler for uploading a command plan. 
+func PutHandler(store storage.Storage, logger log.Logger) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		logger := ctxlog.Logger(r.Context(), logger)
+		name := flow.Param(r.Context(), "name")
+		if name == "" {
+			logger.Info(logkeys.Message, "name parameter", logkeys.Error, ErrNoName)
+			api.JSONError(w, ErrNoName, http.StatusBadRequest)
+			return
+		}
+
+		logger = logger.With("name", name)
+		cmdplan := new(storage.CMDPlan)
+		err := json.NewDecoder(r.Body).Decode(cmdplan)
+		if err != nil {
+			logger.Info(logkeys.Message, "decoding body", logkeys.Error, err)
+			api.JSONError(w, err, 0)
+			return
+		}
+
+		if err = store.StoreCMDPlan(r.Context(), name, cmdplan); err != nil {
+			logger.Info(logkeys.Message, "storing cmdplan", logkeys.Error, err)
+			api.JSONError(w, err, 0)
+			return
+		}
+
+		logger.Debug(logkeys.Message, "stored cmdplan")
+		w.WriteHeader(http.StatusNoContent)
+	}
+}
diff --git a/subsystem/cmdplan/storage/diskv/diskv.go b/subsystem/cmdplan/storage/diskv/diskv.go
new file mode 100644
index 0000000..aecb372
--- /dev/null
+++ b/subsystem/cmdplan/storage/diskv/diskv.go
@@ -0,0 +1,27 @@
+// Package diskv implements a command plan storage backend backed by an on-disk key-value store.
+package diskv
+
+import (
+	"path/filepath"
+
+	"github.com/micromdm/nanocmd/subsystem/cmdplan/storage/kv"
+	"github.com/micromdm/nanocmd/utils/kv/kvdiskv"
+	"github.com/peterbourgon/diskv/v3"
+)
+
+// Diskv is a command plan storage backend backed by an on-disk key-value store.
+type Diskv struct {
+	*kv.KV
+}
+
+// New creates a new initialized CMDPlan data store.
+func New(path string) *Diskv {
+	flatTransform := func(s string) []string { return []string{} }
+	return &Diskv{
+		KV: kv.New(kvdiskv.NewBucket(diskv.New(diskv.Options{
+			BasePath:     filepath.Join(path, "cmdplan"),
+			Transform:    flatTransform,
+			CacheSizeMax: 1024 * 1024,
+		}))),
+	}
+}
diff --git a/subsystem/cmdplan/storage/diskv/diskv_test.go b/subsystem/cmdplan/storage/diskv/diskv_test.go
new file mode 100644
index 0000000..962ed76
--- /dev/null
+++ b/subsystem/cmdplan/storage/diskv/diskv_test.go
@@ -0,0 +1,14 @@
+package diskv
+
+import (
+	"os"
+	"testing"
+
+	"github.com/micromdm/nanocmd/subsystem/cmdplan/storage"
+	"github.com/micromdm/nanocmd/subsystem/cmdplan/storage/test"
+)
+
+func TestDiskv(t *testing.T) {
+	test.TestCMDPlanStorage(t, func() storage.Storage { return New("teststor") })
+	os.RemoveAll("teststor")
+}
diff --git a/subsystem/cmdplan/storage/inmem/inmem.go b/subsystem/cmdplan/storage/inmem/inmem.go
new file mode 100644
index 0000000..7471e7f
--- /dev/null
+++ b/subsystem/cmdplan/storage/inmem/inmem.go
@@ -0,0 +1,16 @@
+// Package inmem implements a command plan storage backend backed by an in-memory key-value store.
+package inmem
+
+import (
+	"github.com/micromdm/nanocmd/subsystem/cmdplan/storage/kv"
+	"github.com/micromdm/nanocmd/utils/kv/kvmap"
+)
+
+// InMem is a command plan storage backend backed by an in-memory key-value store.
+type InMem struct { + *kv.KV +} + +func New() *InMem { + return &InMem{KV: kv.New(kvmap.NewBucket())} +} diff --git a/subsystem/cmdplan/storage/inmem/inmem_test.go b/subsystem/cmdplan/storage/inmem/inmem_test.go new file mode 100644 index 0000000..a0020ab --- /dev/null +++ b/subsystem/cmdplan/storage/inmem/inmem_test.go @@ -0,0 +1,12 @@ +package inmem + +import ( + "testing" + + "github.com/micromdm/nanocmd/subsystem/cmdplan/storage" + "github.com/micromdm/nanocmd/subsystem/cmdplan/storage/test" +) + +func TestInMem(t *testing.T) { + test.TestCMDPlanStorage(t, func() storage.Storage { return New() }) +} diff --git a/subsystem/cmdplan/storage/kv/kv.go b/subsystem/cmdplan/storage/kv/kv.go new file mode 100644 index 0000000..2652c34 --- /dev/null +++ b/subsystem/cmdplan/storage/kv/kv.go @@ -0,0 +1,51 @@ +// Package kv implements a cmdplan storage backend using JSON with key-value storage. +package kv + +import ( + "context" + "encoding/json" + "sync" + + "github.com/micromdm/nanocmd/subsystem/cmdplan/storage" + "github.com/micromdm/nanocmd/utils/kv" +) + +// KV is a cmdplan storage backend using JSON with key-value storage. +type KV struct { + mu sync.RWMutex + b kv.Bucket +} + +func New(b kv.Bucket) *KV { + return &KV{b: b} +} + +// RetrieveCMDPlan unmarshals the JSON stored using name and returns the command plan. +func (s *KV) RetrieveCMDPlan(ctx context.Context, name string) (*storage.CMDPlan, error) { + s.mu.RLock() + defer s.mu.RUnlock() + raw, err := s.b.Get(ctx, name) + if err != nil { + return nil, err + } + cmdPlan := new(storage.CMDPlan) + return cmdPlan, json.Unmarshal(raw, cmdPlan) +} + +// StoreCMDPlan marshals p into JSON and stores it using name. +func (s *KV) StoreCMDPlan(ctx context.Context, name string, p *storage.CMDPlan) error { + s.mu.Lock() + defer s.mu.Unlock() + raw, err := json.Marshal(p) + if err != nil { + return err + } + return s.b.Set(ctx, name, raw) +} + +// DeleteCMDPlan deletes the JSON stored using name. 
+func (s *KV) DeleteCMDPlan(ctx context.Context, name string) error { + s.mu.Lock() + defer s.mu.Unlock() + return s.b.Delete(ctx, name) +} diff --git a/subsystem/cmdplan/storage/storage.go b/subsystem/cmdplan/storage/storage.go new file mode 100644 index 0000000..ec28868 --- /dev/null +++ b/subsystem/cmdplan/storage/storage.go @@ -0,0 +1,22 @@ +// Package storage defines types supporting Command Plans. +package storage + +import "context" + +// CMDPlans define approximate MDM command sequences. +type CMDPlan struct { + ProfileNames []string `json:"profile_names,omitempty"` + ManifestURLs []string `json:"manifest_urls,omitempty"` + DeviceConfigured *bool `json:"device_configured,omitempty"` + // AccountConfig *AccountConfig +} + +type ReadStorage interface { + RetrieveCMDPlan(ctx context.Context, name string) (*CMDPlan, error) +} + +type Storage interface { + ReadStorage + StoreCMDPlan(ctx context.Context, name string, p *CMDPlan) error + DeleteCMDPlan(ctx context.Context, name string) error +} diff --git a/subsystem/cmdplan/storage/test/test.go b/subsystem/cmdplan/storage/test/test.go new file mode 100644 index 0000000..df4856f --- /dev/null +++ b/subsystem/cmdplan/storage/test/test.go @@ -0,0 +1,43 @@ +package test + +import ( + "context" + "reflect" + "testing" + + "github.com/micromdm/nanocmd/subsystem/cmdplan/storage" +) + +func TestCMDPlanStorage(t *testing.T, newStorage func() storage.Storage) { + s := newStorage() + ctx := context.Background() + + plan := &storage.CMDPlan{ + ProfileNames: []string{"hello"}, + ManifestURLs: []string{"gopher://example.com/1/news"}, + } + + err := s.StoreCMDPlan(ctx, "test1", plan) + if err != nil { + t.Fatal(err) + } + + plan2, err := s.RetrieveCMDPlan(ctx, "test1") + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(plan, plan2) { + t.Error("not equal") + } + + err = s.DeleteCMDPlan(ctx, "test1") + if err != nil { + t.Fatal(err) + } + + _, err = s.RetrieveCMDPlan(ctx, "test1") + if err == nil { + t.Fatal("expected 
error") + } +} diff --git a/subsystem/filevault/http/http.go b/subsystem/filevault/http/http.go new file mode 100644 index 0000000..d562646 --- /dev/null +++ b/subsystem/filevault/http/http.go @@ -0,0 +1,16 @@ +// Package http provides HTTP handlers related to the FileVault enable workflow. +package http + +import ( + "net/http" + + "github.com/micromdm/nanocmd/workflow/fvenable" +) + +// GetProfileTemplate returns an HTTP handler that serves the fvenable profile template. +func GetProfileTemplate() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-type", "application/x-apple-aspen-config") + w.Write([]byte(fvenable.ProfileTemplate)) + } +} diff --git a/subsystem/filevault/storage/diskv/diskv.go b/subsystem/filevault/storage/diskv/diskv.go new file mode 100644 index 0000000..7496791 --- /dev/null +++ b/subsystem/filevault/storage/diskv/diskv.go @@ -0,0 +1,31 @@ +// Package diskv implements a diskv-backed FileVault storage backend. +package diskv + +import ( + "context" + "path/filepath" + + "github.com/micromdm/nanocmd/subsystem/filevault/storage" + "github.com/micromdm/nanocmd/subsystem/filevault/storage/kv" + "github.com/micromdm/nanocmd/utils/kv/kvdiskv" + "github.com/peterbourgon/diskv/v3" +) + +// Diskv implements a diskv-backed FileVault storage backend. 
+type Diskv struct {
+	*kv.KV
+}
+
+func New(path string, p storage.PRKStorage) (*Diskv, error) {
+	flatTransform := func(s string) []string { return []string{} }
+	kvStore, err := kv.New(
+		context.Background(),
+		kvdiskv.NewBucket(diskv.New(diskv.Options{
+			BasePath:     filepath.Join(path, "fvkey"),
+			Transform:    flatTransform,
+			CacheSizeMax: 1024 * 1024,
+		})),
+		p,
+	)
+	return &Diskv{KV: kvStore}, err
+}
diff --git a/subsystem/filevault/storage/inmem/inmem.go b/subsystem/filevault/storage/inmem/inmem.go
new file mode 100644
index 0000000..9c3ab41
--- /dev/null
+++ b/subsystem/filevault/storage/inmem/inmem.go
@@ -0,0 +1,20 @@
+// Package inmem implements an in-memory FileVault storage backend.
+package inmem
+
+import (
+	"context"
+
+	"github.com/micromdm/nanocmd/subsystem/filevault/storage"
+	"github.com/micromdm/nanocmd/subsystem/filevault/storage/kv"
+	"github.com/micromdm/nanocmd/utils/kv/kvmap"
+)
+
+// InMem implements an in-memory FileVault storage backend.
+type InMem struct {
+	*kv.KV
+}
+
+func New(p storage.PRKStorage) (*InMem, error) {
+	kvStore, err := kv.New(context.Background(), kvmap.NewBucket(), p)
+	return &InMem{KV: kvStore}, err
+}
diff --git a/subsystem/filevault/storage/invprk/invprk.go b/subsystem/filevault/storage/invprk/invprk.go
new file mode 100644
index 0000000..a19f26c
--- /dev/null
+++ b/subsystem/filevault/storage/invprk/invprk.go
@@ -0,0 +1,50 @@
+// Package invprk implements retrieving and storing PRKs in inventory storage.
+package invprk
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/micromdm/nanocmd/subsystem/inventory/storage"
+)
+
+// InvPRK retrieves and stores PRKs in inventory storage.
+type InvPRK struct { + i storage.Storage +} + +func NewInvPRK(i storage.Storage) *InvPRK { + return &InvPRK{i: i} +} + +func (s *InvPRK) StorePRK(ctx context.Context, id, prk string) error { + return s.i.StoreInventoryValues(ctx, id, storage.Values{ + storage.KeyLastSource: "InvPRK", + storage.KeyModified: time.Now(), + storage.KeyPRK: prk, + }) +} + +func (s *InvPRK) RetrievePRK(ctx context.Context, id string) (string, error) { + idVals, err := s.i.RetrieveInventory(ctx, &storage.SearchOptions{IDs: []string{id}}) + if err != nil { + return "", fmt.Errorf("retrieve inventory: %w", err) + } + if idVals == nil { + return "", errors.New("no values returned from inventory") + } + vals, ok := idVals[id] + if !ok || vals == nil { + return "", fmt.Errorf("id not in inventory: %s", id) + } + prkVal, ok := vals[storage.KeyPRK] + var prk string + if !ok || prkVal == nil { + return "", fmt.Errorf("inventory does not contain PRK value: %s", id) + } else if prk, ok = prkVal.(string); !ok { + return "", errors.New("PRK incorrect inventory type") + } + return prk, nil +} diff --git a/subsystem/filevault/storage/invprk/invprk_test.go b/subsystem/filevault/storage/invprk/invprk_test.go new file mode 100644 index 0000000..e27d4f6 --- /dev/null +++ b/subsystem/filevault/storage/invprk/invprk_test.go @@ -0,0 +1,26 @@ +package invprk + +import ( + "context" + "testing" + + "github.com/micromdm/nanocmd/subsystem/inventory/storage/inmem" +) + +func TestInvPRK(t *testing.T) { + ctx := context.Background() + inv := inmem.New() + invPRK := NewInvPRK(inv) + prk := "PRK-321-ZYX" + err := invPRK.StorePRK(ctx, "ID1", "PRK-321-ZYX") + if err != nil { + t.Fatal(err) + } + prkRet, err := invPRK.RetrievePRK(ctx, "ID1") + if err != nil { + t.Fatal(err) + } + if have, want := prk, prkRet; prk != prkRet { + t.Errorf("have: %v, want: %v", have, want) + } +} diff --git a/subsystem/filevault/storage/kv/kv.go b/subsystem/filevault/storage/kv/kv.go new file mode 100644 index 0000000..107ce36 --- /dev/null 
+++ b/subsystem/filevault/storage/kv/kv.go @@ -0,0 +1,118 @@ +// Package kv implements a key-value FileVault storage. +package kv + +import ( + "context" + "crypto/rsa" + "crypto/x509" + "fmt" + + "github.com/micromdm/nanocmd/subsystem/filevault/storage" + "github.com/micromdm/nanocmd/utils/cryptoutil" + "github.com/micromdm/nanocmd/utils/kv" + "go.mozilla.org/pkcs7" +) + +// KV is a FileVault storage backend based on a key-value store. +// Its primary storage duties are initially generating and then +// subsequently loading the keypair from storage. +// It uses a single keypair for all FileVault PSK encryption/decryption. +// The actual PSK storage and retrieval (i.e. once decrypted) is +// abstracted to another storage interface. +type KV struct { + b kv.Bucket + p storage.PRKStorage +} + +const ( + certCN = "filevault" + certValidityDays = 10 * 365 + + kvKeyKey = "key" + kvKeyCert = "cert" +) + +func New(ctx context.Context, b kv.Bucket, p storage.PRKStorage) (*KV, error) { + kv := &KV{b: b, p: p} + if err := kv.assureKeypairExists(ctx); err != nil { + return kv, err + } + return kv, nil +} + +// assureKeypairExists checks that a keypair exists or generates a new keypair +func (s *KV) assureKeypairExists(ctx context.Context) error { + // check for cert and key + certOK, err := s.b.Has(ctx, kvKeyCert) + if err != nil { + return fmt.Errorf("checking cert exists: %w", err) + } + keyOK, err := s.b.Has(ctx, kvKeyKey) + if err != nil { + return fmt.Errorf("checking key exists: %w", err) + } + if certOK && keyOK { + return nil + } + // generate new + key, cert, err := cryptoutil.SelfSignedRSAKeypair(certCN, certValidityDays) + if err != nil { + return fmt.Errorf("generating self-signed keypair: %w", err) + } + if err = s.b.Set(ctx, kvKeyKey, x509.MarshalPKCS1PrivateKey(key)); err != nil { + return fmt.Errorf("setting key: %w", err) + } + if err = s.b.Set(ctx, kvKeyCert, cert.Raw); err != nil { + return fmt.Errorf("setting cert: %w", err) + } + return nil +} + +func (s 
*KV) RetrievePRKCertRaw(ctx context.Context, _ string) ([]byte, error) { + return s.b.Get(ctx, kvKeyCert) +} + +func (s *KV) RetrievePRK(ctx context.Context, id string) (string, error) { + return s.p.RetrievePRK(ctx, id) +} + +// getKeypair retrieves the certificate and private key from the bucket. +func (s *KV) getKeypair(ctx context.Context) (*rsa.PrivateKey, *x509.Certificate, error) { + certBytes, err := s.b.Get(ctx, kvKeyCert) + if err != nil { + return nil, nil, fmt.Errorf("getting cert: %w", err) + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, cert, fmt.Errorf("parsing cert: %w", err) + } + keyBytes, err := s.b.Get(ctx, kvKeyKey) + if err != nil { + return nil, cert, fmt.Errorf("getting key: %w", err) + } + key, err := x509.ParsePKCS1PrivateKey(keyBytes) + if err != nil { + return key, cert, fmt.Errorf("parsing key: %w", err) + } + return key, cert, nil +} + +// EscrowPRK decrypts the CMS PRK and stores it. +func (s *KV) EscrowPRK(ctx context.Context, id string, cms []byte) error { + p7, err := pkcs7.Parse(cms) + if err != nil { + return fmt.Errorf("parse PRK CMS: %w", err) + } + key, cert, err := s.getKeypair(ctx) + if err != nil { + return fmt.Errorf("getting keypair: %w", err) + } + prkBytes, err := p7.Decrypt(cert, key) + if err != nil { + return fmt.Errorf("decrypting PRK CMS: %w", err) + } + if err = s.p.StorePRK(ctx, id, string(prkBytes)); err != nil { + return fmt.Errorf("store PRK: %w", err) + } + return nil +} diff --git a/subsystem/filevault/storage/storage.go b/subsystem/filevault/storage/storage.go new file mode 100644 index 0000000..df6a468 --- /dev/null +++ b/subsystem/filevault/storage/storage.go @@ -0,0 +1,39 @@ +// Package storage defines types supporting FileVault FDE commands and responses. +package storage + +import ( + "context" +) + +// PRKCertRetriever retrieves the raw DER certificate bytes used for encrypting the PRK for an id. 
+type PRKCertRetriever interface { + RetrievePRKCertRaw(ctx context.Context, id string) ([]byte, error) +} + +// PRKRetriever retrieves the existing (already escrowed and decrypted) PRK for an enrollment. +type PRKRetriever interface { + RetrievePRK(ctx context.Context, id string) (string, error) +} + +// PRKEscrower escrows the encrypted CMS of the PRK. +type PRKEscrower interface { + EscrowPRK(ctx context.Context, id string, cms []byte) error +} + +// FVEnable is intended for enabling FileVault and escrowing PRKs. +type FVEnable interface { + PRKCertRetriever + PRKEscrower +} + +// FVRotate is intended for rotating encrypted PRKs. +type FVRotate interface { + FVEnable + PRKRetriever +} + +// PRKStorage retrieves and stores unencrypted PRKs. +type PRKStorage interface { + PRKRetriever + StorePRK(ctx context.Context, id, prk string) error +} diff --git a/subsystem/inventory/http/http.go b/subsystem/inventory/http/http.go new file mode 100644 index 0000000..e3c042b --- /dev/null +++ b/subsystem/inventory/http/http.go @@ -0,0 +1,59 @@ +// Package http contains HTTP handlers for working with the inventory subsytem. +package http + +import ( + "encoding/json" + "errors" + "net/http" + + "github.com/micromdm/nanocmd/http/api" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/subsystem/inventory/storage" +) + +var ( + ErrNoIDs = errors.New("no IDs provided") + ErrNoStorage = errors.New("no storage backend") +) + +// RetrieveInventory returns an HTTP handler that retrieves inventory data for enrollment IDs. 
+func RetrieveInventory(store storage.ReadStorage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + if store == nil { + logger.Info(logkeys.Message, "retrieve inventory", logkeys.Error, ErrNoStorage) + api.JSONError(w, ErrNoStorage, 0) + return + } + + ids := r.URL.Query()["id"] + if len(ids) < 1 { + logger.Info(logkeys.Message, "parameters", logkeys.Error, ErrNoIDs) + api.JSONError(w, ErrNoIDs, http.StatusBadRequest) + return + } + + logger = logger.With( + logkeys.FirstEnrollmentID, ids[0], + logkeys.GenericCount, len(ids), + ) + opts := &storage.SearchOptions{IDs: ids} + idValues, err := store.RetrieveInventory(r.Context(), opts) + if err != nil { + logger.Info(logkeys.Message, "retrieve inventory", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + logger.Debug( + logkeys.Message, "retrieved inventory", + ) + w.Header().Set("Content-type", "application/json") + err = json.NewEncoder(w).Encode(idValues) + if err != nil { + logger.Info(logkeys.Message, "encode response", logkeys.Error, err) + return + } + } +} diff --git a/subsystem/inventory/storage/diskv/diskv.go b/subsystem/inventory/storage/diskv/diskv.go new file mode 100644 index 0000000..1f79a93 --- /dev/null +++ b/subsystem/inventory/storage/diskv/diskv.go @@ -0,0 +1,87 @@ +// Package diskv implements a diskv-backed inventory subsystem storage backend. +package diskv + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + + "github.com/micromdm/nanocmd/subsystem/inventory/storage" + "github.com/peterbourgon/diskv/v3" +) + +// Diskv is an on-disk enrollment inventory data store. +type Diskv struct { + diskv *diskv.Diskv +} + +// New creates a new initialized inventory data store. 
+func New(path string) *Diskv { + flatTransform := func(s string) []string { return []string{} } + return &Diskv{ + diskv: diskv.New(diskv.Options{ + BasePath: filepath.Join(path, "inventory"), + Transform: flatTransform, + CacheSizeMax: 1024 * 1024, + }), + } +} + +// RetrieveInventory retrieves the inventory data for enrollment IDs. +func (s *Diskv) RetrieveInventory(ctx context.Context, opt *storage.SearchOptions) (map[string]storage.Values, error) { + ret := make(map[string]storage.Values) + for _, id := range opt.IDs { + if !s.diskv.Has(id) { + continue + } + raw, err := s.diskv.Read(id) + if err != nil { + return ret, fmt.Errorf("reading values for %s: %w", id, err) + } + var vals storage.Values + if err = json.Unmarshal(raw, &vals); err != nil { + return ret, fmt.Errorf("unmarshal values for %s: %w", id, err) + } + ret[id] = vals + } + return ret, nil +} + +// StoreInventoryValues stores inventory data about the specified ID. +func (s *Diskv) StoreInventoryValues(ctx context.Context, id string, values storage.Values) error { + var err error + var raw []byte + var vals storage.Values + if s.diskv.Has(id) { + // this is likely race-prone as we perform a read-process-write on the same key. + if raw, err = s.diskv.Read(id); err != nil { + return fmt.Errorf("reading values: %w", err) + } + if len(raw) > 0 { + if err = json.Unmarshal(raw, &vals); err != nil { + return fmt.Errorf("unmarshal values: %w", err) + } + if vals != nil { + for k := range values { + vals[k] = values[k] + } + } + } + } + if vals == nil { + vals = values + } + if raw, err = json.Marshal(vals); err != nil { + return fmt.Errorf("marshal values: %w", err) + } + if err = s.diskv.Write(id, raw); err != nil { + return fmt.Errorf("write values: %w", err) + } + return nil +} + +// DeleteInventory deletes all inventory data for an enrollment ID. 
+func (s *Diskv) DeleteInventory(ctx context.Context, id string) error { + return s.diskv.Erase(id) +} diff --git a/subsystem/inventory/storage/diskv/diskv_test.go b/subsystem/inventory/storage/diskv/diskv_test.go new file mode 100644 index 0000000..263b480 --- /dev/null +++ b/subsystem/inventory/storage/diskv/diskv_test.go @@ -0,0 +1,14 @@ +package diskv + +import ( + "os" + "testing" + + "github.com/micromdm/nanocmd/subsystem/inventory/storage" + "github.com/micromdm/nanocmd/subsystem/inventory/storage/test" +) + +func TestDiskv(t *testing.T) { + test.TestStorage(t, func() storage.Storage { return New("teststor") }) + os.RemoveAll("teststor") +} diff --git a/subsystem/inventory/storage/inmem/inmem.go b/subsystem/inventory/storage/inmem/inmem.go new file mode 100644 index 0000000..605682e --- /dev/null +++ b/subsystem/inventory/storage/inmem/inmem.go @@ -0,0 +1,61 @@ +// Package inmem implements an in-memory inventory subsystem storage backend. +package inmem + +import ( + "context" + "sync" + + "github.com/micromdm/nanocmd/subsystem/inventory/storage" +) + +// InMem represents the in-memory enrollment inventory data store. +type InMem struct { + mu sync.RWMutex + inv map[string]storage.Values +} + +// New creates a new initialized inventory data store. +func New() *InMem { + return &InMem{inv: make(map[string]storage.Values)} +} + +// RetrieveInventory retrieves the inventory data for enrollment IDs. +func (s *InMem) RetrieveInventory(ctx context.Context, opt *storage.SearchOptions) (map[string]storage.Values, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if opt == nil || len(opt.IDs) <= 0 { + return nil, nil + } + ret := make(map[string]storage.Values) + for _, id := range opt.IDs { + if vals, ok := s.inv[id]; ok { + ret[id] = make(storage.Values) + for k, v := range vals { + ret[id][k] = v + } + } + } + return ret, nil +} + +// StoreInventoryValues stores inventory data about the specified ID. 
+func (s *InMem) StoreInventoryValues(ctx context.Context, id string, values storage.Values) error { + s.mu.Lock() + defer s.mu.Unlock() + if s.inv[id] == nil { + s.inv[id] = values + } else { + for k, v := range values { + s.inv[id][k] = v + } + } + return nil +} + +// DeleteInventory deletes all inventory data for an enrollment ID. +func (s *InMem) DeleteInventory(ctx context.Context, id string) error { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.inv, id) + return nil +} diff --git a/subsystem/inventory/storage/inmem/inmem_test.go b/subsystem/inventory/storage/inmem/inmem_test.go new file mode 100644 index 0000000..f526cee --- /dev/null +++ b/subsystem/inventory/storage/inmem/inmem_test.go @@ -0,0 +1,12 @@ +package inmem + +import ( + "testing" + + "github.com/micromdm/nanocmd/subsystem/inventory/storage" + "github.com/micromdm/nanocmd/subsystem/inventory/storage/test" +) + +func TestInMem(t *testing.T) { + test.TestStorage(t, func() storage.Storage { return New() }) +} diff --git a/subsystem/inventory/storage/keys.go b/subsystem/inventory/storage/keys.go new file mode 100644 index 0000000..129f1f6 --- /dev/null +++ b/subsystem/inventory/storage/keys.go @@ -0,0 +1,21 @@ +package storage + +const ( + KeySerialNumber = "serial_number" // string + KeyModel = "model" // string + KeyModelName = "model_name" // string + KeyDeviceName = "device_name" // string + KeyBuildVersion = "build_version" // string + KeyOSVersion = "os_version" // string + KeyEthernetMAC = "ethernet_mac" // string + KeySIPEnabled = "sip_enabled" // bool + KeyFDEEnabled = "fde_enabled" // bool + KeyPRK = "prk" // string + KeySupervised = "supervised" // bool + KeyLastSource = "last_source" // string + KeyModified = "modified" // time.Time + KeyHasBattery = "has_battery" // bool + KeyIsMultiUser = "is_multiuser" // bool + KeySupportsLOM = "supports_lom" // bool + KeyAppleSilicon = "apple_silicon" // bool +) diff --git a/subsystem/inventory/storage/storage.go 
b/subsystem/inventory/storage/storage.go new file mode 100644 index 0000000..211b1be --- /dev/null +++ b/subsystem/inventory/storage/storage.go @@ -0,0 +1,25 @@ +// Package storage defines types and interfaces to support the inventory subsystem. +package storage + +import ( + "context" +) + +// SearchOptions is a basic query for inventory of enrollment IDs. +type SearchOptions struct { + IDs []string // slice of enrollment IDs to query against +} + +// Values maps inventory storage keys to values. +type Values map[string]interface{} + +type ReadStorage interface { + // RetrieveInventory queries and returns the inventory values by mapped by enrollment ID. + RetrieveInventory(ctx context.Context, opt *SearchOptions) (map[string]Values, error) +} + +type Storage interface { + ReadStorage + StoreInventoryValues(ctx context.Context, id string, values Values) error + DeleteInventory(ctx context.Context, id string) error +} diff --git a/subsystem/inventory/storage/test/test.go b/subsystem/inventory/storage/test/test.go new file mode 100644 index 0000000..053a5da --- /dev/null +++ b/subsystem/inventory/storage/test/test.go @@ -0,0 +1,62 @@ +package test + +import ( + "context" + "testing" + + "github.com/micromdm/nanocmd/subsystem/inventory/storage" +) + +func TestStorage(t *testing.T, newStorage func() storage.Storage) { + s := newStorage() + ctx := context.Background() + + id := "AA11BB22" + + updValues := storage.Values{"a": "hi"} + + err := s.StoreInventoryValues(ctx, id, updValues) + if err != nil { + t.Error(err) + } + + q := &storage.SearchOptions{IDs: []string{id}} + idVals, err := s.RetrieveInventory(ctx, q) + if err != nil { + t.Error(err) + } + + vals, ok := idVals[id] + if !ok { + t.Error("expected id in id values map") + } + + testVal, ok := vals["a"] + if !ok { + t.Error("expected map key exists") + } else { + testValString, ok := testVal.(string) + if !ok { + t.Error("test value incorrect") + } + + if have, want := testValString, updValues["a"]; have != want 
{ + t.Errorf("want: %v, have: %v", want, have) + } + } + + err = s.DeleteInventory(ctx, id) + if err != nil { + t.Fatal(err) + } + + idVals, err = s.RetrieveInventory(ctx, q) + if err != nil { + t.Error(err) + } + + _, ok = idVals[id] + if ok { + t.Error("expected id to missing in id values map") + } +} diff --git a/subsystem/profile/http/http.go b/subsystem/profile/http/http.go new file mode 100644 index 0000000..125a316 --- /dev/null +++ b/subsystem/profile/http/http.go @@ -0,0 +1,142 @@ +// Package http provides HTTP handlers for the Profile subsystem. +package http + +import ( + "encoding/json" + "errors" + "io" + "net/http" + + "github.com/alexedwards/flow" + "github.com/micromdm/nanocmd/http/api" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/micromdm/nanocmd/utils/mobileconfig" +) + +// GetProfilesHandler returns an HTTP handler that returns profile metadata for all profile names. +func GetProfilesHandler(store storage.ReadStorage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + profiles, err := store.RetrieveProfileInfos(r.Context(), r.URL.Query()["name"]) + if err != nil { + logger.Info(logkeys.Message, "retrieve profiles", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + logger.Debug(logkeys.Message, "retrieve profiles", "length", len(profiles)) + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(profiles) + if err != nil { + logger.Info(logkeys.Message, "encoding json", logkeys.Error, err) + return + } + } +} + +var ( + ErrEmptyName = errors.New("empty name") + ErrNoSuchName = errors.New("no such name") + ErrEmptyBody = errors.New("empty body") +) + +// DeleteProfileHandler returns an HTTP handler that deletes a named profile. 
+func DeleteProfileHandler(store storage.Storage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + name := flow.Param(r.Context(), "name") + if name == "" { + logger.Info(logkeys.Message, "name check", logkeys.Error, ErrEmptyName) + api.JSONError(w, ErrEmptyName, http.StatusBadRequest) + return + } + logger = logger.With("name", name) + err := store.DeleteProfile(r.Context(), name) + if err != nil { + logger.Info(logkeys.Message, "delete profile", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + w.WriteHeader(http.StatusNoContent) + } +} + +// GetProfileHandler returns an HTTP handler that returns a named raw profile. +func GetProfileHandler(store storage.ReadStorage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + name := flow.Param(r.Context(), "name") + if name == "" { + logger.Info(logkeys.Message, "name check", logkeys.Error, ErrEmptyName) + api.JSONError(w, ErrEmptyName, http.StatusBadRequest) + return + } + logger = logger.With("name", name) + profiles, err := store.RetrieveRawProfiles(r.Context(), []string{name}) + if err != nil { + logger.Info(logkeys.Message, "retrieve profile", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + raw, ok := profiles[name] + if !ok { + // shouldn't actually happen, but be cautious just in case + logger.Info(logkeys.Message, "access retrieved profile", logkeys.Error, ErrNoSuchName) + api.JSONError(w, ErrNoSuchName, 0) + + return + } + w.Header().Set("Content-Type", "application/x-apple-aspen-config") + w.Write(raw) + } +} + +// StoreProfileHandler returns an HTTP handler that uploads a named raw profile. 
+func StoreProfileHandler(store storage.Storage, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + logger := ctxlog.Logger(r.Context(), logger) + name := flow.Param(r.Context(), "name") + if name == "" { + logger.Info(logkeys.Message, "name check", logkeys.Error, ErrEmptyName) + api.JSONError(w, ErrEmptyName, http.StatusBadRequest) + return + } + logger = logger.With("name", name) + raw, err := io.ReadAll(r.Body) + if err != nil { + logger.Info(logkeys.Message, "reading body", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + if len(raw) < 1 { + logger.Info(logkeys.Message, "body check", logkeys.Error, ErrEmptyBody) + api.JSONError(w, ErrEmptyBody, http.StatusBadRequest) + return + } + mc := mobileconfig.Mobileconfig(raw) + payload, _, err := mc.Parse() + if err != nil { + logger.Info(logkeys.Message, "parsing mobileconfig", logkeys.Error, err) + api.JSONError(w, err, http.StatusBadRequest) + return + } + info := storage.ProfileInfo{ + Identifier: payload.PayloadIdentifier, + UUID: payload.PayloadUUID, + } + err = store.StoreProfile(r.Context(), name, info, raw) + if err != nil { + logger.Info(logkeys.Message, "store profile", logkeys.Error, err) + api.JSONError(w, err, 0) + return + } + logger.Debug( + logkeys.Message, "store profile", + "identifier", info.Identifier, + "uuid", info.UUID, + ) + w.WriteHeader(http.StatusNoContent) + } +} diff --git a/subsystem/profile/storage/diskv/diskv.go b/subsystem/profile/storage/diskv/diskv.go new file mode 100644 index 0000000..bc3301d --- /dev/null +++ b/subsystem/profile/storage/diskv/diskv.go @@ -0,0 +1,107 @@ +// Package inmem implements a storage backend for the Profile subsystem backed by diskv. +package diskv + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/peterbourgon/diskv/v3" +) + +// Diskv is a storage backend for the Profile subsystem backed by diskv. 
+type Diskv struct { + diskv *diskv.Diskv +} + +// New creates a new initialized profile data store. +func New(path string) *Diskv { + flatTransform := func(s string) []string { return []string{} } + return &Diskv{ + diskv: diskv.New(diskv.Options{ + BasePath: filepath.Join(path, "profile"), + Transform: flatTransform, + CacheSizeMax: 1024 * 1024, + }), + } +} + +// RetrieveProfileInfos implements the storage interface. +func (s *Diskv) RetrieveProfileInfos(ctx context.Context, names []string) (map[string]storage.ProfileInfo, error) { + if len(names) < 1 { + for name := range s.diskv.Keys(nil) { + if strings.HasSuffix(name, ".identifier") { + names = append(names, name[:len(name)-11]) + } + } + } + ret := make(map[string]storage.ProfileInfo) + for _, name := range names { + if !s.diskv.Has(name + ".identifier") { + return ret, fmt.Errorf("profile not found for %s: %w", name, storage.ErrProfileNotFound) + } + idBytes, err := s.diskv.Read(name + ".identifier") + if err != nil { + return ret, fmt.Errorf("reading identifier for %s: %w", name, err) + } + uuidBytes, err := s.diskv.Read(name + ".uuid") + if err != nil { + return ret, fmt.Errorf("reading uuid for %s: %w", name, err) + } + ret[name] = storage.ProfileInfo{ + Identifier: string(idBytes), + UUID: string(uuidBytes), + } + } + return ret, nil +} + +// RetrieveRawProfiles implements the storage interface. +func (s *Diskv) RetrieveRawProfiles(ctx context.Context, names []string) (map[string][]byte, error) { + if len(names) < 1 { + return nil, storage.ErrNoNames + } + ret := make(map[string][]byte) + for _, name := range names { + if !s.diskv.Has(name + ".raw") { + continue + } + var err error + if ret[name], err = s.diskv.Read(name + ".raw"); err != nil { + return ret, fmt.Errorf("reading raw for %s: %w", name, err) + } + } + return ret, nil +} + +// StoreProfile implements the storage interface. 
+func (s *Diskv) StoreProfile(ctx context.Context, name string, info storage.ProfileInfo, raw []byte) error { + err := s.diskv.Write(name+".raw", raw) + if err != nil { + return fmt.Errorf("writing raw: %w", err) + } + if err = s.diskv.Write(name+".identifier", []byte(info.Identifier)); err != nil { + return fmt.Errorf("writing identifier: %w", err) + } + if err = s.diskv.Write(name+".uuid", []byte(info.UUID)); err != nil { + return fmt.Errorf("writing uuid: %w", err) + } + return nil +} + +// DeleteProfile implements the storage interface. +func (s *Diskv) DeleteProfile(ctx context.Context, name string) error { + err := s.diskv.Erase(name + ".identifier") + if err != nil { + return fmt.Errorf("delete identifier for %s: %w", name, err) + } + if err := s.diskv.Erase(name + ".uuid"); err != nil { + return fmt.Errorf("delete uuid for %s: %w", name, err) + } + if err := s.diskv.Erase(name + ".raw"); err != nil { + return fmt.Errorf("delete raw for %s: %w", name, err) + } + return nil +} diff --git a/subsystem/profile/storage/diskv/diskv_test.go b/subsystem/profile/storage/diskv/diskv_test.go new file mode 100644 index 0000000..13e06c3 --- /dev/null +++ b/subsystem/profile/storage/diskv/diskv_test.go @@ -0,0 +1,14 @@ +package diskv + +import ( + "os" + "testing" + + "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/micromdm/nanocmd/subsystem/profile/storage/test" +) + +func TestDiskv(t *testing.T) { + test.TestProfileStorage(t, func() storage.Storage { return New("teststor") }) + os.RemoveAll("teststor") +} diff --git a/subsystem/profile/storage/inmem/inmem.go b/subsystem/profile/storage/inmem/inmem.go new file mode 100644 index 0000000..a79bf5b --- /dev/null +++ b/subsystem/profile/storage/inmem/inmem.go @@ -0,0 +1,84 @@ +// Package inmem implements an in-memory storage backend for the Profile subsystem. 
+package inmem + +import ( + "context" + "fmt" + "sync" + + "github.com/micromdm/nanocmd/subsystem/profile/storage" +) + +type profile struct { + info storage.ProfileInfo + raw []byte +} + +// InMem is an in-memory storage backend for the Profile subsystem. +type InMem struct { + m sync.RWMutex + p map[string]profile +} + +func New() *InMem { + return &InMem{p: make(map[string]profile)} +} + +// RetrieveProfileInfos implements the storage interface. +func (s *InMem) RetrieveProfileInfos(ctx context.Context, names []string) (map[string]storage.ProfileInfo, error) { + s.m.RLock() + defer s.m.RUnlock() + if len(names) < 1 { + names = make([]string, 0, len(s.p)) + for key := range s.p { + names = append(names, key) + } + } + ret := make(map[string]storage.ProfileInfo) + for _, name := range names { + profile, ok := s.p[name] + if !ok { + return ret, fmt.Errorf("%w: %s", storage.ErrProfileNotFound, name) + } + ret[name] = profile.info + } + return ret, nil +} + +// RetrieveRawProfiles implements the storage interface. +func (s *InMem) RetrieveRawProfiles(ctx context.Context, names []string) (map[string][]byte, error) { + if len(names) < 1 { + return nil, storage.ErrNoNames + } + s.m.RLock() + defer s.m.RUnlock() + ret := make(map[string][]byte) + for _, name := range names { + profile, ok := s.p[name] + if !ok { + return ret, fmt.Errorf("%w: %s", storage.ErrProfileNotFound, name) + } + ret[name] = profile.raw + } + return ret, nil +} + +// StoreProfile implements the storage interface. +func (s *InMem) StoreProfile(ctx context.Context, name string, info storage.ProfileInfo, raw []byte) error { + s.m.Lock() + defer s.m.Unlock() + s.p[name] = profile{info: info, raw: raw} + return nil +} + +// DeleteProfile implements the storage interface. 
+func (s *InMem) DeleteProfile(ctx context.Context, name string) error { + s.m.Lock() + defer s.m.Unlock() + _, ok := s.p[name] + if !ok { + return storage.ErrProfileNotFound + } + delete(s.p, name) + return nil +} diff --git a/subsystem/profile/storage/inmem/inmem_test.go b/subsystem/profile/storage/inmem/inmem_test.go new file mode 100644 index 0000000..7286485 --- /dev/null +++ b/subsystem/profile/storage/inmem/inmem_test.go @@ -0,0 +1,12 @@ +package inmem + +import ( + "testing" + + "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/micromdm/nanocmd/subsystem/profile/storage/test" +) + +func TestInMem(t *testing.T) { + test.TestProfileStorage(t, func() storage.Storage { return New() }) +} diff --git a/subsystem/profile/storage/storage.go b/subsystem/profile/storage/storage.go new file mode 100644 index 0000000..d21d5c6 --- /dev/null +++ b/subsystem/profile/storage/storage.go @@ -0,0 +1,55 @@ +// Package storage defines types and methods for a profile storage backend. +package storage + +import ( + "context" + "errors" +) + +var ( + ErrProfileNotFound = errors.New("profile not found") + ErrNoNames = errors.New("no profile names supplied") +) + +// ProfileInfo is metadata about an Apple Configuration profile. +// It is meant to be taken/parsed directly from an actual profile. +// See https://developer.apple.com/documentation/devicemanagement/toplevel +type ProfileInfo struct { + Identifier string `json:"identifier"` // top-level PayloadIdentifier of the profile. + UUID string `json:"uuid"` // top-level PayloadUUID of the profile. +} + +// Valid checks the validity of the profile metadata. +func (p *ProfileInfo) Valid() bool { + if p == nil || p.Identifier == "" || p.UUID == "" { + return false + } + return true +} + +type ReadStorage interface { + // RetrieveProfileInfos returns the profile metadata by name. + // Implementations have the choice to return all profile metadata if + // no names were provided or not. 
ErrProfileNotFound is returned for + // a name that hasn't been stored. + RetrieveProfileInfos(ctx context.Context, names []string) (map[string]ProfileInfo, error) + + // RetrieveRawProfiles returns the raw profile bytes by name. + // Implementations should not return all profiles if no names were provided. + // ErrProfileNotFound is returned for a name that hasn't been stored. + // ErrNoNames is returned if names is empty. + RetrieveRawProfiles(ctx context.Context, names []string) (map[string][]byte, error) +} + +type Storage interface { + ReadStorage + + // StoreProfile stores a raw profile and associated info in the profile storage by name. + // It is up to the caller to make sure info is correctly populated + // and matches the raw profile bytes. + StoreProfile(ctx context.Context, name string, info ProfileInfo, raw []byte) error + + // DeleteProfile deletes a profile from profile storage. + // ErrProfileNotFound is returned for a name that hasn't been stored. + DeleteProfile(ctx context.Context, name string) error +} diff --git a/subsystem/profile/storage/storage_test.go b/subsystem/profile/storage/storage_test.go new file mode 100644 index 0000000..c9a2bc0 --- /dev/null +++ b/subsystem/profile/storage/storage_test.go @@ -0,0 +1,24 @@ +package storage + +import "testing" + +func TestProfileInfoValid(t *testing.T) { + tests := []struct { + name string + profile *ProfileInfo + expected bool + }{ + {"valid profile", &ProfileInfo{Identifier: "com.example.profile", UUID: "01FEBD58-42B6-4167-BF37-95E14D8F2D26"}, true}, + {"empty Identifier", &ProfileInfo{Identifier: "", UUID: "01FEBD58-42B6-4167-BF37-95E14D8F2D26"}, false}, + {"empty UUID", &ProfileInfo{Identifier: "com.example.profile", UUID: ""}, false}, + {"nil profile", nil, false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if valid := test.profile.Valid(); valid != test.expected { + t.Errorf("Expected profile validity to be %v, but got %v", test.expected, valid) + } + }) + } 
+} diff --git a/subsystem/profile/storage/test/test.go b/subsystem/profile/storage/test/test.go new file mode 100644 index 0000000..a602c32 --- /dev/null +++ b/subsystem/profile/storage/test/test.go @@ -0,0 +1,86 @@ +package test + +import ( + "bytes" + "context" + "errors" + "reflect" + "testing" + + "github.com/micromdm/nanocmd/subsystem/profile/storage" +) + +func TestProfileStorage(t *testing.T, newStorage func() storage.Storage) { + s := newStorage() + ctx := context.Background() + + info := storage.ProfileInfo{Identifier: "com.test", UUID: "01AB"} + raw := []byte("23CD") + + err := s.StoreProfile(ctx, "test", info, raw) + if err != nil { + t.Fatal(err) + } + + infos, err := s.RetrieveProfileInfos(ctx, []string{"test"}) + if err != nil { + t.Fatal(err) + } + + info2, ok := infos["test"] + if !ok { + t.Error("key not found after retrieval") + } + + if !reflect.DeepEqual(info, info2) { + t.Error("info not equal") + } + + // test with no names (should return all) + infos, err = s.RetrieveProfileInfos(ctx, nil) + if err != nil { + t.Fatal(err) + } + + info2, ok = infos["test"] + if !ok { + t.Error("key not found after retrieval (retrieving all keys)") + } + + if !reflect.DeepEqual(info, info2) { + t.Error("info not equal") + } + + raws, err := s.RetrieveRawProfiles(ctx, []string{"test"}) + if err != nil { + t.Fatal(err) + } + + raw2, ok := raws["test"] + if !ok { + t.Error("key not found after retrieval") + } + + if !bytes.Equal(raw, raw2) { + t.Error("raw not equal") + } + + raws, err = s.RetrieveRawProfiles(ctx, []string{}) + if len(raws) > 0 { + t.Error("should not return any profiles when using no names") + } + if !errors.Is(err, storage.ErrNoNames) { + t.Fatal("expected ErrNoNames") + } + + err = s.DeleteProfile(ctx, "test") + if err != nil { + t.Fatal(err) + } + + _, err = s.RetrieveProfileInfos(ctx, []string{"test"}) + if !errors.Is(err, storage.ErrProfileNotFound) { + t.Fatal("expected ErrProfileNotFound") + } + +} diff --git a/utils/cryptoutil/cert.go 
b/utils/cryptoutil/cert.go new file mode 100644 index 0000000..2b66106 --- /dev/null +++ b/utils/cryptoutil/cert.go @@ -0,0 +1,39 @@ +package cryptoutil + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" +) + +// SelfSignedRSAKeypair generates a 2048-bit RSA private key and self-signs an +// X.509 certificate using it. You can set the Common Name in cn and the +// validity duration with days. +func SelfSignedRSAKeypair(cn string, days int) (*rsa.PrivateKey, *x509.Certificate, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + timeNow := time.Now() + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: timeNow.Add(time.Minute * -10), + NotAfter: timeNow.Add(time.Duration(days) * 24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + } + if cn != "" { + template.Subject = pkix.Name{CommonName: cn} + } + certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key) + if err != nil { + return nil, nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, nil, err + } + return key, cert, err +} diff --git a/utils/kv/kv.go b/utils/kv/kv.go new file mode 100644 index 0000000..5dd857a --- /dev/null +++ b/utils/kv/kv.go @@ -0,0 +1,45 @@ +// Package kv defines an interface for key-value store. +package kv + +import ( + "context" + "fmt" +) + +// Bucket defines basic CRUD operations for key-value pairs in a single "namespace." +type Bucket interface { + Get(ctx context.Context, k string) (v []byte, err error) + Set(ctx context.Context, k string, v []byte) error + Has(ctx context.Context, k string) (found bool, err error) + Delete(ctx context.Context, k string) error +} + +// TraversingBucket allows us to get a list of the keys in the bucket as well. 
+type TraversingBucket interface { + Bucket + // Keys returns the unordered keys in the bucket + Keys(cancel <-chan struct{}) <-chan string +} + +// SetMap iterates over m to set the keys in b and returns any error. +func SetMap(ctx context.Context, b Bucket, m map[string][]byte) error { + var err error + for k, v := range m { + if err = b.Set(ctx, k, v); err != nil { + return fmt.Errorf("setting %s: %w", k, err) + } + } + return nil +} + +// SetMap iterates over keys to get the values in b and returns any error. +func GetMap(ctx context.Context, b Bucket, keys []string) (map[string][]byte, error) { + var err error + ret := make(map[string][]byte) + for _, k := range keys { + if ret[k], err = b.Get(ctx, k); err != nil { + return ret, fmt.Errorf("getting %s: %w", k, err) + } + } + return ret, nil +} diff --git a/utils/kv/kvdiskv/kvdiskv.go b/utils/kv/kvdiskv/kvdiskv.go new file mode 100644 index 0000000..8ae1dbd --- /dev/null +++ b/utils/kv/kvdiskv/kvdiskv.go @@ -0,0 +1,37 @@ +// Package kvdiskv wraps diskv to a standard interface for a key-value store. +package kvdiskv + +import ( + "context" + + "github.com/peterbourgon/diskv/v3" +) + +// KVDiskv wraps a diskv object to implement an on-disk key-value store. 
+type KVDiskv struct { + diskv *diskv.Diskv +} + +func NewBucket(dv *diskv.Diskv) *KVDiskv { + return &KVDiskv{diskv: dv} +} + +func (s *KVDiskv) Get(_ context.Context, k string) ([]byte, error) { + return s.diskv.Read(k) +} + +func (s *KVDiskv) Set(_ context.Context, k string, v []byte) error { + return s.diskv.Write(k, v) +} + +func (s *KVDiskv) Has(_ context.Context, k string) (bool, error) { + return s.diskv.Has(k), nil +} + +func (s *KVDiskv) Delete(_ context.Context, k string) error { + return s.diskv.Erase(k) +} + +func (s *KVDiskv) Keys(cancel <-chan struct{}) <-chan string { + return s.diskv.Keys(cancel) +} diff --git a/utils/kv/kvmap/kvmap.go b/utils/kv/kvmap/kvmap.go new file mode 100644 index 0000000..f33590e --- /dev/null +++ b/utils/kv/kvmap/kvmap.go @@ -0,0 +1,70 @@ +// Package kvmap implements an in-memory key-value store backed by a Go map. +package kvmap + +import ( + "context" + "fmt" + "sync" +) + +// KVMap is an in-memory key-value store backed by a Go map. +type KVMap struct { + mu sync.RWMutex + m map[string][]byte +} + +func NewBucket() *KVMap { + return &KVMap{m: make(map[string][]byte)} +} + +func (s *KVMap) Get(_ context.Context, k string) ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.m[k] + if !ok { + return nil, fmt.Errorf("key not found: %s", k) + } + return v, nil +} + +func (s *KVMap) Set(_ context.Context, k string, v []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + s.m[k] = v + return nil +} + +func (s *KVMap) Has(_ context.Context, k string) (bool, error) { + s.mu.RLock() + defer s.mu.RUnlock() + _, ok := s.m[k] + return ok, nil +} + +func (s *KVMap) Delete(_ context.Context, k string) error { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.m, k) + return nil +} + +// Keys returns the keys in this bucket. +// Note that this function spawns a go routine that keeps a read lock on +// the internal map. 
This means that if you attempt to write to the map +// while you're, say, iterating over a keys list you will likely deadlock. +func (s *KVMap) Keys(cancel <-chan struct{}) <-chan string { + r := make(chan string) + go func() { + s.mu.RLock() + defer s.mu.RUnlock() + defer close(r) + for k := range s.m { + select { + case <-cancel: + return + case r <- k: + } + } + }() + return r +} diff --git a/utils/mobileconfig/mobileconfig.go b/utils/mobileconfig/mobileconfig.go new file mode 100644 index 0000000..3697d81 --- /dev/null +++ b/utils/mobileconfig/mobileconfig.go @@ -0,0 +1,72 @@ +// Package mobileconfig parses Apple Configuration profiles for basic information. +package mobileconfig + +import ( + "bytes" + "errors" + "fmt" + + "github.com/groob/plist" + "go.mozilla.org/pkcs7" +) + +// Payload is some of the "top-level" configuration profile information. +// See https://developer.apple.com/documentation/devicemanagement/toplevel +type Payload struct { + PayloadDescription string `plist:",omitempty"` + PayloadDisplayName string `plist:",omitempty"` + PayloadIdentifier string + PayloadOrganization string `plist:",omitempty"` + PayloadUUID string + PayloadType string + PayloadVersion int +} + +var ErrInvalidPayload = errors.New("invalid payload") + +// Validate tests a Payload against basic validity of required fields. +func (p *Payload) Validate() error { + if p == nil { + return fmt.Errorf("%w: empty payload", ErrInvalidPayload) + } + if p.PayloadIdentifier == "" { + return fmt.Errorf("%w: PayloadIdentifier is empty", ErrInvalidPayload) + } + if p.PayloadUUID == "" { + return fmt.Errorf("%w: PayloadUUID is empty", ErrInvalidPayload) + } + if p.PayloadType == "" { + return fmt.Errorf("%w: PayloadType is empty", ErrInvalidPayload) + } + if p.PayloadVersion != 1 { + return fmt.Errorf("%w: PayloadVersion is not 1", ErrInvalidPayload) + } + return nil +} + +type Mobileconfig []byte + +// Parse parses an Apple Configuration Profile to extract profile information. 
+// Profile signed status is also returned. +func (mc Mobileconfig) Parse() (*Payload, bool, error) { + signed := false + if !bytes.HasPrefix(mc, []byte(" + + + + PayloadContent + + + DefaultBrowserSettingEnabled + + EnableMediaRouter + + PayloadDisplayName + Google Chrome + PayloadIdentifier + com.google.Chrome.DB0791F4-AF79-4DCF-8A74-32A9169BEC8B + PayloadType + com.google.Chrome + PayloadUUID + BCD365EE-8534-4A52-B73A-53A2F9612A7B + PayloadVersion + 1 + + + DefaultBrowserPromptingState2 + 1 + DefaultBrowserPromptingState3 + 2 + PayloadDisplayName + Safari + PayloadIdentifier + com.apple.Safari.D93BD016-56DB-4F71-8F99-62D109AC1901 + PayloadType + com.apple.Safari + PayloadUUID + 822A0441-20B8-4C6C-A2C2-10C88332ABE2 + PayloadVersion + 1 + + + PayloadDisplayName + Google Chrome Default Browser + PayloadIdentifier + 0DA6B871-623D-400A-B0EB-3BE489E39F2A + PayloadOrganization + Org + PayloadType + Configuration + PayloadUUID + D0CCE647-B1D6-49B0-82BC-C1BCC8A33218 + PayloadVersion + 1 + + diff --git a/utils/uuid/uuid.go b/utils/uuid/uuid.go new file mode 100644 index 0000000..d373c1f --- /dev/null +++ b/utils/uuid/uuid.go @@ -0,0 +1,41 @@ +// Package uuid provides UUID generation and test utilities. +package uuid + +import "github.com/google/uuid" + +// IDers generate identifiers. +type IDer interface { + ID() string +} + +// UUID is an ID generator utilizing a UUID. +type UUID struct{} + +// NewUUID creates a new UUID ID generator. +func NewUUID() *UUID { + return &UUID{} +} + +// ID generates a new UUID ID. +func (u *UUID) ID() string { + return uuid.NewString() +} + +// StaticID is an ID generator thats cycles through provided IDs. +type StaticIDs struct { + ids []string + i int +} + +// NewStaticID creates a new static ID generator. +func NewStaticIDs(ids ...string) *StaticIDs { + return &StaticIDs{ids: ids} +} + +// ID returns the next ID. +// It will continually cycle through the IDs. 
+func (s *StaticIDs) ID() string { + id := s.ids[s.i%len(s.ids)] + s.i++ + return id +} diff --git a/utils/uuid/uuid_test.go b/utils/uuid/uuid_test.go new file mode 100644 index 0000000..b3fa07f --- /dev/null +++ b/utils/uuid/uuid_test.go @@ -0,0 +1,21 @@ +package uuid + +import ( + "testing" +) + +func TestUUIDUnique(t *testing.T) { + u := NewUUID() + if u.ID() == u.ID() { + t.Error("UUIDs are not unique") + } +} + +func TestStaticIDs(t *testing.T) { + u := NewStaticIDs("A", "B") + for _, expected := range []string{"A", "B", "A", "B", "A"} { + if have, want := u.ID(), expected; have != want { + t.Errorf("unexpected ID: have: %v, want: %v", have, want) + } + } +} diff --git a/workflow/cmdplan/workflow.go b/workflow/cmdplan/workflow.go new file mode 100644 index 0000000..407492d --- /dev/null +++ b/workflow/cmdplan/workflow.go @@ -0,0 +1,222 @@ +// Package cmdplan implements a NanoCMD Workflow for sending pre-configured commands to enrollments. +package cmdplan + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/mdm" + "github.com/micromdm/nanocmd/subsystem/cmdplan/storage" + profstorage "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +const WorkflowName = "io.micromdm.wf.cmdplan.v1" + +type Workflow struct { + enq workflow.StepEnqueuer + ider uuid.IDer + logger log.Logger + store storage.ReadStorage + profStore profstorage.ReadStorage +} + +type Option func(*Workflow) + +func WithLogger(logger log.Logger) Option { + return func(w *Workflow) { + w.logger = logger + } +} + +func New(enq workflow.StepEnqueuer, store storage.ReadStorage, profStorage profstorage.ReadStorage, opts ...Option) (*Workflow, error) { + w := &Workflow{ + enq: enq, + ider: uuid.NewUUID(), + 
logger: log.NopLogger, + store: store, + profStore: profStorage, + } + for _, opt := range opts { + opt(w) + } + w.logger = w.logger.With(logkeys.WorkflowName, w.Name()) + return w, nil +} + +func (w *Workflow) Name() string { + return WorkflowName +} + +func (w *Workflow) Config() *workflow.Config { + return nil +} + +func (w *Workflow) NewContextValue(name string) workflow.ContextMarshaler { + return new(workflow.StringContext) +} + +// TODO: create a map of command UUID to more useful types for logging +func (w *Workflow) commandsFromCMDPlan(ctx context.Context, cmdPlan *storage.CMDPlan, name string, e *workflow.Event) ([]interface{}, error) { + // bail if invalid + if cmdPlan == nil { + return nil, errors.New("invalid cmdplan") + } + + var commands []interface{} + + if len(cmdPlan.ProfileNames) > 0 { + // get our raw profiles + rawProfiles, err := w.profStore.RetrieveRawProfiles(ctx, cmdPlan.ProfileNames) + if err != nil { + return nil, fmt.Errorf("retrieving profiles: %w", err) + } + + // build the profile MDM commands + for _, name := range cmdPlan.ProfileNames { + rawProfile, ok := rawProfiles[name] + if !ok { + return commands, fmt.Errorf("raw profile not found: %s", name) + } + + c := mdmcommands.NewInstallProfileCommand(w.ider.ID()) + c.Command.Payload = rawProfile + commands = append(commands, c) + } + } + + // build the install application MDM commands + for _, url := range cmdPlan.ManifestURLs { + c := mdmcommands.NewInstallApplicationCommand(w.ider.ID()) + mgmtFlag := 1 + c.Command.ManagementFlags = &mgmtFlag + c.Command.ManifestURL = &url + commands = append(commands, c) + } + + // determine if we need to send the device configured command + // TODO: this may require using a separate step if we can't guarantee + // ordered queueing of MDM commands in step enqueuings. 
// expandParams performs shell-like ${var} expansion on s, substituting
// values from p. An optional colon-separated "default" may follow the
// variable name (e.g. "${name:fallback}") and is used when the variable
// is absent from p; otherwise missing variables expand to the empty string.
func expandParams(s string, p map[string]string) string {
	return os.Expand(s, func(v string) string {
		// split into variable name and optional default value.
		parts := strings.SplitN(v, ":", 2)
		if val, ok := p[parts[0]]; ok {
			return val
		}
		if len(parts) > 1 {
			return parts[1]
		}
		return ""
	})
}
StepEnqueuing + se := step.NewStepEnqueueing() + se.Commands = commands // assign all the commands to the step + + // enqueue our step! + return w.enq.EnqueueStep(ctx, w, se) +} + +func (w *Workflow) StepCompleted(ctx context.Context, stepResult *workflow.StepResult) error { + // TODO: implement a logger and iter over the commands to see if there's any errors + // TODO2: implement a map struct so we can log even better errors (i.e. which specific profile, etc.) + logger := ctxlog.Logger(ctx, w.logger).With(logkeys.InstanceID, stepResult.InstanceID) + statuses := make(map[string]int) + for _, resp := range stepResult.CommandResults { + genResper, ok := resp.(mdmcommands.GenericResponser) + if !ok { + continue + } + genResp := genResper.GetGenericResponse() + statuses[genResp.Status] += 1 + // TODO: log the association from command UUID to command details in context struct + if err := genResp.Validate(); err != nil { + logger.Info( + logkeys.Message, "validate MDM response", + logkeys.CommandUUID, genResp.CommandUUID, + logkeys.Error, err, + ) + } + } + logs := []interface{}{logkeys.Message, "workflow complete"} + for k, v := range statuses { + logs = append(logs, "count_"+strings.ToLower(k), v) + } + logger.Debug(logs...) 
// Exclusivity is the exclusivity "mode" for a workflow.
type Exclusivity uint

const (
	// Exclusive means a workflow can only run if no other pending step
	// for this workflow for an enrollment id exists in the system.
	// This is the default mode (0 value).
	Exclusive Exclusivity = iota

	// MultipleSimultaneous means a workflow can run simultaneous
	// instances for an enrollment ID.
	MultipleSimultaneous

	// maxExclusivity bounds the defined modes for validity checks.
	maxExclusivity
)

// Valid reports whether we is a defined exclusivity mode.
func (we Exclusivity) Valid() bool {
	switch we {
	case Exclusive, MultipleSimultaneous:
		return true
	}
	return false
}
if this default is not sepcified then + // the engine's default timeout is used. + Timeout time.Duration + + // defines the workflow exclusivity style + Exclusivity + + // workflows have the option to receive command responses from + // any MDM command request type that the engine enqueues (i.e. from + // other workflows) — not just the commands that this workflow + // enqueues. specifiy the Request Types for those command here. They + // will be received by the workflow as an event. + AllCommandResponseRequestTypes []string + + // event subscriptions. this workflow will get called every time + // these events happen. use bitwise OR to specify multiple events. + Events EventFlag +} diff --git a/workflow/context.go b/workflow/context.go new file mode 100644 index 0000000..82f1306 --- /dev/null +++ b/workflow/context.go @@ -0,0 +1,100 @@ +package workflow + +import ( + "encoding" + "errors" + "strconv" +) + +// ContextMarshaler marshals and unmarshals types to and from byte slices. +// This encapsulates arbitrary context types to be passed around and +// stored as binary blobs by components (that are not a workflow ) that +// don't need to care about what the contents are (e.g. storage backends +// or HTTP handlers). +type ContextMarshaler interface { + encoding.BinaryMarshaler + encoding.BinaryUnmarshaler +} + +// MDMContext contains context related to the MDM server, enrollment, and/or MDM request. +type MDMContext struct { + // Params are the URL parameters included in the MDM request from an + // enrollment. These parameters would be set on the `CheckInURL` or + // `ServerURL` parameters in the enrollment profile. Note because + // these come from a connecting MDM client they may not be present + // in all contexts — only those that originate from an MDM request. + Params map[string]string +} + +// StepContext contains context for a step. +type StepContext struct { + // MDM client/server context. Note that a step can be more than one + // MDM command response. 
// StringContext is a simple string ContextMarshaler.
type StringContext string

// MarshalBinary converts c into a byte slice.
// A nil receiver is an error.
func (c *StringContext) MarshalBinary() ([]byte, error) {
	if c == nil {
		return nil, errors.New("nil value")
	}
	return []byte(*c), nil
}

// UnmarshalBinary converts and loads data into c.
// A nil receiver is an error.
func (c *StringContext) UnmarshalBinary(data []byte) error {
	if c == nil {
		return errors.New("nil value")
	}
	*c = StringContext(data)
	return nil
}

// IntContext is a simple integer ContextMarshaler.
type IntContext int

// MarshalBinary converts c into its decimal string form as a byte slice.
// A nil receiver is an error.
func (c *IntContext) MarshalBinary() ([]byte, error) {
	if c == nil {
		return nil, errors.New("nil value")
	}
	return strconv.AppendInt(nil, int64(*c), 10), nil
}

// UnmarshalBinary parses a decimal string from data and loads it into c.
// A nil receiver is an error.
func (c *IntContext) UnmarshalBinary(data []byte) error {
	if c == nil {
		return errors.New("nil value")
	}
	i, err := strconv.Atoi(string(data))
	if err != nil {
		return err
	}
	*c = IntContext(i)
	return nil
}
+ +Workflows are identified by names. By convention these are reverse-DNS +style and are indended to be unique amongst the workflow engine and be +human readable. The workflow names serve as the way to "route" workflow +actions to workflows. + +Newly started workflows are given an instance ID. This is just a unique +identifier for tracking or logging. The intent is to associate this ID +to a workflow that has been started and on which devices for logging or +other tracking. + +# Steps + +Workflows are facilitated by one or more steps. A step is a set of one +or more MDM commands. A newly started workflow enqueues (sends) a +step to one or more devices. A step is completed for an enrollment ID +when all commands in the step are received by a single device — whether +they have an error or not. `NotNow` handling is done for you: a workflow +will only receive a response for an `Acknowledge` or `Error` response to +an MDM command. The step can Timeout — this is when any of the enqueued +commands do not respond within the Timeout given when they were +enqueued. Steps are intended to be sequential for an enrollment ID — +that is a workflow's step completion handler should only enqueue one +step at a time (or none, if the workflow is finished). + +Steps are identified by name. There is no convention for these names as +they are workflow specific but they should be human readable as they +will likely be logged and keyed on. It is intended workflows will +identify specific step completions by the name of the step. + +# Context + +When you enqueue a step you can associate a context value with it. This +context is marshaled into binary (in any way the workflow may like, but +likely to be JSON or a "bare" string). Then, upon step completion, this +same context is unmarshaled and handed back to the workflow's step +completion handler. In this way a workflow can keep track of any data or +metadata between enqueued steps and their responses if you wish. 
As +mentioned above the step itself also has a name which may preclude the +need for any additional context, but if you need additional context or +data this context is present. + +When a workflow is started an initial context can be passed in. +Typically this will be from an API handler that takes data in. The step +name for a newly started workflow is the empty string. + +# Process model + +No assumptions should be made about the state of the workflow object +receiving method calls. In other words assume the worst: that it's a +shared object (vs, say, newly instantiated in a request context) and +that multiple calls of the methods on the same object will be running +concurrently. Protect any shared resources appropriately (e.g. mutexes +and locking). Even better is to push any saved state into the storage +layers anyway. +*/ +package workflow diff --git a/workflow/event.go b/workflow/event.go new file mode 100644 index 0000000..d9a60b0 --- /dev/null +++ b/workflow/event.go @@ -0,0 +1,75 @@ +package workflow + +import ( + "errors" + "fmt" +) + +// ErrEventsNotSupported returned from a workflow Event() method. +var ErrEventsNotSupported = errors.New("events not supported for this workflow") + +// EventFlag is a bitmask of event types. +type EventFlag uint + +// Storage backends (persistent storage) are likely to use these numeric +// values. Treat these as append-only: Order and position matter. +const ( + EventAllCommandResponse EventFlag = 1 << iota + EventAuthenticate + EventTokenUpdate + // TokenUpdate and Enrollment are considered distinct because an + // enrollment will only enroll once, but TokenUpdates can + // continually arrive. 
+ EventEnrollment + EventCheckOut + maxEventFlag +) + +func (e EventFlag) Valid() bool { + return e > 0 && e < maxEventFlag +} + +func (e EventFlag) String() string { + switch e { + case EventAllCommandResponse: + return "AllCommandResponse" + case EventAuthenticate: + return "Authenticate" + case EventTokenUpdate: + return "TokenUpdate" + case EventEnrollment: + return "Enrollment" + case EventCheckOut: + return "CheckOut" + default: + return fmt.Sprintf("unknown event type: %d", e) + } +} + +func EventFlagForString(s string) EventFlag { + switch s { + case "AllCommandResponse": + return EventAllCommandResponse + case "Authenticate": + return EventAuthenticate + case "TokenUpdate": + return EventTokenUpdate + case "Enrollment": + return EventEnrollment + case "CheckOut": + return EventCheckOut + default: + return 0 + } +} + +// Event is a specific workflow MDM event. +type Event struct { + EventFlag + // EventData is likely a pointer to a struct of the relevent event data. + // You will need to know the data you're expecting and use Go type + // conversion to access it if you need it. + // For example the EventAuthenticate EventFlag will be + // a `*mdm.Authenticate` under the `interface{}`. 
+ EventData interface{} +} diff --git a/workflow/fvenable/profile.go b/workflow/fvenable/profile.go new file mode 100644 index 0000000..60fb7f5 --- /dev/null +++ b/workflow/fvenable/profile.go @@ -0,0 +1,66 @@ +package fvenable + +const ProfileTemplate = ` + + + + PayloadContent + + + EncryptCertPayloadUUID + 00FD73C8-9AE6-4A1E-BBD0-22315CDE7533 + Location + MDM server + PayloadIdentifier + io.micromdm.wf.fvenable.v1.payload.escrow + PayloadType + com.apple.security.FDERecoveryKeyEscrow + PayloadUUID + 5943E786-24DD-4DE7-A27C-3F84B55A7A4B + PayloadVersion + 1 + + + Defer + + DeferForceAtUserLoginMaxBypassAttempts + 0 + Enable + On + PayloadIdentifier + io.micromdm.wf.fvenable.v1.payload.filevault + PayloadType + com.apple.MCX.FileVault2 + PayloadUUID + ED92D4D5-FF80-430B-AACC-67C32B685E59 + PayloadVersion + 1 + ShowRecoveryKey + + + + PayloadContent + __CERTIFICATE__ + PayloadIdentifier + io.micromdm.wf.fvenable.v1.payload.pkcs1 + PayloadType + com.apple.security.pkcs1 + PayloadUUID + 00FD73C8-9AE6-4A1E-BBD0-22315CDE7533 + PayloadVersion + 1 + + + PayloadDisplayName + FileVault + PayloadIdentifier + io.micromdm.wf.fvenable.v1.profile + PayloadType + Configuration + PayloadUUID + 49DDB449-163E-4408-B05D-FA4814CDEE3E + PayloadVersion + 1 + + +` diff --git a/workflow/fvenable/workflow.go b/workflow/fvenable/workflow.go new file mode 100644 index 0000000..7739c71 --- /dev/null +++ b/workflow/fvenable/workflow.go @@ -0,0 +1,260 @@ +// Package fvenable implements a NanoCMD Workflow for enabling FileVault on a Mac. 
+package fvenable + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "time" + + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + fvstorage "github.com/micromdm/nanocmd/subsystem/filevault/storage" + profstorage "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +const WorkflowName = "io.micromdm.wf.fvenable.v1" + +type Workflow struct { + enq workflow.StepEnqueuer + ider uuid.IDer + logger log.Logger + store fvstorage.FVEnable + profStore profstorage.ReadStorage +} + +const ( + stepNameInstall = "install" + stepNamePoll = "poll" + + pollInterval = 2 * time.Minute // polling interval + pollCounter = 180 // how many times we poll (total ~6 hrs) +) + +type Option func(*Workflow) + +func WithLogger(logger log.Logger) Option { + return func(w *Workflow) { + w.logger = logger + } +} + +func New(enq workflow.StepEnqueuer, store fvstorage.FVEnable, profStore profstorage.ReadStorage, opts ...Option) (*Workflow, error) { + if store == nil { + return nil, errors.New("empty store") + } + w := &Workflow{ + enq: enq, + ider: uuid.NewUUID(), + logger: log.NopLogger, + store: store, + profStore: profStore, + } + for _, opt := range opts { + opt(w) + } + w.logger = w.logger.With(logkeys.WorkflowName, w.Name()) + return w, nil +} + +func (w *Workflow) Name() string { + return WorkflowName +} + +func (w *Workflow) Config() *workflow.Config { + return nil +} + +func (w *Workflow) NewContextValue(name string) workflow.ContextMarshaler { + if name == stepNamePoll { + return new(workflow.IntContext) // poll counter + } + return nil +} + +// profileTemplate retrieves the FileVault profile template from the +// profile store or falls back to the static/hardcoded profile. 
+func (w *Workflow) profileTemplate(ctx context.Context) ([]byte, error) { + profiles, err := w.profStore.RetrieveRawProfiles(ctx, []string{WorkflowName}) + if err != nil { + return nil, fmt.Errorf("retrieving profile %s: %w", WorkflowName, err) + } + var profile []byte + if profiles != nil { + profile = profiles[WorkflowName] + } + if len(profile) < 1 { + profile = []byte(ProfileTemplate) + } + return profile, nil +} + +// createInstallProfileCommand creates the InstallProfile command for FV profiles. +// It will inject the certificate used for encrypting the PRK into the +// profile using text replacement. +func (w *Workflow) createInstallProfileCommand(ctx context.Context, id string, profileTemplate []byte) (*mdmcommands.InstallProfileCommand, error) { + // retrieve the encryption certificate + certRaw, err := w.store.RetrievePRKCertRaw(ctx, id) + if err != nil { + return nil, fmt.Errorf("retrieving PRK cert raw: %w", err) + } + // profiles encode binary data as base64. + certB64 := []byte(base64.StdEncoding.EncodeToString(certRaw)) + // inject the certificate into the profile payload. + profile := bytes.Replace(profileTemplate, []byte("__CERTIFICATE__"), certB64, 1) + // generate the command + cmd := mdmcommands.NewInstallProfileCommand(w.ider.ID()) + cmd.Command.Payload = profile + return cmd, nil +} + +// Start installs the initial FileVault profile. +func (w *Workflow) Start(ctx context.Context, step *workflow.StepStart) error { + profTemplate, err := w.profileTemplate(ctx) + if err != nil { + return fmt.Errorf("getting profile: %w", err) + } + + for _, id := range step.IDs { + cmd, err := w.createInstallProfileCommand(ctx, id, profTemplate) + if err != nil { + return fmt.Errorf("creating install profile command for %s: %w", id, err) + } + + // assemble our StepEnqueuing + se := step.NewStepEnqueueing() + se.IDs = []string{id} // scope to just this ID we're iterating over + se.Commands = []interface{}{cmd} + se.Name = stepNameInstall + + // enqueue our step! 
+		// enqueue our step!
+		if err = w.enq.EnqueueStep(ctx, w, se); err != nil {
+			return fmt.Errorf("enqueueing step for %s: %w", id, err)
+		}
+	}
+	// every per-ID step enqueued successfully. return nil explicitly:
+	// the outer err is guaranteed nil here (checked after profileTemplate)
+	// and returning it would be misleading and fragile under refactoring.
+	return nil
+}
+
+// installStepCompleted verifies a good profile install and initiates polling with the SecurityInfo command.
+func (w *Workflow) installStepCompleted(ctx context.Context, logger log.Logger, stepResult *workflow.StepResult) error {
+	// exactly one InstallProfile command was enqueued for this step.
+	if len(stepResult.CommandResults) != 1 {
+		return workflow.ErrStepResultCommandLenMismatch
+	}
+	response, ok := stepResult.CommandResults[0].(*mdmcommands.InstallProfileResponse)
+	if !ok {
+		return workflow.ErrIncorrectCommandType
+	}
+	if err := response.Validate(); err != nil {
+		return fmt.Errorf("validating install response: %w", err)
+	}
+
+	logger.Debug(logkeys.Message, "install completed, initiating polling")
+
+	// assemble our StepEnqueueing to kick off our polling
+	se := stepResult.NewStepEnqueueing()
+	se.Commands = []interface{}{mdmcommands.NewSecurityInfoCommand(w.ider.ID())}
+	se.Name = stepNamePoll
+	// seed the poll counter context; it is decremented on each poll step.
+	ctxVal := workflow.IntContext(pollCounter)
+	se.Context = &ctxVal
+	se.NotUntil = time.Now().Add(pollInterval)
+
+	// enqueue our step!
+ return w.enq.EnqueueStep(ctx, w, se) +} + +func boolPtr(b *bool) bool { + if b != nil { + return *b + } + return false +} + +func (w *Workflow) pollStepCompleted(ctx context.Context, logger log.Logger, stepResult *workflow.StepResult) error { + if len(stepResult.CommandResults) != 1 { + return workflow.ErrStepResultCommandLenMismatch + } + response, ok := stepResult.CommandResults[0].(*mdmcommands.SecurityInfoResponse) + if !ok { + return workflow.ErrIncorrectCommandType + } + + ctxVal, ok := stepResult.Context.(*workflow.IntContext) + if !ok { + return errors.New("invalid context value type") + } + if *ctxVal < 0 { + return errors.New("maximum poll counter reached, ending workflow") + } + + if err := response.Validate(); err != nil { + logger.Info( + logkeys.Message, "validating poll response", + logkeys.Error, err, + ) + } else if !boolPtr(response.SecurityInfo.FDEEnabled) { + logger.Info( + logkeys.Message, "checking FDE enabled", + logkeys.Error, "FDE not enabled", + ) + } else if response.SecurityInfo.FDEPersonalRecoveryKeyCMS == nil { + logger.Info( + logkeys.Message, "checking PRK CMS", + logkeys.Error, "FDE enabled but PRK CMS not present", + ) + } else if err = w.store.EscrowPRK(ctx, stepResult.ID, *response.SecurityInfo.FDEPersonalRecoveryKeyCMS); err != nil { + logger.Info( + logkeys.Message, "escrow PRK", + logkeys.Error, err, + ) + } else { + logger.Debug( + logkeys.Message, "escrowed PRK", + ) + return nil + } + + *ctxVal -= 1 // subtract one from our poll counter + logger.Debug( + logkeys.Message, "continuing polling", + "count_remaining", int(*ctxVal), + ) + + // assemble our StepEnqueuing + se := stepResult.NewStepEnqueueing() + se.Commands = []interface{}{mdmcommands.NewSecurityInfoCommand(w.ider.ID())} + se.Name = stepNamePoll + se.NotUntil = (time.Now().Add(pollInterval)) + se.Context = ctxVal + + // enqueue our step! 
+ return w.enq.EnqueueStep(ctx, w, se) +} + +func (w *Workflow) StepCompleted(ctx context.Context, stepResult *workflow.StepResult) error { + logger := ctxlog.Logger(ctx, w.logger).With( + logkeys.InstanceID, stepResult.InstanceID, + logkeys.EnrollmentID, stepResult.ID, + ) + switch stepResult.Name { + case stepNameInstall: + return w.installStepCompleted(ctx, logger, stepResult) + case stepNamePoll: + return w.pollStepCompleted(ctx, logger, stepResult) + default: + return fmt.Errorf("%w: %s", workflow.ErrUnknownStepName, stepResult.Name) + } +} + +func (w *Workflow) StepTimeout(_ context.Context, _ *workflow.StepResult) error { + return workflow.ErrTimeoutNotUsed +} + +func (w *Workflow) Event(_ context.Context, _ *workflow.Event, _ string, _ *workflow.MDMContext) error { + return workflow.ErrEventsNotSupported +} diff --git a/workflow/fvrotate/workflow.go b/workflow/fvrotate/workflow.go new file mode 100644 index 0000000..63ceacc --- /dev/null +++ b/workflow/fvrotate/workflow.go @@ -0,0 +1,126 @@ +// Package fvrotate implements a NanoCMD Workflow for FileVault key rotation. 
+package fvrotate + +import ( + "context" + "errors" + "fmt" + + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/subsystem/filevault/storage" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +const WorkflowName = "io.micromdm.wf.fvrotate.v1" + +type Workflow struct { + enq workflow.StepEnqueuer + ider uuid.IDer + logger log.Logger + store storage.FVRotate +} + +type Option func(*Workflow) + +func WithLogger(logger log.Logger) Option { + return func(w *Workflow) { + w.logger = logger + } +} + +func New(q workflow.StepEnqueuer, store storage.FVRotate, opts ...Option) (*Workflow, error) { + w := &Workflow{ + enq: q, + ider: uuid.NewUUID(), + logger: log.NopLogger, + store: store, + } + for _, opt := range opts { + opt(w) + } + w.logger = w.logger.With(logkeys.WorkflowName, w.Name()) + return w, nil +} + +func (w *Workflow) Name() string { + return WorkflowName +} + +func (w *Workflow) Config() *workflow.Config { + return nil +} + +func (w *Workflow) NewContextValue(name string) workflow.ContextMarshaler { + return nil +} + +func (w *Workflow) Start(ctx context.Context, step *workflow.StepStart) error { + for _, id := range step.IDs { + // fetch cert & PRK + certRaw, err := w.store.RetrievePRKCertRaw(ctx, id) + if err != nil { + return fmt.Errorf("retrieving PRK cert raw for %s: %w", id, err) + } + prk, err := w.store.RetrievePRK(ctx, id) + if err != nil { + return fmt.Errorf("retrieving PRK for %s: %w", id, err) + } + + // create MDM command + cmd := mdmcommands.NewRotateFileVaultKeyCommand(w.ider.ID()) + cmd.Command.KeyType = "personal" + cmd.Command.FileVaultUnlock.Password = &prk + cmd.Command.ReplyEncryptionCertificate = &certRaw + + // assemble our StepEnqueuing + se := step.NewStepEnqueueing() + se.IDs = []string{id} // scope to just this ID we're iterating over + se.Commands = 
[]interface{}{cmd} + + // enqueue our step! + if err = w.enq.EnqueueStep(ctx, w, se); err != nil { + return fmt.Errorf("enqueueing step for %s: %w", id, err) + } + } + return nil +} + +func (w *Workflow) StepCompleted(ctx context.Context, stepResult *workflow.StepResult) error { + if len(stepResult.CommandResults) != 1 { + return workflow.ErrStepResultCommandLenMismatch + } + response, ok := stepResult.CommandResults[0].(*mdmcommands.RotateFileVaultKeyResponse) + if !ok { + return workflow.ErrIncorrectCommandType + } + if err := response.Validate(); err != nil { + return fmt.Errorf("validating rotate response: %w", err) + } + + if response.RotateResult == nil || response.RotateResult.EncryptedNewRecoveryKey == nil { + return errors.New("rotate result has missing (nil) fields") + } + + if err := w.store.EscrowPRK(ctx, stepResult.ID, *response.RotateResult.EncryptedNewRecoveryKey); err != nil { + return fmt.Errorf("escrow PRK for %s: %w", stepResult.ID, err) + } + + ctxlog.Logger(ctx, w.logger).Debug( + logkeys.InstanceID, stepResult.InstanceID, + logkeys.EnrollmentID, stepResult.ID, + logkeys.Message, "escrowed PRK", + ) + return nil +} + +func (w *Workflow) StepTimeout(_ context.Context, _ *workflow.StepResult) error { + return workflow.ErrTimeoutNotUsed +} + +func (w *Workflow) Event(_ context.Context, _ *workflow.Event, _ string, _ *workflow.MDMContext) error { + return workflow.ErrEventsNotSupported +} diff --git a/workflow/inventory/testdata/devinfo.plist b/workflow/inventory/testdata/devinfo.plist new file mode 100644 index 0000000..2e89d9c --- /dev/null +++ b/workflow/inventory/testdata/devinfo.plist @@ -0,0 +1,21 @@ + + + + + CommandUUID + 53115671-3f45-49f5-b7cb-22ede8b8afdb + QueryResponses + + Model + MacBookPro11,3 + ProductName + MacBookPro11,3 + UDID + AAABBBCCC111222333 + + Status + Acknowledged + UDID + AAABBBCCC111222333 + + diff --git a/workflow/inventory/testdata/secinfo.plist b/workflow/inventory/testdata/secinfo.plist new file mode 100644 index 
0000000..ca748fb --- /dev/null +++ b/workflow/inventory/testdata/secinfo.plist @@ -0,0 +1,70 @@ + + + + + CommandUUID + 53115671-3f45-49f5-b7cb-22ede8b8afdc + SecurityInfo + + AuthenticatedRootVolumeEnabled + + BootstrapTokenAllowedForAuthentication + not supported + BootstrapTokenRequiredForKernelExtensionApproval + + BootstrapTokenRequiredForSoftwareUpdate + + FDE_Enabled + + FirewallSettings + + AllowSigned + + AllowSignedApp + + Applications + + BlockAllIncoming + + FirewallEnabled + + LoggingEnabled + + LoggingOption + throttled + StealthMode + + + FirmwarePasswordStatus + + ManagementStatus + + EnrolledViaDEP + + IsActivationLockManageable + + IsUserEnrollment + + UserApprovedEnrollment + + + RemoteDesktopEnabled + + SecureBoot + + ExternalBootLevel + not supported + SecureBootLevel + off + WindowsBootLevel + not supported + + SystemIntegrityProtectionEnabled + + + Status + Acknowledged + UDID + AAABBBCCC111222333 + + diff --git a/workflow/inventory/workflow.go b/workflow/inventory/workflow.go new file mode 100644 index 0000000..de4b1a6 --- /dev/null +++ b/workflow/inventory/workflow.go @@ -0,0 +1,154 @@ +// Package inventory implements a NanoCMD Workflow that updates an inventory system. +package inventory + +import ( + "context" + "fmt" + "time" + + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/subsystem/inventory/storage" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +const WorkflowName = "io.micromdm.wf.inventory.v1" + +var WorkflowConfig = &workflow.Config{ + // we want all SecurityInfo commands, regardless of whether this workflow sent them. + AllCommandResponseRequestTypes: []string{"SecurityInfo"}, +} + +// Workflow is a workflow that updates inventory storage. 
+type Workflow struct { + enq workflow.StepEnqueuer + ider uuid.IDer + store storage.Storage + logger log.Logger +} + +type Option func(*Workflow) + +func New(enq workflow.StepEnqueuer, store storage.Storage, opts ...Option) (*Workflow, error) { + return &Workflow{ + enq: enq, + ider: uuid.NewUUID(), + store: store, + logger: log.NopLogger, + }, nil +} + +func (w *Workflow) Name() string { + return WorkflowName +} + +func (w *Workflow) Config() *workflow.Config { + return WorkflowConfig +} + +func (w *Workflow) NewContextValue(_ string) workflow.ContextMarshaler { + return nil +} + +func (w *Workflow) Start(ctx context.Context, step *workflow.StepStart) error { + // build a DeviceInformation command + cmd := mdmcommands.NewDeviceInformationCommand(w.ider.ID()) + cmd.Command.Queries = []string{ + "Model", + "SerialNumber", + "Model", + "ModelName", + "DeviceName", + "BuildVersion", + "OSVersion", + "EthernetMAC", + "IsAppleSilicon", + "HasBattery", + "IsMultiUser", + "SupportsLOMDevice", + } + + // build a SecurityInfo command + cmd2 := mdmcommands.NewSecurityInfoCommand(w.ider.ID()) + + // assemble our StepEnqueuing + se := step.NewStepEnqueueing() + se.Commands = []interface{}{cmd, cmd2} + + // enqueue our step! + return w.enq.EnqueueStep(ctx, w, se) +} + +func storeIfPresent[T any](v storage.Values, k string, p *T) { + if p == nil { + return + } + v[k] = *p +} + +func (w *Workflow) StepCompleted(ctx context.Context, stepResult *workflow.StepResult) error { + if len(stepResult.CommandResults) != 2 { + return workflow.ErrStepResultCommandLenMismatch + } + + for _, response := range stepResult.CommandResults { + switch r := response.(type) { + case *mdmcommands.DeviceInformationResponse: + // we did send a SecurityInfo command, too, but we only care about our + // DeviceInfo in the step completeion. we process the SecurityInfo + // command as an event. 
+ + if err := r.Validate(); err != nil { + return fmt.Errorf("device info response: %w", err) + } + + v := make(storage.Values) + qr := r.QueryResponses + storeIfPresent(v, storage.KeySerialNumber, qr.SerialNumber) + storeIfPresent(v, storage.KeyModel, qr.Model) + storeIfPresent(v, storage.KeyModelName, qr.ModelName) + storeIfPresent(v, storage.KeyDeviceName, qr.DeviceName) + storeIfPresent(v, storage.KeyBuildVersion, qr.BuildVersion) + storeIfPresent(v, storage.KeyOSVersion, qr.OSVersion) + storeIfPresent(v, storage.KeyEthernetMAC, qr.EthernetMAC) + storeIfPresent(v, storage.KeySupervised, qr.IsSupervised) + storeIfPresent(v, storage.KeyAppleSilicon, qr.IsAppleSilicon) + storeIfPresent(v, storage.KeyHasBattery, qr.HasBattery) + storeIfPresent(v, storage.KeySupportsLOM, qr.SupportsLOMDevice) + storeIfPresent(v, storage.KeyIsMultiUser, qr.IsMultiUser) + if len(v) > 0 { + v[storage.KeyLastSource] = mdmcommands.DeviceInformationRequestType + v[storage.KeyModified] = time.Now() + return w.store.StoreInventoryValues(ctx, stepResult.ID, v) + } + } + } + return nil +} + +func (w *Workflow) StepTimeout(_ context.Context, _ *workflow.StepResult) error { + return workflow.ErrTimeoutNotUsed +} + +func (w *Workflow) Event(ctx context.Context, e *workflow.Event, id string, mdmCtx *workflow.MDMContext) error { + switch evData := e.EventData.(type) { + case *mdmcommands.SecurityInfoResponse: + if err := evData.Validate(); err != nil { + return fmt.Errorf("security info response: %w", err) + } + + v := make(storage.Values) + si := evData.SecurityInfo + storeIfPresent(v, storage.KeySIPEnabled, si.SystemIntegrityProtectionEnabled) + storeIfPresent(v, storage.KeyFDEEnabled, si.FDEEnabled) + if len(v) > 0 { + v[storage.KeyLastSource] = mdmcommands.SecurityInfoRequestType + v[storage.KeyModified] = time.Now() + return w.store.StoreInventoryValues(ctx, id, v) + } + default: + return fmt.Errorf("unknown event data type for event: %s", e.EventFlag) + } + return nil +} diff --git 
a/workflow/inventory/workflow_test.go b/workflow/inventory/workflow_test.go new file mode 100644 index 0000000..8df2ebb --- /dev/null +++ b/workflow/inventory/workflow_test.go @@ -0,0 +1,141 @@ +package inventory + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/micromdm/nanocmd/engine" + enginestorage "github.com/micromdm/nanocmd/engine/storage/inmem" + "github.com/micromdm/nanocmd/subsystem/inventory/storage" + "github.com/micromdm/nanocmd/subsystem/inventory/storage/inmem" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +type nullEnqueuer struct{} + +func (n *nullEnqueuer) Enqueue(_ context.Context, _ []string, _ []byte) error { return nil } + +func (n *nullEnqueuer) SupportsMultiCommands() bool { return true } + +type testStep struct { + wfName string + es *workflow.StepEnqueueing +} + +type collectionEnqueuer struct { + next workflow.StepEnqueuer + steps []testStep +} + +func (c *collectionEnqueuer) EnqueueStep(ctx context.Context, n workflow.Namer, es *workflow.StepEnqueueing) error { + c.steps = append(c.steps, testStep{ + wfName: n.Name(), + es: es, + }) + return c.next.EnqueueStep(ctx, n, es) +} + +func TestWorkflow(t *testing.T) { + e := engine.New(enginestorage.New(), &nullEnqueuer{}) + + c := &collectionEnqueuer{next: e} + + s := inmem.New() + + w, err := New(c, s) + if err != nil { + t.Fatal(err) + } + w.ider = uuid.NewStaticIDs( + // note: order is important and depends on values in plist testdata + "53115671-3f45-49f5-b7cb-22ede8b8afdb", + "53115671-3f45-49f5-b7cb-22ede8b8afdc", + ) + + ctx := context.Background() + + // enrollment id + id := "AAABBBCCC111222333" + + e.RegisterWorkflow(w) + + // read it back out of the engine + w2 := e.Workflow(w.Name()).(*Workflow) + + if w.Name() != w2.Name() { + t.Fatal("workflows not equal after registration") + } + + _, err = e.StartWorkflow(ctx, w.Name(), nil, []string{id}, nil, nil) + if err != nil { + t.Fatal(err) + } + + if want, have := 1, 
len(c.steps); want != have { + t.Fatalf("wanted: %d; have: %d", want, have) + } + + if want, have := 1, len(c.steps[0].es.IDs); want != have { + t.Fatalf("wanted: %d; have: %d", want, have) + } + + if want, have := id, c.steps[0].es.IDs[0]; want != have { + t.Errorf("wanted: %s; have: %s", want, have) + } + + fakeSR := &workflow.StepResult{ + ID: id, + CommandResults: []interface{}{"stuff", "stuff2", "stuff3"}, + } + // pretend our workflow finished, submit some data with the wrong count of results + err = w.StepCompleted(ctx, fakeSR) + if !errors.Is(err, workflow.ErrStepResultCommandLenMismatch) { + t.Error("expected error ErrStepResultCommandLenMismatch") + } + + model := "MacBookPro11,3" + + devInfo, err := os.ReadFile("testdata/devinfo.plist") + if err != nil { + t.Fatal(err) + } + + err = e.MDMCommandResponseEvent(ctx, id, "53115671-3f45-49f5-b7cb-22ede8b8afdb", devInfo, nil) + if err != nil { + t.Fatal(err) + } + + secInfo, err := os.ReadFile("testdata/secinfo.plist") + if err != nil { + t.Fatal(err) + } + + err = e.MDMCommandResponseEvent(ctx, id, "53115671-3f45-49f5-b7cb-22ede8b8afdc", secInfo, nil) + if err != nil { + t.Fatal(err) + } + + // retreive data from workflow we've processed results + idValues, err := s.RetrieveInventory(ctx, &storage.SearchOptions{IDs: []string{id}}) + if err != nil { + t.Error(err) + } + if len(idValues) != 1 { + t.Fatal("enrollment not found") + } + + values := idValues[id] + if values == nil { + t.Fatal("nil inventory") + } + + if wanted, have := model, values[storage.KeyModel]; wanted != have { + t.Errorf("wanted: %s; have: %s", wanted, have) + } + if wanted, have := true, values[storage.KeySIPEnabled]; wanted != have { + t.Errorf("wanted: %v; have: %v", wanted, have) + } +} diff --git a/workflow/profile/context.go b/workflow/profile/context.go new file mode 100644 index 0000000..30f90a6 --- /dev/null +++ b/workflow/profile/context.go @@ -0,0 +1,26 @@ +package profile + +import ( + "errors" + "strings" +) + +// 
CommaStringSliceContext is a very simple ContextMarshaler. +type CommaStringSliceContext []string + +// MarshalBinary converts c into a byte slice. +func (c *CommaStringSliceContext) MarshalBinary() ([]byte, error) { + if c == nil { + return nil, errors.New("nil value") + } + return []byte(strings.Join(*c, ",")), nil +} + +// UnmarshalBinary converts and loads data into c. +func (c *CommaStringSliceContext) UnmarshalBinary(data []byte) error { + if c == nil { + return errors.New("nil value") + } + *c = CommaStringSliceContext(strings.Split(string(data), ",")) + return nil +} diff --git a/workflow/profile/workflow.go b/workflow/profile/workflow.go new file mode 100644 index 0000000..6a5939b --- /dev/null +++ b/workflow/profile/workflow.go @@ -0,0 +1,279 @@ +// Package profile implements a NanoCMD Workflow for "statefully" installing and removing profiles. +package profile + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/jessepeterson/mdmcommands" + "github.com/micromdm/nanocmd/log" + "github.com/micromdm/nanocmd/log/ctxlog" + "github.com/micromdm/nanocmd/log/logkeys" + "github.com/micromdm/nanocmd/subsystem/profile/storage" + "github.com/micromdm/nanocmd/utils/uuid" + "github.com/micromdm/nanocmd/workflow" +) + +const WorkflowName = "io.micromdm.wf.profile.v1" + +// Workflow "statefully" installs and removes profiles. 
+type Workflow struct { + enq workflow.StepEnqueuer + store storage.ReadStorage + ider uuid.IDer + logger log.Logger +} + +type Option func(*Workflow) + +func WithLogger(logger log.Logger) Option { + return func(w *Workflow) { + w.logger = logger + } +} + +func New(enq workflow.StepEnqueuer, store storage.ReadStorage, opts ...Option) (*Workflow, error) { + w := &Workflow{ + enq: enq, + store: store, + ider: uuid.NewUUID(), + logger: log.NopLogger, + } + for _, opt := range opts { + opt(w) + } + w.logger = w.logger.With(logkeys.WorkflowName, w.Name()) + return w, nil +} + +func (w *Workflow) Name() string { + return WorkflowName +} + +func (w *Workflow) Config() *workflow.Config { + return nil +} + +func (w *Workflow) NewContextValue(name string) workflow.ContextMarshaler { + switch name { + case "", "list": + // for the start and list steps, use the comma context type + return new(CommaStringSliceContext) + default: + return nil + } +} + +func (w *Workflow) Start(ctx context.Context, step *workflow.StepStart) error { + // make sure our context is of the correct type + manageList, ok := step.Context.(*CommaStringSliceContext) + if !ok { + return workflow.ErrIncorrectContextType + } + + // sanity check + if len(*manageList) < 1 { + return errors.New("no managed profiles supplied in context") + } + + // parse and get our profiles list. + // TODO: we could cache these results in the context and re-use them later. + all, _ := splitInstallRemove(*manageList) + + // retrive the infos to make sure they exist. this avoids starting + // the workflow if we supplied an invalid set of profile names. 
+ if _, err := w.store.RetrieveProfileInfos(ctx, all); err != nil { + return fmt.Errorf("retrieving profile info: %w", err) + } + + ctxlog.Logger(ctx, w.logger).Debug( + logkeys.InstanceID, step.InstanceID, + logkeys.FirstEnrollmentID, step.IDs[0], + logkeys.GenericCount, len(step.IDs), + logkeys.Message, "enqueuing step", + "profile_count", len(all), + "profile_first", all[0], + ) + + // build a ProfileList command + cmd := mdmcommands.NewProfileListCommand(w.ider.ID()) + managedOnly := true + cmd.Command.ManagedOnly = &managedOnly + + // assemble our StepEnqueuing + se := step.NewStepEnqueueing() + se.Commands = []interface{}{cmd} + se.Context = manageList // re-use our passed-in context (i.e. the list of profiles to manage) + se.Name = "list" // will get handed back to us in StepCompleted + + // enqueue our step! + return w.enq.EnqueueStep(ctx, w, se) +} + +const ( + manageInstallReq int = iota + 1 // install "requested" + manageRemoveReq // removal "requested" + manageToInstall // confirmed to install + manageToRemove // confirmed to remove +) + +// split the context profiles into a slice of all of them and +// a map-to-management style (i.e. install or remove) +func splitInstallRemove(s CommaStringSliceContext) (all []string, ret map[string]int) { + ret = make(map[string]int) + for _, name := range s { + if strings.HasPrefix(name, "-") { + // if one of the entries is prefixed with a "-" dash (minus) + // then it is for removal. 
+ ret[name[1:]] = manageRemoveReq + all = append(all, name[1:]) + } else { + ret[name] = manageToInstall + all = append(all, name) + } + } + return +} + +func (w *Workflow) listStepCompleted(ctx context.Context, stepResult *workflow.StepResult) error { + if len(stepResult.CommandResults) != 1 { + return workflow.ErrStepResultCommandLenMismatch + } + profListResp, ok := stepResult.CommandResults[0].(*mdmcommands.ProfileListResponse) + if !ok { + return fmt.Errorf("%w: not a profile list", workflow.ErrIncorrectCommandType) + } + if err := profListResp.Validate(); err != nil { + return fmt.Errorf("validating profile list: %w", err) + } + + // make sure our context is of the correct type + manageList, ok := stepResult.Context.(*CommaStringSliceContext) + if !ok { + return workflow.ErrIncorrectContextType + } + + all, manageMap := splitInstallRemove(*manageList) + + // retrieve the list of profiles provided to the workflow when started + allProfsToManage, err := w.store.RetrieveProfileInfos(ctx, all) + if err != nil { + return fmt.Errorf("retrieving profile info: %w", err) + } + + // find out which profiles we need to install of the requested +loop: + for name, info := range allProfsToManage { + manageStyle := manageMap[name] + for _, profListItem := range profListResp.ProfileList { + if profListItem.PayloadIdentifier == info.Identifier { + if manageStyle == manageRemoveReq { + // found profile on system but we need to remove it + manageMap[name] = manageToRemove + continue loop + } + if manageStyle == manageToInstall && profListItem.PayloadUUID == info.UUID { + // matching UUID and identifier: don't install + manageMap[name] = manageInstallReq + continue loop + } + } + } + } + + // convert the map to a slice of names for our raw profile retrieval + var profToInstSlice []string + for name, manageStyle := range manageMap { + if manageStyle == manageToInstall { + profToInstSlice = append(profToInstSlice, name) + } + } + + // get our raw profiles (only if we need to) + var 
profToInstRaw map[string][]byte + if len(profToInstSlice) > 0 { + // retrieve the raw profiles from the store + profToInstRaw, err = w.store.RetrieveRawProfiles(ctx, profToInstSlice) + if err != nil { + return fmt.Errorf("retrieving raw profiles: %w", err) + } + } + + // create our step enqueueing + se := stepResult.NewStepEnqueueing() + se.Name = "install" + + // assemble our collection of InstallProfile and RemoveProfile MDM + // commands and append them to the command list + for name, manageStyle := range manageMap { + switch manageStyle { + case manageToInstall: + cmd := mdmcommands.NewInstallProfileCommand(w.ider.ID()) + cmd.Command.Payload = profToInstRaw[name] + se.Commands = append(se.Commands, cmd) + case manageToRemove: + cmd := mdmcommands.NewRemoveProfileCommand(w.ider.ID()) + cmd.Command.Identifier = allProfsToManage[name].Identifier + se.Commands = append(se.Commands, cmd) + } + } + + if len(se.Commands) > 0 { + // enqueue our step! + return w.enq.EnqueueStep(ctx, w, se) + } + ctxlog.Logger(ctx, w.logger).Debug( + logkeys.InstanceID, stepResult.InstanceID, + logkeys.StepName, stepResult.Name, + logkeys.EnrollmentID, stepResult.ID, + logkeys.Message, "no profiles to install or remove after profile list", + ) + return nil +} + +func (w *Workflow) StepCompleted(ctx context.Context, stepResult *workflow.StepResult) error { + switch stepResult.Name { + case "list": + return w.listStepCompleted(ctx, stepResult) + case "install": + logger := ctxlog.Logger(ctx, w.logger).With( + logkeys.InstanceID, stepResult.InstanceID, + logkeys.StepName, stepResult.Name, + ) + statuses := make(map[string]int) + for _, resp := range stepResult.CommandResults { + genResper, ok := resp.(mdmcommands.GenericResponser) + if !ok { + continue + } + genResp := genResper.GetGenericResponse() + statuses[genResp.Status] += 1 + // TODO: log the association from command UUID to profile name + if err := genResp.Validate(); err != nil { + logger.Info( + logkeys.Message, "validate MDM 
response", + logkeys.CommandUUID, genResp.CommandUUID, + logkeys.Error, err, + ) + } + } + logs := []interface{}{logkeys.Message, "workflow complete"} + for k, v := range statuses { + logs = append(logs, "count_"+strings.ToLower(k), v) + } + logger.Debug(logs...) + return nil + default: + return fmt.Errorf("%w: %s", workflow.ErrUnknownStepName, stepResult.Name) + } +} + +func (w *Workflow) StepTimeout(_ context.Context, _ *workflow.StepResult) error { + return workflow.ErrTimeoutNotUsed +} + +func (w *Workflow) Event(_ context.Context, _ *workflow.Event, _ string, _ *workflow.MDMContext) error { + return workflow.ErrEventsNotSupported +} diff --git a/workflow/step.go b/workflow/step.go new file mode 100644 index 0000000..f1cecd5 --- /dev/null +++ b/workflow/step.go @@ -0,0 +1,84 @@ +package workflow + +import ( + "errors" + "time" +) + +var ( + // ErrTimeoutNotUsed returned from a workflow Event() method. + ErrTimeoutNotUsed = errors.New("workflow does not utilize timeouts") + + // ErrStepResultCommandLenMismatch indicates mismatched MDM commands expected. + // Steps are enqueued with n MDM commands and should only return with + // that number of commands. This error is for indicating that this + // was not the case. + ErrStepResultCommandLenMismatch = errors.New("mismatched number of commands in step result") + + // ErrUnknownStepName occurs when a workflow encounters a step name + // it does not know about. + ErrUnknownStepName = errors.New("unknown step name") + + // ErrIncorrectCommandType occurs when a step's expected command is + // not of the correct type. Workflows should not depend on the ordering + // of commands in the returned step command slice. + ErrIncorrectCommandType = errors.New("incorrect command type") + + // ErrIncorrectContextType indicates a step did not receive the + // correctly instantiated context type for this step name. 
+ ErrIncorrectContextType = errors.New("incorrect context type") +) + +// StepEnqueueing encapsulates a step and is passed to an enqueuer for command delivery to MDM enrollments. +// Note that a workflow may only enqueue commands to multiple enrollment IDs when starting. +type StepEnqueueing struct { + StepContext + IDs []string // Enrollment IDs + Commands []interface{} + + // Timeout specifies a timeout. If any of the commands in this step do + // not complete by this time then the entire step is considered to have + // timed out. + Timeout time.Time + + // A step should not be enqueued (that is, sent to enrollments) + // until after this time has passed. A delay of sorts. + NotUntil time.Time +} + +// StepStart is provided to a workflow when starting a new workflow instance. +// Note that a workflow may only enqueue commands to multiple enrollment IDs when starting. +type StepStart struct { + StepContext + Event *Event + IDs []string // Enrollment IDs +} + +// StepEnqueueing preserves some context and IDs from step for enqueueing. +func (step *StepStart) NewStepEnqueueing() *StepEnqueueing { + if step == nil { + return nil + } + return &StepEnqueueing{ + StepContext: *step.StepContext.NewForEnqueue(), + IDs: step.IDs, + } +} + +// StepResult is given to a workflow when a step has completed or timed out. +type StepResult struct { + StepContext + ID string + CommandResults []interface{} +} + +// StepEnqueueing preserves some context and IDs from step for enqueueing. +func (step *StepResult) NewStepEnqueueing() *StepEnqueueing { + if step == nil { + return nil + } + return &StepEnqueueing{ + StepContext: *step.StepContext.NewForEnqueue(), + IDs: []string{step.ID}, + } +} diff --git a/workflow/workflow.go b/workflow/workflow.go new file mode 100644 index 0000000..c09af0f --- /dev/null +++ b/workflow/workflow.go @@ -0,0 +1,46 @@ +package workflow + +import "context" + +// Namers provide a name string. 
+type Namer interface {
+	// Name returns the name of the workflow; reverse-DNS style by convention.
+	// This string is generally used to route actions to this workflow.
+	Name() string
+}
+
+// Workflows send MDM commands and process the results using steps.
+type Workflow interface {
+	Namer
+
+	// Config returns the workflow configuration.
+	Config() *Config
+
+	// NewContextValue returns a newly instantiated context value.
+	// This will usually be used by a workflow engine to unmarshal and pass in
+	// stored context on a StepContext.
+	NewContextValue(stepName string) ContextMarshaler
+
+	// Start starts a new workflow instance for MDM enrollments.
+	Start(context.Context, *StepStart) error
+
+	// StepCompleted is the action called when all step MDM commands have reported results.
+	// Note that these results may be errors, but NotNow responses are handled for the workflow.
+	StepCompleted(context.Context, *StepResult) error
+
+	// StepTimeout occurs when at least one command in a step has failed to complete in time.
+	// Timeouts are defined by the step, then any workflow default, then
+	// any engine default.
+	StepTimeout(context.Context, *StepResult) error
+
+	// Event is called when MDM events happen that are intended for this workflow.
+	// A workflow can subscribe to events in its Config struct.
+	Event(ctx context.Context, e *Event, id string, mdmCtx *MDMContext) error
+}
+
+// StepEnqueuers send steps (MDM commands) to enrollments.
+type StepEnqueuer interface {
+	// EnqueueStep enqueues MDM commands to ids in the StepEnqueueing.
+	// The enqueueing system should be able to find this workflow again with Namer.
+	EnqueueStep(context.Context, Namer, *StepEnqueueing) error
+}