diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3ba4aba6..613e3ef9 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,14 +1,17 @@ name: "Test" on: pull_request: + merge_group: push: + branches: + - master jobs: tests: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v16 + - uses: cachix/install-nix-action@v31 #- run: nix flake check - run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi diff --git a/.github/workflows/update-flakes.yml b/.github/workflows/update-flakes.yml new file mode 100644 index 00000000..b5c0c2dd --- /dev/null +++ b/.github/workflows/update-flakes.yml @@ -0,0 +1,28 @@ +name: "Update Flakes" +on: + schedule: + # Run weekly on Monday at 00:00 UTC + - cron: '0 0 * * 1' + workflow_dispatch: +jobs: + update-flakes: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - uses: actions/checkout@v3 + - uses: cachix/install-nix-action@v31 + - name: Update flake inputs + run: nix flake update + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + commit-message: "flake.lock: Update" + title: "Update flake inputs" + body: | + Automated flake input updates. + + This PR was automatically created by the update-flakes workflow. 
+ branch: update-flakes + delete-branch: true \ No newline at end of file diff --git a/.gitignore b/.gitignore index 799db665..12df926f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,47 +1,9 @@ -/.pls_cache -*.o *~ -Makefile -Makefile.in -.deps -.hydra-data -/config.guess -/config.log -/config.status -/config.sub -/configure -/depcomp -/libtool -/ltmain.sh -/autom4te.cache -/aclocal.m4 -/missing -/install-sh +.test_info.* /src/sql/hydra-postgresql.sql /src/sql/hydra-sqlite.sql /src/sql/tmp.sqlite -/src/hydra-eval-jobs/hydra-eval-jobs -/src/root/static/bootstrap -/src/root/static/js/flot -/tests -/doc/manual/images -/doc/manual/manual.html -/doc/manual/manual.pdf -/t/.bzr* -/t/.git* -/t/.hg* -/t/nix -/t/data -/t/jobs/config.nix -t/jobs/declarative/project.json -/inst -hydra-config.h -hydra-config.h.in +.hydra-data result +result-* outputs -config -stamp-h1 -src/hydra-evaluator/hydra-evaluator -src/hydra-queue-runner/hydra-queue-runner -src/root/static/fontawesome/ -src/root/static/bootstrap*/ diff --git a/.yath.rc b/.yath.rc deleted file mode 100644 index 19bb35af..00000000 --- a/.yath.rc +++ /dev/null @@ -1,2 +0,0 @@ -[test] --I=rel(t/lib) diff --git a/Makefile.am b/Makefile.am deleted file mode 100644 index e744cc33..00000000 --- a/Makefile.am +++ /dev/null @@ -1,8 +0,0 @@ -SUBDIRS = src t doc -BOOTCLEAN_SUBDIRS = $(SUBDIRS) -DIST_SUBDIRS = $(SUBDIRS) -EXTRA_DIST = hydra-module.nix - -install-data-local: hydra-module.nix - $(INSTALL) -d $(DESTDIR)$(datadir)/nix - $(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/ diff --git a/README.md b/README.md index 54cb9a93..54b95549 100644 --- a/README.md +++ b/README.md @@ -39,16 +39,16 @@ In order to evaluate and build anything you need to create _projects_ that conta #### Creating A Project Log in as administrator, click "_Admin_" and select "_Create project_". 
Fill the form as follows: -- **Identifier**: `hello` +- **Identifier**: `hello-project` - **Display name**: `hello` - **Description**: `hello project` Click "_Create project_". #### Creating A Jobset -After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values: +After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values: -- **Identifier**: `hello` +- **Identifier**: `hello-project` - **Nix expression**: `examples/hello.nix` in `hydra` - **Check interval**: 60 - **Scheduling shares**: 1 @@ -57,7 +57,7 @@ We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_ - **Input name**: `nixpkgs` - **Type**: `Git checkout` -- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03` +- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05` - **Input name**: `hydra` - **Type**: `Git checkout` @@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_ You can build Hydra via `nix-build` using the provided [default.nix](./default.nix): ``` -$ nix-build +$ nix build ``` ### Development Environment You can use the provided shell.nix to get a working development environment: ``` -$ nix-shell -$ ./bootstrap -$ configurePhase # NOTE: not ./configure -$ make +$ nix develop +$ mesonConfigurePhase +$ ninja ``` ### Executing Hydra During Development @@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from can be done using [foreman](https://github.com/ddollar/foreman): ``` -$ nix-shell +$ nix develop $ # hack hack -$ make +$ ninja -C build $ foreman start ``` @@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environme Then, you can run the tests and the perlcritic linter together with: ```console 
-$ nix-shell -$ make check +$ nix develop +$ ninja -C build test ``` You can run a single test with: ``` -$ nix-shell -$ yath test ./t/foo/bar.t +$ nix develop +$ cd build +$ meson test --test-args=../t/Hydra/Event.t testsuite ``` And you can run just perlcritic with: ``` -$ nix-shell -$ make perlcritic +$ nix develop +$ cd build +$ meson test perlcritic ``` ### JSON API @@ -140,7 +141,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd ## Additional Resources - [Hydra User's Guide](https://nixos.org/hydra/manual/) -- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra) +- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra) - [hydra-cli](https://github.com/nlewo/hydra-cli) - [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ) diff --git a/bootstrap b/bootstrap deleted file mode 100755 index 091b0ee4..00000000 --- a/bootstrap +++ /dev/null @@ -1,2 +0,0 @@ -#! /bin/sh -e -exec autoreconf -vfi diff --git a/configure.ac b/configure.ac deleted file mode 100644 index 0c823696..00000000 --- a/configure.ac +++ /dev/null @@ -1,85 +0,0 @@ -AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])]) -AC_CONFIG_AUX_DIR(config) -AM_INIT_AUTOMAKE([foreign serial-tests]) - -AC_LANG([C++]) - -AC_PROG_CC -AC_PROG_INSTALL -AC_PROG_LN_S -AC_PROG_LIBTOOL -AC_PROG_CXX - -CXXFLAGS+=" -std=c++17" - -AC_PATH_PROG([XSLTPROC], [xsltproc]) - -AC_ARG_WITH([docbook-xsl], - [AS_HELP_STRING([--with-docbook-xsl=PATH], - [path of the DocBook XSL stylesheets])], - [docbookxsl="$withval"], - [docbookxsl="/docbook-xsl-missing"]) -AC_SUBST([docbookxsl]) - - -AC_DEFUN([NEED_PROG], -[ -AC_PATH_PROG($1, $2) -if test -z "$$1"; then - AC_MSG_ERROR([$2 is required]) -fi -]) - -NEED_PROG(perl, perl) - -NEED_PROG([NIX_STORE_PROGRAM], [nix-store]) - -AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough]) -if test -n "$NIX_STORE" -a -n "$TMPDIR" -then - # This may be executed from 
within a build chroot, so pacify - # `nix-store' instead of letting it choke while trying to mkdir - # /nix/var. - NIX_STATE_DIR="$TMPDIR" - export NIX_STATE_DIR -fi -if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) - AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.]) -fi - -PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store]) - -testPath="$(dirname $(type -p expr))" -AC_SUBST(testPath) - -jobsPath="$(realpath ./t/jobs)" -AC_SUBST(jobsPath) - -CXXFLAGS+=" -include nix/config.h" - -AC_CONFIG_FILES([ - Makefile - doc/Makefile - doc/manual/Makefile - src/Makefile - src/hydra-evaluator/Makefile - src/hydra-eval-jobs/Makefile - src/hydra-queue-runner/Makefile - src/sql/Makefile - src/ttf/Makefile - src/lib/Makefile - src/root/Makefile - src/script/Makefile - t/Makefile - t/jobs/config.nix - t/jobs/declarative/project.json -]) - -AC_CONFIG_COMMANDS([executable-scripts], []) - -AC_CONFIG_HEADER([hydra-config.h]) - -AC_OUTPUT diff --git a/default.nix b/default.nix index d4c7ec29..b81119c3 100644 --- a/default.nix +++ b/default.nix @@ -1,6 +1,6 @@ # The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and # returns an attribute set of the shape `{ defaultNix, shellNix }` -(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) { +(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { src = ./.; }).defaultNix diff --git a/doc/Makefile.am b/doc/Makefile.am deleted file mode 100644 index 9ac91d24..00000000 --- a/doc/Makefile.am +++ /dev/null @@ -1,4 +0,0 @@ -SUBDIRS = manual -BOOTCLEAN_SUBDIRS = $(SUBDIRS) -DIST_SUBDIRS = $(SUBDIRS) - diff --git a/doc/manual/Makefile.am b/doc/manual/Makefile.am deleted file mode 100644 index ec732166..00000000 --- a/doc/manual/Makefile.am +++ /dev/null @@ -1,6 +0,0 @@ -MD_FILES = src/*.md - -EXTRA_DIST = $(MD_FILES) - -install: 
$(MD_FILES) - mdbook build . -d $(docdir) diff --git a/doc/manual/meson.build b/doc/manual/meson.build new file mode 100644 index 00000000..11178809 --- /dev/null +++ b/doc/manual/meson.build @@ -0,0 +1,36 @@ +srcs = files( + 'src/SUMMARY.md', + 'src/about.md', + 'src/api.md', + 'src/configuration.md', + 'src/hacking.md', + 'src/installation.md', + 'src/introduction.md', + 'src/jobs.md', + 'src/monitoring/README.md', + 'src/notifications.md', + 'src/plugins/README.md', + 'src/plugins/RunCommand.md', + 'src/plugins/declarative-projects.md', + 'src/projects.md', + 'src/webhooks.md', +) + +manual = custom_target( + 'manual', + command: [ + mdbook, + 'build', + '@SOURCE_ROOT@/doc/manual', + '-d', meson.current_build_dir() / 'html' + ], + depend_files: srcs, + output: ['html'], + build_by_default: true, +) + +install_subdir( + manual.full_path(), + install_dir: get_option('datadir') / 'doc/hydra', + strip_directory: true, +) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index 2700625d..bd8141a3 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -51,10 +51,12 @@ base_uri example.com `base_uri` should be your hydra servers proxied URL. If you are using Hydra nixos module then setting `hydraURL` option should be enough. -If you want to serve Hydra with a prefix path, for example -[http://example.com/hydra]() then you need to configure your reverse -proxy to pass `X-Request-Base` to hydra, with prefix path as value. For -example if you are using nginx, then use configuration similar to +You also need to configure your reverse proxy to pass `X-Request-Base` +to hydra, with the same value as `base_uri`. +This also covers the case of serving Hydra with a prefix path, +as in [http://example.com/hydra](). + +For example if you are using nginx, then use configuration similar to following: server { @@ -63,8 +65,7 @@ following: .. other configuration .. 
location /hydra/ { - proxy_pass http://127.0.0.1:3000; - proxy_redirect http://127.0.0.1:3000 https://example.com/hydra; + proxy_pass http://127.0.0.1:3000/; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; @@ -74,6 +75,33 @@ following: } } +Note the trailing slash on the `proxy_pass` directive, which causes nginx to +strip off the `/hydra/` part of the URL before passing it to hydra. + +Populating a Cache +------------------ + +A common use for Hydra is to pre-build and cache derivations which +take a long time to build. While it is possible to directly access the +Hydra server's store over SSH, a more scalable option is to upload +built derivations to a remote store like an [S3-compatible object +store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting +the `store_uri` parameter will cause Hydra to sign and upload +derivations as they are built: + +``` +store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key +``` + +This example uses [Zstandard](https://github.com/facebook/zstd) +compression on derivations to reduce CPU usage on the server, but +[Brotli](https://brotli.org/) compression for derivation listings and +build logs because it has better browser support. + +See [`nix help +stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html) +for a description of the store URI format. + Statsd Configuration -------------------- @@ -131,8 +159,8 @@ use LDAP to manage roles and users. This is configured by defining the `` block in the configuration file. In this block it's possible to configure the authentication plugin in the `` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`. 
-The documentation for the available settings can be found [here] -(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). +The documentation for the available settings can be found +[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). Note that the bind password (if needed) should be supplied as an included file to prevent it from leaking to the Nix store. @@ -179,13 +207,15 @@ Example configuration: deref = always + # Make all users in the hydra_admin group Hydra admins hydra_admin = admin - # Allow all users in the dev group to restart jobs and cancel builds + # Allow all users in the dev group to eval jobsets, restart jobs and cancel builds + dev = eval-jobset dev = restart-jobs - dev = cancel-builds + dev = cancel-build ``` diff --git a/doc/manual/src/hacking.md b/doc/manual/src/hacking.md index 9d98b00c..8b2b13ba 100644 --- a/doc/manual/src/hacking.md +++ b/doc/manual/src/hacking.md @@ -12,24 +12,26 @@ To enter a shell in which all environment variables (such as `PERL5LIB`) and dependencies can be found: ```console -$ nix-shell +$ nix develop ``` To build Hydra, you should then do: ```console -[nix-shell]$ ./bootstrap -[nix-shell]$ configurePhase -[nix-shell]$ make +$ mesonConfigurePhase +$ ninja ``` You start a local database, the webserver, and other components with foreman: ```console +$ ninja -C build $ foreman start ``` +The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar" + You can run just the Hydra web server in your source tree as follows: ```console @@ -39,18 +41,11 @@ $ ./src/script/hydra-server You can run Hydra's test suite with the following: ```console -[nix-shell]$ make check -[nix-shell]$ # to run as many tests as you have cores: -[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES -[nix-shell]$ # or run yath directly: -[nix-shell]$ yath test -[nix-shell]$ # to run as many tests as you have cores: -[nix-shell]$ 
yath test -j $NIX_BUILD_CORES +$ meson test +# to run as many tests as you have cores: +$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test ``` -When using `yath` instead of `make check`, ensure you have run `make` -in the root of the repository at least once. - **Warning**: Currently, the tests can fail if run with high parallelism [due to an issue in `Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40) @@ -67,7 +62,7 @@ will reload the page every time you save. To build Hydra and its dependencies: ```console -$ nix-build release.nix -A build.x86_64-linux +$ nix build .#packages.x86_64-linux.default ``` ## Development Tasks @@ -92,7 +87,7 @@ On NixOS: ```nix { - nix.trustedUsers = [ "YOURUSER" ]; + nix.settings.trusted-users = [ "YOURUSER" ]; } ``` diff --git a/doc/manual/src/installation.md b/doc/manual/src/installation.md index cbf3f907..39a86885 100644 --- a/doc/manual/src/installation.md +++ b/doc/manual/src/installation.md @@ -48,7 +48,7 @@ Getting Nix If your server runs NixOS you are all set to continue with installation of Hydra. Otherwise you first need to install Nix. The latest stable version can be found one [the Nix web -site](http://nixos.org/nix/download.html), along with a manual, which +site](https://nixos.org/download/), along with a manual, which includes installation instructions. Installation diff --git a/doc/manual/src/plugins/README.md b/doc/manual/src/plugins/README.md index 26ee2649..93aa80b4 100644 --- a/doc/manual/src/plugins/README.md +++ b/doc/manual/src/plugins/README.md @@ -42,7 +42,7 @@ Sets CircleCI status. ## Compress build logs -Compresses build logs after a build with bzip2. +Compresses build logs after a build with bzip2 or zstd. ### Configuration options @@ -50,6 +50,14 @@ Compresses build logs after a build with bzip2. Enable log compression +- `compress_build_logs_compression` + +Which compression format to use. Valid values are bzip2 (default) and zstd. 
+ +- `compress_build_logs_silent` + +Whether to compress logs silently. + ### Example ```xml @@ -172,17 +180,6 @@ Sets Gitlab CI status. - `gitlab_authorization.` -## HipChat notification - -Sends hipchat chat notifications when a build finish. - -### Configuration options - -- `hipchat.[].jobs` -- `hipchat.[].builds` -- `hipchat.[].token` -- `hipchat.[].notify` - ## InfluxDB notification Writes InfluxDB events when a builds finished. diff --git a/doc/manual/src/projects.md b/doc/manual/src/projects.md index 95174f1b..f7c4975f 100644 --- a/doc/manual/src/projects.md +++ b/doc/manual/src/projects.md @@ -378,13 +378,18 @@ This section describes how it can be implemented for `gitea`, but the approach f analogous: * [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication) -* Add it to your `hydra.conf` like this: +* Add it to a file which only users in the hydra group can read like this: see [including files](configuration.md#including-files) for more information + ``` + + your_username=your_token + + ``` + +* Include the file in your `hydra.conf` like this: ``` nix { services.hydra-dev.extraConfig = '' - - your_username=your_token - + Include /path/to/secret/file ''; } ``` @@ -399,3 +404,10 @@ analogous: | `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* | | `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional | +Content-addressed derivations +----------------------------- + +Hydra can to a certain extent use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62). +To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake. + +Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken at that point. 
diff --git a/doc/manual/src/webhooks.md b/doc/manual/src/webhooks.md index 2b26cd61..674e1064 100644 --- a/doc/manual/src/webhooks.md +++ b/doc/manual/src/webhooks.md @@ -1,9 +1,12 @@ # Webhooks -Hydra can be notified by github's webhook to trigger a new evaluation when a +Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a jobset has a github repo in its input. -To set up a github webhook go to `https://github.com///settings` and in the `Webhooks` tab -click on `Add webhook`. + +## GitHub + +To set up a webhook for a GitHub repository go to `https://github.com///settings` +and in the `Webhooks` tab click on `Add webhook`. - In `Payload URL` fill in `https:///api/push-github`. - In `Content type` switch to `application/json`. @@ -11,3 +14,14 @@ click on `Add webhook`. - For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`. Then add the hook with `Add webhook`. + +## Gitea + +To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance +and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down. + +- In `Target URL` fill in `https:///api/push-gitea`. +- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`. +- Change the branch filter to match the git branch hydra builds. + +Then add the hook with `Add webhook`. diff --git a/examples/hello.nix b/examples/hello.nix index 5a5d2585..84707025 100644 --- a/examples/hello.nix +++ b/examples/hello.nix @@ -1,5 +1,5 @@ # -# jobset example file. This file canbe referenced as Nix expression +# jobset example file. This file can be referenced as Nix expression # in a jobset configuration along with inputs for nixpkgs and the # repository containing this file. 
# diff --git a/flake.lock b/flake.lock index 5c726a24..0ca074f3 100644 --- a/flake.lock +++ b/flake.lock @@ -1,94 +1,59 @@ { "nodes": { - "lowdown-src": { + "nix": { "flake": false, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, - "newNixpkgs": { - "locked": { - "lastModified": 1647380550, - "narHash": "sha256-909TI9poX7CIUiFx203WL29YON6m/I6k0ExbZvR7bLM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "6e3ee8957637a60f5072e33d78e05c0f65c54366", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable-small", - "repo": "nixpkgs", - "type": "github" - } - }, - "nix": { - "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": "nixpkgs", - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1649172203, - "narHash": "sha256-Q3nYaXqbseDOvZrlePKeIrx0/KzqyrtNpxHIUbtFHuI=", + "lastModified": 1750777360, + "narHash": "sha256-nDWFxwhT+fQNgi4rrr55EKjpxDyVKSl1KaNmSXtYj40=", "owner": "NixOS", "repo": "nix", - "rev": "5fe4fe823c193cbb7bfa05a468de91eeab09058d", + "rev": "7bb200199705eddd53cb34660a76567c6f1295d9", "type": "github" }, "original": { - "id": "nix", - "type": "indirect" + "owner": "NixOS", + "ref": "2.29-maintenance", + "repo": "nix", + "type": "github" + } + }, + "nix-eval-jobs": { + "flake": false, + "locked": { + "lastModified": 1748680938, + "narHash": "sha256-TQk6pEMD0mFw7jZXpg7+2qNKGbAluMQgc55OMgEO8bM=", + "owner": "nix-community", + "repo": "nix-eval-jobs", + "rev": "974a4af3d4a8fd242d8d0e2608da4be87a62b83f", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-eval-jobs", + "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1645296114, - "narHash": 
"sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=", + "lastModified": 1750736827, + "narHash": "sha256-UcNP7BR41xMTe0sfHBH8R79+HdCw0OwkC/ZKrQEuMeo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1", + "rev": "b4a30b08433ad7b6e1dfba0833fb0fe69d43dfec", "type": "github" }, "original": { - "id": "nixpkgs", - "ref": "nixos-21.05-small", - "type": "indirect" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", "owner": "NixOS", + "ref": "nixos-25.05-small", "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" - }, - "original": { - "id": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "indirect" } }, "root": { "inputs": { - "newNixpkgs": "newNixpkgs", "nix": "nix", - "nixpkgs": [ - "nix", - "nixpkgs" - ] + "nix-eval-jobs": "nix-eval-jobs", + "nixpkgs": "nixpkgs" } } }, diff --git a/flake.nix b/flake.nix index 01b0c988..e67a3a99 100644 --- a/flake.nix +++ b/flake.nix @@ -1,1023 +1,130 @@ { description = "A Nix-based continuous build system"; - # FIXME: All the pinned versions of nix/nixpkgs have a broken foreman (yes, - # even 2.7.0's Nixpkgs pin). - inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; - inputs.nixpkgs.follows = "nix/nixpkgs"; - #inputs.nix.url = github:NixOS/nix/2.7.0; + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small"; - outputs = { self, newNixpkgs, nixpkgs, nix }: + inputs.nix = { + url = "github:NixOS/nix/2.29-maintenance"; + # We want to control the deps precisely + flake = false; + }; + + inputs.nix-eval-jobs = { + url = "github:nix-community/nix-eval-jobs"; + # We want to control the deps precisely + flake = false; + }; + + outputs = { self, nixpkgs, nix, nix-eval-jobs, ... 
}: let - - version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}"; - - pkgs = import nixpkgs { - system = "x86_64-linux"; - overlays = [ self.overlay nix.overlay ]; - }; - - # NixOS configuration used for VM tests. - hydraServer = - { config, pkgs, ... }: - { - imports = [ self.nixosModules.hydraTest ]; - - virtualisation.memorySize = 1024; - virtualisation.writableStore = true; - - environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ]; - - nix = { - # Without this nix tries to fetch packages from the default - # cache.nixos.org which is not reachable from this sandboxed NixOS test. - binaryCaches = [ ]; - }; - }; - + systems = [ "x86_64-linux" "aarch64-linux" ]; + forEachSystem = nixpkgs.lib.genAttrs systems; in rec { # A Nixpkgs overlay that provides a 'hydra' package. - overlay = final: prev: { - - # Overlay these packages to use dependencies from the Nixpkgs everything - # else uses, to side-step the version difference: glibc is 2.32 in the - # nix-pinned Nixpkgs, but 2.33 in the newNixpkgs commit. - civetweb = (final.callPackage "${newNixpkgs}/pkgs/development/libraries/civetweb" { }).overrideAttrs - # Can be dropped once newNixpkgs points to a revision containing - # https://github.com/NixOS/nixpkgs/pull/167751 - ({ cmakeFlags ? [ ], ... }: { - cmakeFlags = cmakeFlags ++ [ - "-DCIVETWEB_ENABLE_IPV6=1" - ]; + overlays.default = final: prev: { + nixDependenciesForHydra = final.lib.makeScope final.newScope + (import (nix + "/packaging/dependencies.nix") { + pkgs = final; + inherit (final) stdenv; + inputs = {}; }); - prometheus-cpp = final.callPackage "${newNixpkgs}/pkgs/development/libraries/prometheus-cpp" { }; - - # Add LDAP dependencies that aren't currently found within nixpkgs. 
- perlPackages = prev.perlPackages // { - TestPostgreSQL = final.perlPackages.buildPerlModule { - pname = "Test-PostgreSQL"; - version = "1.28-1"; - src = final.fetchFromGitHub { - owner = "grahamc"; - repo = "Test-postgresql"; - rev = "release-1.28-1"; - hash = "sha256-SFC1C3q3dbcBos18CYd/s0TIcfJW4g04ld0+XQXVToQ="; - }; - buildInputs = with final.perlPackages; [ ModuleBuildTiny TestSharedFork pkgs.postgresql ]; - propagatedBuildInputs = with final.perlPackages; [ DBDPg DBI FileWhich FunctionParameters Moo TieHashMethod TryTiny TypeTiny ]; - - makeMakerFlags = "POSTGRES_HOME=${final.postgresql}"; - - meta = { - homepage = "https://github.com/grahamc/Test-postgresql/releases/tag/release-1.28-1"; - description = "PostgreSQL runner for tests"; - license = with final.lib.licenses; [ artistic2 ]; - }; - }; - - FunctionParameters = final.perlPackages.buildPerlPackage { - pname = "Function-Parameters"; - version = "2.001003"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/M/MA/MAUKE/Function-Parameters-2.001003.tar.gz"; - sha256 = "eaa22c6b43c02499ec7db0758c2dd218a3b2ab47a714b2bdf8010b5ee113c242"; - }; - buildInputs = with final.perlPackages; [ DirSelf TestFatal ]; - meta = { - description = "Define functions and methods with parameter lists (\"subroutine signatures\")"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - CatalystPluginPrometheusTiny = final.perlPackages.buildPerlPackage { - pname = "Catalyst-Plugin-PrometheusTiny"; - version = "0.005"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/S/SY/SYSPETE/Catalyst-Plugin-PrometheusTiny-0.005.tar.gz"; - sha256 = "a42ef09efdc3053899ae007c41220d3ed7207582cc86e491b4f534539c992c5a"; - }; - buildInputs = with final.perlPackages; [ HTTPMessage Plack SubOverride TestDeep ]; - propagatedBuildInputs = with final.perlPackages; [ CatalystRuntime Moose PrometheusTiny PrometheusTinyShared ]; - meta = { - description = "Prometheus metrics for Catalyst"; - license = with 
final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - CryptArgon2 = final.perlPackages.buildPerlModule { - pname = "Crypt-Argon2"; - version = "0.010"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Argon2-0.010.tar.gz"; - sha256 = "3ea1c006f10ef66fd417e502a569df15c4cc1c776b084e35639751c41ce6671a"; - }; - nativeBuildInputs = [ pkgs.ld-is-cc-hook ]; - meta = { - description = "Perl interface to the Argon2 key derivation functions"; - license = final.lib.licenses.cc0; - }; - }; - - CryptPassphrase = final.perlPackages.buildPerlPackage { - pname = "Crypt-Passphrase"; - version = "0.003"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Passphrase-0.003.tar.gz"; - sha256 = "685aa090f8179a86d6896212ccf8ccfde7a79cce857199bb14e2277a10d240ad"; - }; - meta = { - description = "A module for managing passwords in a cryptographically agile manner"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - CryptPassphraseArgon2 = final.perlPackages.buildPerlPackage { - pname = "Crypt-Passphrase-Argon2"; - version = "0.002"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Passphrase-Argon2-0.002.tar.gz"; - sha256 = "3906ff81697d13804ee21bd5ab78ffb1c4408b4822ce020e92ecf4737ba1f3a8"; - }; - propagatedBuildInputs = with final.perlPackages; [ CryptArgon2 CryptPassphrase ]; - meta = { - description = "An Argon2 encoder for Crypt::Passphrase"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - DataRandom = final.perlPackages.buildPerlPackage { - pname = "Data-Random"; - version = "0.13"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/B/BA/BAREFOOT/Data-Random-0.13.tar.gz"; - sha256 = "eb590184a8db28a7e49eab09e25f8650c33f1f668b6a472829de74a53256bfc0"; - }; - buildInputs = with final.perlPackages; [ FileShareDirInstall TestMockTime ]; - meta = { - description = "Perl module to generate random data"; - license = with final.lib.licenses; [ artistic1 
gpl1Plus ]; - }; - }; - - DirSelf = final.perlPackages.buildPerlPackage { - pname = "Dir-Self"; - version = "0.11"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/M/MA/MAUKE/Dir-Self-0.11.tar.gz"; - sha256 = "e251a51abc7d9ba3e708f73c2aa208e09d47a0c528d6254710fa78cc8d6885b5"; - }; - meta = { - homepage = "https://github.com/mauke/Dir-Self"; - description = "A __DIR__ constant for the directory your source file is in"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - HashSharedMem = final.perlPackages.buildPerlModule { - pname = "Hash-SharedMem"; - version = "0.005"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/Z/ZE/ZEFRAM/Hash-SharedMem-0.005.tar.gz"; - sha256 = "324776808602f7bdc44adaa937895365454029a926fa611f321c9bf6b940bb5e"; - }; - buildInputs = with final.perlPackages; [ ScalarString ]; - meta = { - description = "Efficient shared mutable hash"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - PrometheusTiny = final.perlPackages.buildPerlPackage { - pname = "Prometheus-Tiny"; - version = "0.007"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-0.007.tar.gz"; - sha256 = "0ef8b226a2025cdde4df80129dd319aa29e884e653c17dc96f4823d985c028ec"; - }; - buildInputs = with final.perlPackages; [ HTTPMessage Plack TestException ]; - meta = { - homepage = "https://github.com/robn/Prometheus-Tiny"; - description = "A tiny Prometheus client"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - PrometheusTinyShared = final.perlPackages.buildPerlPackage { - pname = "Prometheus-Tiny-Shared"; - version = "0.023"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-Shared-0.023.tar.gz"; - sha256 = "7c2c72397be5d8e4839d1bf4033c1800f467f2509689673c6419df48794f2abe"; - }; - buildInputs = with final.perlPackages; [ DataRandom HTTPMessage Plack TestDifferences TestException ]; - propagatedBuildInputs = with 
final.perlPackages; [ HashSharedMem JSONXS PrometheusTiny ]; - meta = { - homepage = "https://github.com/robn/Prometheus-Tiny-Shared"; - description = "A tiny Prometheus client with a shared database behind it"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - ReadonlyX = final.perlPackages.buildPerlModule { - pname = "ReadonlyX"; - version = "1.04"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/S/SA/SANKO/ReadonlyX-1.04.tar.gz"; - sha256 = "81bb97dba93ac6b5ccbce04a42c3590eb04557d75018773ee18d5a30fcf48188"; - }; - buildInputs = with final.perlPackages; [ ModuleBuildTiny TestFatal ]; - meta = { - homepage = "https://github.com/sanko/readonly"; - description = "Faster facility for creating read-only scalars, arrays, hashes"; - license = final.lib.licenses.artistic2; - }; - }; - - TieHashMethod = final.perlPackages.buildPerlPackage { - pname = "Tie-Hash-Method"; - version = "0.02"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/Y/YV/YVES/Tie-Hash-Method-0.02.tar.gz"; - sha256 = "d513fbb51413f7ca1e64a1bdce6194df7ec6076dea55066d67b950191eec32a9"; - }; - meta = { - description = "Tied hash with specific methods overriden by callbacks"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - Test2Harness = final.perlPackages.buildPerlPackage { - pname = "Test2-Harness"; - version = "1.000042"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Harness-1.000042.tar.gz"; - sha256 = "aaf231a68af1a6ffd6a11188875fcf572e373e43c8285945227b9d687b43db2d"; - }; - - checkPhase = '' - patchShebangs ./t ./scripts/yath - ./scripts/yath test -j $NIX_BUILD_CORES - ''; - - propagatedBuildInputs = with final.perlPackages; [ DataUUID Importer LongJump ScopeGuard TermTable Test2PluginMemUsage Test2PluginUUID Test2Suite gotofile ]; - meta = { - description = "A new and improved test harness with better Test2 integration"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - 
Test2PluginMemUsage = prev.perlPackages.buildPerlPackage { - pname = "Test2-Plugin-MemUsage"; - version = "0.002003"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-MemUsage-0.002003.tar.gz"; - sha256 = "5e0662d5a823ae081641f5ce82843111eec1831cd31f883a6c6de54afdf87c25"; - }; - buildInputs = with final.perlPackages; [ Test2Suite ]; - meta = { - description = "Collect and display memory usage information"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - Test2PluginUUID = prev.perlPackages.buildPerlPackage { - pname = "Test2-Plugin-UUID"; - version = "0.002001"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-UUID-0.002001.tar.gz"; - sha256 = "4c6c8d484d7153d8779dc155a992b203095b5c5aa1cfb1ee8bcedcd0601878c9"; - }; - buildInputs = with final.perlPackages;[ Test2Suite ]; - propagatedBuildInputs = with final.perlPackages; [ DataUUID ]; - meta = { - description = "Use REAL UUIDs in Test2"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - LongJump = final.perlPackages.buildPerlPackage { - pname = "Long-Jump"; - version = "0.000001"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/Long-Jump-0.000001.tar.gz"; - sha256 = "d5d6456d86992b559d8f66fc90960f919292cd3803c13403faac575762c77af4"; - }; - buildInputs = with final.perlPackages; [ Test2Suite ]; - meta = { - description = "Mechanism for returning to a specific point from a deeply nested stack"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - gotofile = final.perlPackages.buildPerlPackage { - pname = "goto-file"; - version = "0.005"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/E/EX/EXODIST/goto-file-0.005.tar.gz"; - sha256 = "c6cdd5ee4a6cdcbdbf314d92a4f9985dbcdf9e4258048cae76125c052aa31f77"; - }; - buildInputs = with final.perlPackages; [ Test2Suite ]; - meta = { - description = "Stop parsing the current file and move on to a 
different one"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - NetLDAPServer = prev.perlPackages.buildPerlPackage { - pname = "Net-LDAP-Server"; - version = "0.43"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/A/AA/AAR/Net-LDAP-Server-0.43.tar.gz"; - sha256 = "0qmh3cri3fpccmwz6bhwp78yskrb3qmalzvqn0a23hqbsfs4qv6x"; - }; - propagatedBuildInputs = with final.perlPackages; [ NetLDAP ConvertASN1 ]; - meta = { - description = "LDAP server side protocol handling"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - NetLDAPSID = prev.perlPackages.buildPerlPackage { - pname = "Net-LDAP-SID"; - version = "0.0001"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-SID-0.001.tar.gz"; - sha256 = "1mnnpkmj8kpb7qw50sm8h4sd8py37ssy2xi5hhxzr5whcx0cvhm8"; - }; - meta = { - description = "Active Directory Security Identifier manipulation"; - license = with final.lib.licenses; [ artistic2 ]; - }; - }; - - NetLDAPServerTest = prev.perlPackages.buildPerlPackage { - pname = "Net-LDAP-Server-Test"; - version = "0.22"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-Server-Test-0.22.tar.gz"; - sha256 = "13idip7jky92v4adw60jn2gcc3zf339gsdqlnc9nnvqzbxxp285i"; - }; - propagatedBuildInputs = with final.perlPackages; [ NetLDAP NetLDAPServer TestMore DataDump NetLDAPSID ]; - meta = { - description = "test Net::LDAP code"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - CatalystAuthenticationStoreLDAP = prev.perlPackages.buildPerlPackage { - pname = "Catalyst-Authentication-Store-LDAP"; - version = "1.016"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/I/IL/ILMARI/Catalyst-Authentication-Store-LDAP-1.016.tar.gz"; - sha256 = "0cm399vxqqf05cjgs1j5v3sk4qc6nmws5nfhf52qvpbwc4m82mq8"; - }; - propagatedBuildInputs = with final.perlPackages; [ NetLDAP CatalystPluginAuthentication ClassAccessorFast ]; - buildInputs = with final.perlPackages; [ TestMore 
TestMockObject TestException NetLDAPServerTest ]; - meta = { - description = "Authentication from an LDAP Directory"; - license = with final.lib.licenses; [ artistic1 ]; - }; - }; - - PerlCriticCommunity = prev.perlPackages.buildPerlModule { - pname = "Perl-Critic-Community"; - version = "1.0.0"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/D/DB/DBOOK/Perl-Critic-Community-v1.0.0.tar.gz"; - sha256 = "311b775da4193e9de94cf5225e993cc54dd096ae1e7ef60738cdae1d9b8854e7"; - }; - buildInputs = with final.perlPackages; [ ModuleBuildTiny ]; - propagatedBuildInputs = with final.perlPackages; [ PPI PathTiny PerlCritic PerlCriticPolicyVariablesProhibitLoopOnHash PerlCriticPulp ]; - meta = { - homepage = "https://github.com/Grinnz/Perl-Critic-Freenode"; - description = "Community-inspired Perl::Critic policies"; - license = final.lib.licenses.artistic2; - }; - }; - - PerlCriticPolicyVariablesProhibitLoopOnHash = prev.perlPackages.buildPerlPackage { - pname = "Perl-Critic-Policy-Variables-ProhibitLoopOnHash"; - version = "0.008"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/X/XS/XSAWYERX/Perl-Critic-Policy-Variables-ProhibitLoopOnHash-0.008.tar.gz"; - sha256 = "12f5f0be96ea1bdc7828058577bd1c5c63ca23c17fac9c3709452b3dff5b84e0"; - }; - propagatedBuildInputs = with final.perlPackages; [ PerlCritic ]; - meta = { - description = "Don't write loops on hashes, only on keys and values of hashes"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - PerlCriticPulp = prev.perlPackages.buildPerlPackage { - pname = "Perl-Critic-Pulp"; - version = "99"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KR/KRYDE/Perl-Critic-Pulp-99.tar.gz"; - sha256 = "b8fda842fcbed74d210257c0a284b6dc7b1d0554a47a3de5d97e7d542e23e7fe"; - }; - propagatedBuildInputs = with final.perlPackages; [ IOString ListMoreUtils PPI PerlCritic PodMinimumVersion ]; - meta = { - homepage = "http://user42.tuxfamily.org/perl-critic-pulp/index.html"; - description = 
"Some add-on policies for Perl::Critic"; - license = final.lib.licenses.gpl3Plus; - }; - }; - - PodMinimumVersion = prev.perlPackages.buildPerlPackage { - pname = "Pod-MinimumVersion"; - version = "50"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/K/KR/KRYDE/Pod-MinimumVersion-50.tar.gz"; - sha256 = "0bd2812d9aacbd99bb71fa103a4bb129e955c138ba7598734207dc9fb67b5a6f"; - }; - propagatedBuildInputs = with final.perlPackages; [ IOString PodParser ]; - meta = { - homepage = "http://user42.tuxfamily.org/pod-minimumversion/index.html"; - description = "Determine minimum Perl version of POD directives"; - license = final.lib.licenses.free; - }; - }; - - StringCompareConstantTime = final.perlPackages.buildPerlPackage { - pname = "String-Compare-ConstantTime"; - version = "0.321"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/F/FR/FRACTAL/String-Compare-ConstantTime-0.321.tar.gz"; - sha256 = "0b26ba2b121d8004425d4485d1d46f59001c83763aa26624dff6220d7735d7f7"; - }; - meta = { - description = "Timing side-channel protected string compare"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - - UUID4Tiny = final.perlPackages.buildPerlPackage { - pname = "UUID4-Tiny"; - version = "0.002"; - src = final.fetchurl { - url = "mirror://cpan/authors/id/C/CV/CVLIBRARY/UUID4-Tiny-0.002.tar.gz"; - sha256 = "e7535b31e386d432dec7adde214348389e1d5cf753e7ed07f1ae04c4360840cf"; - }; - meta = { - description = "Cryptographically secure v4 UUIDs for Linux x64"; - license = with final.lib.licenses; [ artistic1 gpl1Plus ]; - }; - }; - + nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope + (import (nix + "/packaging/components.nix") { + officialRelease = true; + inherit (final) lib; + pkgs = final; + src = nix; + maintainers = [ ]; + }); + nix-eval-jobs = final.callPackage nix-eval-jobs { + nixComponents = final.nixComponentsForHydra; }; - - hydra = with final; let - perlDeps = buildEnv { - name = "hydra-perl-deps"; - 
paths = with perlPackages; lib.closePropagation - [ - AuthenSASL - CatalystActionREST - CatalystAuthenticationStoreDBIxClass - CatalystAuthenticationStoreLDAP - CatalystDevel - CatalystPluginAccessLog - CatalystPluginAuthorizationRoles - CatalystPluginCaptcha - CatalystPluginPrometheusTiny - CatalystPluginSessionStateCookie - CatalystPluginSessionStoreFastMmap - CatalystPluginStackTrace - CatalystPluginUnicodeEncoding - CatalystTraitForRequestProxyBase - CatalystViewDownload - CatalystViewJSON - CatalystViewTT - CatalystXRoleApplicator - CatalystXScriptServerStarman - CryptPassphrase - CryptPassphraseArgon2 - CryptRandPasswd - DataDump - DateTime - DBDPg - DBDSQLite - DigestSHA1 - EmailMIME - EmailSender - FileSlurper - FileWhich - final.nix.perl-bindings - git - IOCompress - IPCRun - IPCRun3 - JSON - JSONMaybeXS - JSONXS - ListSomeUtils - LWP - LWPProtocolHttps - ModulePluggable - NetAmazonS3 - NetPrometheus - NetStatsd - PadWalker - ParallelForkManager - PerlCriticCommunity - PrometheusTinyShared - ReadonlyX - SetScalar - SQLSplitStatement - Starman - StringCompareConstantTime - SysHostnameLong - TermSizeAny - TermReadKey - Test2Harness - TestMore - TestPostgreSQL - TextDiff - TextTable - UUID4Tiny - YAML - XMLSimple - ]; - }; - - in - stdenv.mkDerivation { - - name = "hydra-${version}"; - - src = self; - - buildInputs = - [ - makeWrapper - autoconf - automake - libtool - unzip - nukeReferences - pkgconfig - libpqxx - gitAndTools.topGit - mercurial - darcs - subversion - breezy - openssl - bzip2 - libxslt - final.nix - perlDeps - perl - mdbook - pixz - boost - postgresql_13 - (if lib.versionAtLeast lib.version "20.03pre" - then nlohmann_json - else nlohmann_json.override { multipleHeaders = true; }) - prometheus-cpp - ]; - - checkInputs = [ - cacert - # FIXME: foreman is broken on all nix/nixpkgs pin, up to and - # including 2.7.0 - newNixpkgs.legacyPackages.${final.system}.foreman - glibcLocales - libressl.nc - openldap - python3 - ]; - - hydraPath = 
lib.makeBinPath ( - [ - subversion - openssh - final.nix - coreutils - findutils - pixz - gzip - bzip2 - lzma - gnutar - unzip - git - gitAndTools.topGit - mercurial - darcs - gnused - breezy - ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] - ); - - OPENLDAP_ROOT = openldap; - - shellHook = '' - pushd $(git rev-parse --show-toplevel) >/dev/null - - PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH - PERL5LIB=$(pwd)/src/lib:$PERL5LIB - export HYDRA_HOME="$(pwd)/src/" - mkdir -p .hydra-data - export HYDRA_DATA="$(pwd)/.hydra-data" - export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444' - - popd >/dev/null - ''; - - preConfigure = "autoreconf -vfi"; - - NIX_LDFLAGS = [ "-lpthread" ]; - - enableParallelBuilding = true; - - doCheck = true; - - preCheck = '' - patchShebangs . - export LOGNAME=''${LOGNAME:-foo} - # set $HOME for bzr so it can create its trace file - export HOME=$(mktemp -d) - ''; - - postInstall = '' - mkdir -p $out/nix-support - - for i in $out/bin/*; do - read -n 4 chars < $i - if [[ $chars =~ ELF ]]; then continue; fi - wrapProgram $i \ - --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \ - --prefix PATH ':' $out/bin:$hydraPath \ - --set HYDRA_RELEASE ${version} \ - --set HYDRA_HOME $out/libexec/hydra \ - --set NIX_RELEASE ${final.nix.name or "unknown"} - done - ''; - - dontStrip = true; - - meta.description = "Build of Hydra on ${system}"; - passthru = { inherit perlDeps; inherit (final) nix; }; + hydra = final.callPackage ./package.nix { + inherit (final.lib) fileset; + rawSrc = self; + nixComponents = final.nixComponentsForHydra; }; }; hydraJobs = { + build = forEachSystem (system: packages.${system}.hydra); - build.x86_64-linux = packages.x86_64-linux.hydra; + buildNoTests = forEachSystem (system: + packages.${system}.hydra.overrideAttrs (_: { + doCheck = false; + }) + ); - manual = - pkgs.runCommand "hydra-manual-${version}" { } + manual = forEachSystem (system: 
let + pkgs = nixpkgs.legacyPackages.${system}; + hydra = self.packages.${pkgs.hostPlatform.system}.hydra; + in + pkgs.runCommand "hydra-manual-${hydra.version}" { } '' mkdir -p $out/share - cp -prvd ${pkgs.hydra}/share/doc $out/share/ + cp -prvd ${hydra.doc}/share/doc $out/share/ mkdir $out/nix-support echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products - ''; + ''); - tests.install.x86_64-linux = - with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; - simpleTest { - machine = hydraServer; - testScript = - '' - machine.wait_for_job("hydra-init") - machine.wait_for_job("hydra-server") - machine.wait_for_job("hydra-evaluator") - machine.wait_for_job("hydra-queue-runner") - machine.wait_for_open_port("3000") - machine.succeed("curl --fail http://localhost:3000/") - ''; - }; - - tests.notifications.x86_64-linux = - with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; - simpleTest { - machine = { pkgs, ... }: { - imports = [ hydraServer ]; - services.hydra-dev.extraConfig = '' - - url = http://127.0.0.1:8086 - db = hydra - - ''; - services.influxdb.enable = true; - }; - testScript = '' - machine.wait_for_job("hydra-init") - - # Create an admin account and some other state. 
- machine.succeed( - """ - su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin" - mkdir /run/jobset - chmod 755 /run/jobset - cp ${./t/jobs/api-test.nix} /run/jobset/default.nix - chmod 644 /run/jobset/default.nix - chown -R hydra /run/jobset - """ - ) - - # Wait until InfluxDB can receive web requests - machine.wait_for_job("influxdb") - machine.wait_for_open_port("8086") - - # Create an InfluxDB database where hydra will write to - machine.succeed( - "curl -XPOST 'http://127.0.0.1:8086/query' " - + "--data-urlencode 'q=CREATE DATABASE hydra'" - ) - - # Wait until hydra-server can receive HTTP requests - machine.wait_for_job("hydra-server") - machine.wait_for_open_port("3000") - - # Setup the project and jobset - machine.succeed( - "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2" - ) - - # Wait until hydra has build the job and - # the InfluxDBNotification plugin uploaded its notification to InfluxDB - machine.wait_until_succeeds( - "curl -s -H 'Accept: application/csv' " - + "-G 'http://127.0.0.1:8086/query?db=hydra' " - + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success" - ) - ''; - }; - - tests.gitea.x86_64-linux = - with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; - makeTest { - machine = { pkgs, ... 
}: { - imports = [ hydraServer ]; - services.hydra-dev.extraConfig = '' - - root=d7f16a3412e01a43a414535b16007c6931d3a9c7 - - ''; - nix = { - distributedBuilds = true; - buildMachines = [{ - hostName = "localhost"; - systems = [ "x86_64-linux" ]; - }]; - binaryCaches = [ ]; - }; - services.gitea = { - enable = true; - database.type = "postgres"; - disableRegistration = true; - httpPort = 3001; - }; - services.openssh.enable = true; - environment.systemPackages = with pkgs; [ gitea git jq gawk ]; - networking.firewall.allowedTCPPorts = [ 3000 ]; - }; - skipLint = true; - testScript = - let - scripts.mktoken = pkgs.writeText "token.sql" '' - INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7'); - ''; - - scripts.git-setup = pkgs.writeShellScript "setup.sh" '' - set -x - mkdir -p /tmp/repo $HOME/.ssh - cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk - chmod 0400 $HOME/.ssh/privk - git -C /tmp/repo init - cp ${smallDrv} /tmp/repo/jobset.nix - git -C /tmp/repo add . 
- git config --global user.email test@localhost - git config --global user.name test - git -C /tmp/repo commit -m 'Initial import' - git -C /tmp/repo remote add origin gitea@machine:root/repo - GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \ - git -C /tmp/repo push origin master - git -C /tmp/repo log >&2 - ''; - - scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" '' - set -x - su -l hydra -c "hydra-create-user root --email-address \ - 'alice@example.org' --password foobar --role admin" - - URL=http://localhost:3000 - USERNAME="root" - PASSWORD="foobar" - PROJECT_NAME="trivial" - JOBSET_NAME="trivial" - mycurl() { - curl --referer $URL -H "Accept: application/json" \ - -H "Content-Type: application/json" $@ - } - - cat >data.json <data.json <data.json < $out; exit 0"]; - }; - } - ''; - in - '' - import json - - machine.start() - machine.wait_for_unit("multi-user.target") - machine.wait_for_open_port(3000) - machine.wait_for_open_port(3001) - - machine.succeed( - "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create " - + "--username root --password root --email test@localhost'" - ) - machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'") - - machine.succeed( - "curl --fail -X POST http://localhost:3001/api/v1/user/repos " - + "-H 'Accept: application/json' -H 'Content-Type: application/json' " - + f"-H 'Authorization: token ${api_token}'" - + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\''' - ) - - machine.succeed( - "curl --fail -X POST http://localhost:3001/api/v1/user/keys " - + "-H 'Accept: application/json' -H 'Content-Type: application/json' " - + f"-H 'Authorization: token ${api_token}'" - + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\''' - ) - - machine.succeed( - "${scripts.git-setup}" - ) - - machine.succeed( - "${scripts.hydra-setup}" - ) - - machine.wait_until_succeeds( - 'curl -Lf -s 
http://localhost:3000/build/1 -H "Accept: application/json" ' - + '| jq .buildstatus | xargs test 0 -eq' - ) - - data = machine.succeed( - 'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" ' - + "-H 'Accept: application/json' -H 'Content-Type: application/json' " - + f"-H 'Authorization: token ${api_token}'" - ) - - response = json.loads(data) - - assert len(response) == 2, "Expected exactly two status updates for latest commit!" - assert response[0]['status'] == "success", "Expected latest status to be success!" - assert response[1]['status'] == "pending", "Expected first status to be pending!" - - machine.shutdown() - ''; - }; - - tests.validate-openapi = pkgs.runCommand "validate-openapi" - { buildInputs = [ pkgs.openapi-generator-cli ]; } - '' - openapi-generator-cli validate -i ${./hydra-api.yaml} - touch $out - ''; + tests = import ./nixos-tests.nix { + inherit forEachSystem nixpkgs nixosModules; + }; container = nixosConfigurations.container.config.system.build.toplevel; }; - checks.x86_64-linux.build = hydraJobs.build.x86_64-linux; - checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux; - checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi; + checks = forEachSystem (system: { + build = hydraJobs.build.${system}; + install = hydraJobs.tests.install.${system}; + validate-openapi = hydraJobs.tests.validate-openapi.${system}; + }); - packages.x86_64-linux.hydra = pkgs.hydra; - defaultPackage.x86_64-linux = pkgs.hydra; - - nixosModules.hydra = { - imports = [ ./hydra-module.nix ]; - nixpkgs.overlays = [ self.overlay nix.overlay ]; - }; - - nixosModules.hydraTest = { - imports = [ self.nixosModules.hydra ]; - - services.hydra-dev.enable = true; - services.hydra-dev.hydraURL = "http://hydra.example.org"; - services.hydra-dev.notificationSender = "admin@hydra.example.org"; - - systemd.services.hydra-send-stats.enable = false; - - 
services.postgresql.enable = true; - services.postgresql.package = pkgs.postgresql_11; - - # The following is to work around the following error from hydra-server: - # [error] Caught exception in engine "Cannot determine local time zone" - time.timeZone = "UTC"; - - nix.extraOptions = '' - allowed-uris = https://github.com/ - ''; - }; - - nixosModules.hydraProxy = { - services.httpd = { - enable = true; - adminAddr = "hydra-admin@example.org"; - extraConfig = '' - - Order deny,allow - Allow from all - - - ProxyRequests Off - ProxyPreserveHost On - ProxyPass /apache-errors ! - ErrorDocument 503 /apache-errors/503.html - ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on - ProxyPassReverse / http://127.0.0.1:3000/ - ''; + packages = forEachSystem (system: let + inherit (nixpkgs) lib; + pkgs = nixpkgs.legacyPackages.${system}; + nixDependencies = lib.makeScope pkgs.newScope + (import (nix + "/packaging/dependencies.nix") { + inherit pkgs; + inherit (pkgs) stdenv; + inputs = {}; + }); + nixComponents = lib.makeScope nixDependencies.newScope + (import (nix + "/packaging/components.nix") { + officialRelease = true; + inherit lib pkgs; + src = nix; + maintainers = [ ]; + }); + in { + nix-eval-jobs = pkgs.callPackage nix-eval-jobs { + inherit nixComponents; }; + hydra = pkgs.callPackage ./package.nix { + inherit (nixpkgs.lib) fileset; + inherit nixComponents; + inherit (self.packages.${system}) nix-eval-jobs; + rawSrc = self; + }; + default = self.packages.${system}.hydra; + }); + + nixosModules = import ./nixos-modules { + inherit self; }; nixosConfigurations.container = nixpkgs.lib.nixosSystem { system = "x86_64-linux"; modules = [ + self.nixosModules.hydra self.nixosModules.hydraTest self.nixosModules.hydraProxy { - system.configurationRevision = self.rev; + system.configurationRevision = self.lastModifiedDate; boot.isContainer = true; networking.useDHCP = false; diff --git a/hydra-api.yaml b/hydra-api.yaml index ce7e0f9a..a2fdea28 100644 --- a/hydra-api.yaml 
+++ b/hydra-api.yaml @@ -70,7 +70,7 @@ paths: $ref: '#/components/examples/projects-success' /api/push: - put: + post: summary: trigger jobsets parameters: - in: query @@ -533,13 +533,13 @@ paths: schema: $ref: '#/components/schemas/Error' - /eval/{build-id}: + /eval/{eval-id}: get: - summary: Retrieves evaluations identified by build id + summary: Retrieves evaluations identified by eval id parameters: - - name: build-id + - name: eval-id in: path - description: build identifier + description: eval identifier required: true schema: type: integer @@ -551,6 +551,24 @@ paths: schema: $ref: '#/components/schemas/JobsetEval' + /eval/{eval-id}/builds: + get: + summary: Retrieves all builds belonging to an evaluation identified by eval id + parameters: + - name: eval-id + in: path + description: eval identifier + required: true + schema: + type: integer + responses: + '200': + description: builds + content: + application/json: + schema: + $ref: '#/components/schemas/JobsetEvalBuilds' + components: schemas: @@ -796,6 +814,13 @@ components: additionalProperties: $ref: '#/components/schemas/JobsetEvalInput' + JobsetEvalBuilds: + type: array + items: + type: object + additionalProperties: + $ref: '#/components/schemas/Build' + JobsetOverview: type: array items: @@ -870,7 +895,7 @@ components: description: Size of the produced file type: integer defaultpath: - description: This is a Git/Mercurial commit hash or a Subversion revision number + description: if path is a directory, the default file relative to path to be served type: string 'type': description: Types of build product (user defined) diff --git a/meson.build b/meson.build new file mode 100644 index 00000000..c1eb577b --- /dev/null +++ b/meson.build @@ -0,0 +1,26 @@ +project('hydra', 'cpp', + version: files('version.txt'), + license: 'GPL-3.0', + default_options: [ + 'debug=true', + 'optimization=2', + 'cpp_std=c++20', + ], +) + +nix_util_dep = dependency('nix-util', required: true) +nix_store_dep = 
dependency('nix-store', required: true) +nix_main_dep = dependency('nix-main', required: true) + +pqxx_dep = dependency('libpqxx', required: true) + +prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true) +prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true) + +mdbook = find_program('mdbook', native: true) +perl = find_program('perl', native: true) + +subdir('doc/manual') +subdir('nixos-modules') +subdir('src') +subdir('t') diff --git a/nixos-modules/default.nix b/nixos-modules/default.nix new file mode 100644 index 00000000..d12d8338 --- /dev/null +++ b/nixos-modules/default.nix @@ -0,0 +1,47 @@ +{ self }: + +{ + hydra = { pkgs, lib,... }: { + _file = ./default.nix; + imports = [ ./hydra.nix ]; + services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra; + }; + + hydraTest = { pkgs, ... }: { + services.hydra-dev.enable = true; + services.hydra-dev.hydraURL = "http://hydra.example.org"; + services.hydra-dev.notificationSender = "admin@hydra.example.org"; + + systemd.services.hydra-send-stats.enable = false; + + services.postgresql.enable = true; + + # The following is to work around the following error from hydra-server: + # [error] Caught exception in engine "Cannot determine local time zone" + time.timeZone = "UTC"; + + nix.extraOptions = '' + allowed-uris = https://github.com/ + ''; + }; + + hydraProxy = { + services.httpd = { + enable = true; + adminAddr = "hydra-admin@example.org"; + extraConfig = '' + + Order deny,allow + Allow from all + + + ProxyRequests Off + ProxyPreserveHost On + ProxyPass /apache-errors ! 
+ ErrorDocument 503 /apache-errors/503.html + ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on + ProxyPassReverse / http://127.0.0.1:3000/ + ''; + }; + }; +} diff --git a/hydra-module.nix b/nixos-modules/hydra.nix similarity index 93% rename from hydra-module.nix rename to nixos-modules/hydra.nix index 0df5e690..83ffeec4 100644 --- a/hydra-module.nix +++ b/nixos-modules/hydra.nix @@ -68,8 +68,6 @@ in package = mkOption { type = types.path; - default = pkgs.hydra; - defaultText = literalExpression "pkgs.hydra"; description = "The Hydra package."; }; @@ -228,7 +226,11 @@ in useDefaultShell = true; }; - nix.trustedUsers = [ "hydra-queue-runner" ]; + nix.settings = { + trusted-users = [ "hydra-queue-runner" ]; + keep-outputs = true; + keep-derivations = true; + }; services.hydra-dev.extraConfig = '' @@ -256,11 +258,6 @@ in environment.variables = hydraEnv; - nix.extraOptions = '' - gc-keep-outputs = true - gc-keep-derivations = true - ''; - systemd.services.hydra-init = { wantedBy = [ "multi-user.target" ]; requires = optional haveLocalDB "postgresql.service"; @@ -268,17 +265,17 @@ in environment = env // { HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init"; }; - path = [ pkgs.utillinux ]; + path = [ pkgs.util-linux ]; preStart = '' ln -sf ${hydraConf} ${baseDir}/hydra.conf mkdir -m 0700 -p ${baseDir}/www - chown hydra-www.hydra ${baseDir}/www + chown hydra-www:hydra ${baseDir}/www mkdir -m 0700 -p ${baseDir}/queue-runner mkdir -m 0750 -p ${baseDir}/build-logs mkdir -m 0750 -p ${baseDir}/runcommand-logs - chown hydra-queue-runner.hydra \ + chown hydra-queue-runner:hydra \ ${baseDir}/queue-runner \ ${baseDir}/build-logs \ ${baseDir}/runcommand-logs @@ -309,7 +306,7 @@ in rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots fi - chown hydra.hydra ${cfg.gcRootsDir} + chown hydra:hydra ${cfg.gcRootsDir} chmod 2775 ${cfg.gcRootsDir} ''; serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init"; @@ -341,8 +338,9 @@ in 
systemd.services.hydra-queue-runner = { wantedBy = [ "multi-user.target" ]; requires = [ "hydra-init.service" ]; - after = [ "hydra-init.service" "network.target" ]; - path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ]; + wants = [ "network-online.target" ]; + after = [ "hydra-init.service" "network.target" "network-online.target" ]; + path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ]; restartTriggers = [ hydraConf ]; environment = env // { PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr @@ -366,7 +364,7 @@ in requires = [ "hydra-init.service" ]; restartTriggers = [ hydraConf ]; after = [ "hydra-init.service" "network.target" ]; - path = with pkgs; [ nettools cfg.package jq ]; + path = with pkgs; [ hostname-debian cfg.package jq ]; environment = env // { HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator"; }; @@ -409,6 +407,7 @@ in requires = [ "hydra-init.service" ]; after = [ "hydra-init.service" ]; restartTriggers = [ hydraConf ]; + path = [ pkgs.zstd ]; environment = env // { PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify"; @@ -459,10 +458,17 @@ in # logs automatically after a step finishes, but this doesn't work # if the queue runner is stopped prematurely. 
systemd.services.hydra-compress-logs = - { path = [ pkgs.bzip2 ]; + { path = [ pkgs.bzip2 pkgs.zstd ]; script = '' - find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f + set -eou pipefail + compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf) + if [[ $compression == "" ]]; then + compression="bzip2" + elif [[ $compression == zstd ]]; then + compression="zstd --rm" + fi + find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet ''; startAt = "Sun 01:45"; }; diff --git a/nixos-modules/meson.build b/nixos-modules/meson.build new file mode 100644 index 00000000..95c47e9f --- /dev/null +++ b/nixos-modules/meson.build @@ -0,0 +1,4 @@ +install_data('hydra.nix', + install_dir: get_option('datadir') / 'nix', + rename: ['hydra-module.nix'], +) diff --git a/nixos-tests.nix b/nixos-tests.nix new file mode 100644 index 00000000..c70a3cd1 --- /dev/null +++ b/nixos-tests.nix @@ -0,0 +1,306 @@ +{ forEachSystem, nixpkgs, nixosModules }: + +let + # NixOS configuration used for VM tests. + hydraServer = + { pkgs, ... }: + { + imports = [ + nixosModules.hydra + nixosModules.hydraTest + ]; + + virtualisation.memorySize = 1024; + virtualisation.writableStore = true; + + environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ]; + + nix = { + # Without this nix tries to fetch packages from the default + # cache.nixos.org which is not reachable from this sandboxed NixOS test. 
+ settings.substituters = [ ]; + }; + }; + +in + +{ + + install = forEachSystem (system: + (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest { + name = "hydra-install"; + nodes.machine = hydraServer; + testScript = + '' + machine.wait_for_job("hydra-init") + machine.wait_for_job("hydra-server") + machine.wait_for_job("hydra-evaluator") + machine.wait_for_job("hydra-queue-runner") + machine.wait_for_open_port(3000) + machine.succeed("curl --fail http://localhost:3000/") + ''; + }); + + notifications = forEachSystem (system: + (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest { + name = "hydra-notifications"; + nodes.machine = { + imports = [ hydraServer ]; + services.hydra-dev.extraConfig = '' + + url = http://127.0.0.1:8086 + db = hydra + + ''; + services.influxdb.enable = true; + }; + testScript = { nodes, ... }: '' + machine.wait_for_job("hydra-init") + + # Create an admin account and some other state. + machine.succeed( + """ + su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin" + mkdir /run/jobset + chmod 755 /run/jobset + cp ${./t/jobs/api-test.nix} /run/jobset/default.nix + chmod 644 /run/jobset/default.nix + chown -R hydra /run/jobset + """ + ) + + # Wait until InfluxDB can receive web requests + machine.wait_for_job("influxdb") + machine.wait_for_open_port(8086) + + # Create an InfluxDB database where hydra will write to + machine.succeed( + "curl -XPOST 'http://127.0.0.1:8086/query' " + + "--data-urlencode 'q=CREATE DATABASE hydra'" + ) + + # Wait until hydra-server can receive HTTP requests + machine.wait_for_job("hydra-server") + machine.wait_for_open_port(3000) + + # Setup the project and jobset + machine.succeed( + "su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2" + ) + + # Wait until hydra has build the job and + # the InfluxDBNotification 
plugin uploaded its notification to InfluxDB + machine.wait_until_succeeds( + "curl -s -H 'Accept: application/csv' " + + "-G 'http://127.0.0.1:8086/query?db=hydra' " + + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success" + ) + ''; + }); + + gitea = forEachSystem (system: + let + pkgs = nixpkgs.legacyPackages.${system}; + in + (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest { + name = "hydra-gitea"; + nodes.machine = { pkgs, ... }: { + imports = [ hydraServer ]; + services.hydra-dev.extraConfig = '' + + root=d7f16a3412e01a43a414535b16007c6931d3a9c7 + + ''; + nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ]; + nix = { + settings.substituters = [ ]; + }; + services.gitea = { + enable = true; + database.type = "postgres"; + settings = { + service.DISABLE_REGISTRATION = true; + server.HTTP_PORT = 3001; + }; + }; + services.openssh.enable = true; + environment.systemPackages = with pkgs; [ gitea git jq gawk ]; + networking.firewall.allowedTCPPorts = [ 3000 ]; + }; + skipLint = true; + testScript = + let + scripts.mktoken = pkgs.writeText "token.sql" '' + INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all'); + ''; + + scripts.git-setup = pkgs.writeShellScript "setup.sh" '' + set -x + mkdir -p /tmp/repo $HOME/.ssh + cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk + chmod 0400 $HOME/.ssh/privk + git -C /tmp/repo init + cp ${smallDrv} /tmp/repo/jobset.nix + git -C /tmp/repo add . 
+ git config --global user.email test@localhost + git config --global user.name test + git -C /tmp/repo commit -m 'Initial import' + git -C /tmp/repo remote add origin gitea@machine:root/repo + GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \ + git -C /tmp/repo push origin master + git -C /tmp/repo log >&2 + ''; + + scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" '' + set -x + su -l hydra -c "hydra-create-user root --email-address \ + 'alice@example.org' --password foobar --role admin" + + URL=http://localhost:3000 + USERNAME="root" + PASSWORD="foobar" + PROJECT_NAME="trivial" + JOBSET_NAME="trivial" + mycurl() { + curl --referer $URL -H "Accept: application/json" \ + -H "Content-Type: application/json" $@ + } + + cat >data.json <data.json <data.json < $out; exit 0"]; + }; + } + ''; + in + '' + import json + + machine.start() + machine.wait_for_unit("multi-user.target") + machine.wait_for_open_port(3000) + machine.wait_for_open_port(3001) + + machine.succeed( + "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create " + + "--username root --password root --email test@localhost'" + ) + machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'") + + machine.succeed( + "curl --fail -X POST http://localhost:3001/api/v1/user/repos " + + "-H 'Accept: application/json' -H 'Content-Type: application/json' " + + f"-H 'Authorization: token ${api_token}'" + + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\''' + ) + + machine.succeed( + "curl --fail -X POST http://localhost:3001/api/v1/user/keys " + + "-H 'Accept: application/json' -H 'Content-Type: application/json' " + + f"-H 'Authorization: token ${api_token}'" + + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\''' + ) + + machine.succeed( + "${scripts.git-setup}" + ) + + machine.succeed( + "${scripts.hydra-setup}" + ) + + machine.wait_until_succeeds( + 'curl -Lf -s 
http://localhost:3000/build/1 -H "Accept: application/json" ' + + '| jq .buildstatus | xargs test 0 -eq' + ) + + data = machine.succeed( + 'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" ' + + "-H 'Accept: application/json' -H 'Content-Type: application/json' " + + f"-H 'Authorization: token ${api_token}'" + ) + + response = json.loads(data) + + assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!" + assert response[0]['status'] == "success", "Expected finished status to be success!" + assert response[1]['status'] == "pending", "Expected queued status to be pending!" + + machine.shutdown() + ''; + }); + + validate-openapi = forEachSystem (system: + let pkgs = nixpkgs.legacyPackages.${system}; in + pkgs.runCommand "validate-openapi" + { buildInputs = [ pkgs.openapi-generator-cli ]; } + '' + openapi-generator-cli validate -i ${./hydra-api.yaml} + touch $out + ''); + +} diff --git a/package.nix b/package.nix new file mode 100644 index 00000000..5c1a7860 --- /dev/null +++ b/package.nix @@ -0,0 +1,284 @@ +{ stdenv +, lib +, fileset + +, rawSrc + +, buildEnv + +, perlPackages + +, nixComponents +, git + +, makeWrapper +, meson +, ninja +, nukeReferences +, pkg-config +, mdbook + +, unzip +, libpqxx +, top-git +, mercurial +, darcs +, subversion +, breezy +, openssl +, bzip2 +, libxslt +, perl +, pixz +, boost +, postgresql_13 +, nlohmann_json +, prometheus-cpp + +, cacert +, foreman +, glibcLocales +, libressl +, openldap +, python3 + +, openssh +, coreutils +, findutils +, gzip +, xz +, gnutar +, gnused +, nix-eval-jobs + +, rpm +, dpkg +, cdrkit +}: + +let + perlDeps = buildEnv { + name = "hydra-perl-deps"; + paths = lib.closePropagation + ([ + nixComponents.nix-perl-bindings + git + ] ++ (with perlPackages; [ + AuthenSASL + CatalystActionREST + CatalystAuthenticationStoreDBIxClass + CatalystAuthenticationStoreLDAP + CatalystDevel + 
CatalystPluginAccessLog + CatalystPluginAuthorizationRoles + CatalystPluginCaptcha + CatalystPluginPrometheusTiny + CatalystPluginSessionStateCookie + CatalystPluginSessionStoreFastMmap + CatalystPluginStackTrace + CatalystTraitForRequestProxyBase + CatalystViewDownload + CatalystViewJSON + CatalystViewTT + CatalystXRoleApplicator + CatalystXScriptServerStarman + CryptPassphrase + CryptPassphraseArgon2 + CryptRandPasswd + DataDump + DateTime + DBDPg + DBDSQLite + DBIxClassHelpers + DigestSHA1 + EmailMIME + EmailSender + FileCopyRecursive + FileLibMagic + FileSlurper + FileWhich + IOCompress + IPCRun + IPCRun3 + JSON + JSONMaybeXS + JSONXS + ListSomeUtils + LWP + LWPProtocolHttps + ModulePluggable + NetAmazonS3 + NetPrometheus + NetStatsd + PadWalker + ParallelForkManager + PerlCriticCommunity + PrometheusTinyShared + ReadonlyX + SetScalar + SQLSplitStatement + Starman + StringCompareConstantTime + SysHostnameLong + TermSizeAny + TermReadKey + Test2Harness + TestPostgreSQL + TextDiff + TextTable + UUID4Tiny + YAML + XMLSimple + ])); + }; + + version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}"; +in +stdenv.mkDerivation (finalAttrs: { + pname = "hydra"; + inherit version; + + src = fileset.toSource { + root = ./.; + fileset = fileset.unions ([ + ./doc + ./meson.build + ./nixos-modules + ./src + ./t + ./version.txt + ./.perlcriticrc + ]); + }; + + outputs = [ "out" "doc" ]; + + strictDeps = true; + + nativeBuildInputs = [ + makeWrapper + meson + ninja + nukeReferences + pkg-config + mdbook + nixComponents.nix-cli + perlDeps + perl + unzip + ]; + + buildInputs = [ + libpqxx + openssl + libxslt + nixComponents.nix-util + nixComponents.nix-store + nixComponents.nix-main + perlDeps + perl + boost + nlohmann_json + prometheus-cpp + ]; + + nativeCheckInputs = [ + bzip2 + darcs + foreman + top-git + mercurial + subversion + breezy + openldap + postgresql_13 + pixz + nix-eval-jobs + ]; + 
+ checkInputs = [ + cacert + glibcLocales + libressl.nc + python3 + nixComponents.nix-cli + ]; + + hydraPath = lib.makeBinPath ( + [ + subversion + openssh + nixComponents.nix-cli + coreutils + findutils + pixz + gzip + bzip2 + xz + gnutar + unzip + git + top-git + mercurial + darcs + gnused + breezy + nix-eval-jobs + ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] + ); + + OPENLDAP_ROOT = openldap; + + mesonBuildType = "release"; + + postPatch = '' + patchShebangs . + ''; + + shellHook = '' + pushd $(git rev-parse --show-toplevel) >/dev/null + + PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH + PERL5LIB=$(pwd)/src/lib:$PERL5LIB + export HYDRA_HOME="$(pwd)/src/" + mkdir -p .hydra-data + export HYDRA_DATA="$(pwd)/.hydra-data" + export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444' + + popd >/dev/null + ''; + + doCheck = true; + + mesonCheckFlags = [ "--verbose" ]; + + preCheck = '' + export LOGNAME=''${LOGNAME:-foo} + # set $HOME for bzr so it can create its trace file + export HOME=$(mktemp -d) + ''; + + postInstall = '' + mkdir -p $out/nix-support + + for i in $out/bin/*; do + read -n 4 chars < $i + if [[ $chars =~ ELF ]]; then continue; fi + wrapProgram $i \ + --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \ + --prefix PATH ':' $out/bin:$hydraPath \ + --set HYDRA_RELEASE ${version} \ + --set HYDRA_HOME $out/libexec/hydra \ + --set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \ + --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"} + done + ''; + + dontStrip = true; + + meta.description = "Build of Hydra on ${stdenv.system}"; + passthru = { + inherit perlDeps; + nix = nixComponents.nix-cli; + }; +}) diff --git a/shell.nix b/shell.nix index 9e967032..1ad58f49 100644 --- a/shell.nix +++ b/shell.nix @@ -1,6 +1,6 @@ # The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and # returns an attribute set of the shape `{ defaultNix, shellNix }` 
-(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) { +(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { src = ./.; }).shellNix diff --git a/src/Makefile.am b/src/Makefile.am deleted file mode 100644 index a28780b6..00000000 --- a/src/Makefile.am +++ /dev/null @@ -1,3 +0,0 @@ -SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf -BOOTCLEAN_SUBDIRS = $(SUBDIRS) -DIST_SUBDIRS = $(SUBDIRS) diff --git a/src/hydra-eval-jobs/Makefile.am b/src/hydra-eval-jobs/Makefile.am deleted file mode 100644 index 7a4e9c91..00000000 --- a/src/hydra-eval-jobs/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -bin_PROGRAMS = hydra-eval-jobs - -hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc -hydra_eval_jobs_LDADD = $(NIX_LIBS) -hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc deleted file mode 100644 index 918bd451..00000000 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ /dev/null @@ -1,558 +0,0 @@ -#include -#include -#include -#include - -#include "shared.hh" -#include "store-api.hh" -#include "eval.hh" -#include "eval-inline.hh" -#include "util.hh" -#include "get-drvs.hh" -#include "globals.hh" -#include "common-eval-args.hh" -#include "flake/flakeref.hh" -#include "flake/flake.hh" -#include "attr-path.hh" -#include "derivations.hh" -#include "local-fs-store.hh" - -#include "hydra-config.hh" - -#include -#include -#include - -#include - -void check_pid_status_nonblocking(pid_t check_pid) { - // Only check 'initialized' and known PID's - if (check_pid <= 0) { return; } - - int wstatus = 0; - pid_t pid = waitpid(check_pid, &wstatus, WNOHANG); - // -1 = failure, WNOHANG: 0 = no change - if (pid <= 0) { return; } - - std::cerr << "child process (" << pid << ") "; - - if (WIFEXITED(wstatus)) { - std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl; - } else if 
(WIFSIGNALED(wstatus)) { - std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl; - } else if (WIFSTOPPED(wstatus)) { - std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl; - } else if (WIFCONTINUED(wstatus)) { - std::cerr << "continued" << std::endl; - } -} - -using namespace nix; - -static Path gcRootsDir; -static size_t maxMemorySize; - -struct MyArgs : MixEvalArgs, MixCommonArgs -{ - Path releaseExpr; - bool flake = false; - bool dryRun = false; - - MyArgs() : MixCommonArgs("hydra-eval-jobs") - { - addFlag({ - .longName = "gc-roots-dir", - .description = "garbage collector roots directory", - .labels = {"path"}, - .handler = {&gcRootsDir} - }); - - addFlag({ - .longName = "dry-run", - .description = "don't create store derivations", - .handler = {&dryRun, true} - }); - - addFlag({ - .longName = "flake", - .description = "build a flake", - .handler = {&flake, true} - }); - - expectArg("expr", &releaseExpr); - } -}; - -static MyArgs myArgs; - -static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute) -{ - Strings res; - std::function rec; - - rec = [&](Value & v) { - state.forceValue(v, noPos); - if (v.type() == nString) - res.push_back(v.string.s); - else if (v.isList()) - for (unsigned int n = 0; n < v.listSize(); ++n) - rec(*v.listElems()[n]); - else if (v.type() == nAttrs) { - auto a = v.attrs->find(state.symbols.create(subAttribute)); - if (a != v.attrs->end()) - res.push_back(std::string(state.forceString(*a->value))); - } - }; - - Value * v = drv.queryMeta(name); - if (v) rec(*v); - - return concatStringsSep(", ", res); -} - -static void worker( - EvalState & state, - Bindings & autoArgs, - AutoCloseFD & to, - AutoCloseFD & from) -{ - Value vTop; - - if (myArgs.flake) { - using namespace flake; - - auto flakeRef = parseFlakeRef(myArgs.releaseExpr); - - auto vFlake = state.allocValue(); - - auto lockedFlake = lockFlake(state, flakeRef, - LockFlags { - 
.updateLockFile = false, - .useRegistries = false, - .allowMutable = false, - }); - - callFlake(state, lockedFlake, *vFlake); - - auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value; - state.forceValue(*vOutputs, noPos); - - auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs")); - if (!aHydraJobs) - aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks")); - if (!aHydraJobs) - throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef); - - vTop = *aHydraJobs->value; - - } else { - state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop); - } - - auto vRoot = state.allocValue(); - state.autoCallFunction(autoArgs, vTop, *vRoot); - - while (true) { - /* Wait for the master to send us a job name. */ - writeLine(to.get(), "next"); - - auto s = readLine(from.get()); - if (s == "exit") break; - if (!hasPrefix(s, "do ")) abort(); - std::string attrPath(s, 3); - - debug("worker process %d at '%s'", getpid(), attrPath); - - /* Evaluate it and send info back to the master. 
*/ - nlohmann::json reply; - - try { - auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first; - - auto v = state.allocValue(); - state.autoCallFunction(autoArgs, *vTmp, *v); - - if (auto drv = getDerivation(state, *v, false)) { - - DrvInfo::Outputs outputs = drv->queryOutputs(); - - if (drv->querySystem() == "unknown") - throw EvalError("derivation must have a 'system' attribute"); - - auto drvPath = state.store->printStorePath(drv->requireDrvPath()); - - nlohmann::json job; - - job["nixName"] = drv->queryName(); - job["system"] =drv->querySystem(); - job["drvPath"] = drvPath; - job["description"] = drv->queryMetaString("description"); - job["license"] = queryMetaStrings(state, *drv, "license", "shortName"); - job["homepage"] = drv->queryMetaString("homepage"); - job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email"); - job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100); - job["timeout"] = drv->queryMetaInt("timeout", 36000); - job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200); - job["isChannel"] = drv->queryMetaBool("isHydraChannel", false); - - /* If this is an aggregate, then get its constituents. */ - auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, *a->pos)) { - auto a = v->attrs->get(state.symbols.create("constituents")); - if (!a) - throw EvalError("derivation must have a ‘constituents’ attribute"); - - - PathSet context; - state.coerceToString(*a->pos, *a->value, context, true, false); - for (auto & i : context) - if (i.at(0) == '!') { - size_t index = i.find("!", 1); - job["constituents"].push_back(std::string(i, index + 1)); - } - - state.forceList(*a->value, *a->pos); - for (unsigned int n = 0; n < a->value->listSize(); ++n) { - auto v = a->value->listElems()[n]; - state.forceValue(*v, noPos); - if (v->type() == nString) - job["namedConstituents"].push_back(state.forceStringNoCtx(*v)); - } - } - - /* Register the derivation as a GC root. 
!!! This - registers roots for jobs that we may have already - done. */ - auto localStore = state.store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root); - } - - nlohmann::json out; - for (auto & j : outputs) - // FIXME: handle CA/impure builds. - if (j.second) - out[j.first] = state.store->printStorePath(*j.second); - job["outputs"] = std::move(out); - - reply["job"] = std::move(job); - } - - else if (v->type() == nAttrs) { - auto attrs = nlohmann::json::array(); - StringSet ss; - for (auto & i : v->attrs->lexicographicOrder()) { - std::string name(i->name); - if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) { - printError("skipping job with illegal name '%s'", name); - continue; - } - attrs.push_back(name); - } - reply["attrs"] = std::move(attrs); - } - - else if (v->type() == nNull) - ; - - else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v)); - - } catch (EvalError & e) { - auto msg = e.msg(); - // Transmits the error we got from the previous evaluation - // in the JSON output. - reply["error"] = filterANSIEscapes(msg, true); - // Don't forget to print it into the STDERR log, this is - // what's shown in the Hydra UI. - printError(msg); - } - - writeLine(to.get(), reply.dump()); - - /* If our RSS exceeds the maximum, exit. The master will - start a new process. */ - struct rusage r; - getrusage(RUSAGE_SELF, &r); - if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break; - } - - writeLine(to.get(), "restart"); -} - -int main(int argc, char * * argv) -{ - /* Prevent undeclared dependencies in the evaluation via - $NIX_PATH. 
*/ - unsetenv("NIX_PATH"); - - return handleExceptions(argv[0], [&]() { - - auto config = std::make_unique(); - - auto nrWorkers = config->getIntOption("evaluator_workers", 1); - maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096); - - initNix(); - initGC(); - - myArgs.parseCmdline(argvToStrings(argc, argv)); - - auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake); - - /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */ - settings.builders = ""; - - /* Prevent access to paths outside of the Nix search path and - to the environment. */ - evalSettings.restrictEval = true; - - /* When building a flake, use pure evaluation (no access to - 'getEnv', 'currentSystem' etc. */ - evalSettings.pureEval = pureEval; - - if (myArgs.dryRun) settings.readOnlyMode = true; - - if (myArgs.releaseExpr == "") throw UsageError("no expression specified"); - - if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified"); - - struct State - { - std::set todo{""}; - std::set active; - nlohmann::json jobs; - std::exception_ptr exc; - }; - - std::condition_variable wakeup; - - Sync state_; - - /* Start a handler thread per worker process. */ - auto handler = [&]() - { - pid_t pid = -1; - try { - AutoCloseFD from, to; - - while (true) { - - /* Start a new worker process if necessary. 
*/ - if (pid == -1) { - Pipe toPipe, fromPipe; - toPipe.create(); - fromPipe.create(); - pid = startProcess( - [&, - to{std::make_shared(std::move(fromPipe.writeSide))}, - from{std::make_shared(std::move(toPipe.readSide))} - ]() - { - try { - EvalState state(myArgs.searchPath, openStore()); - Bindings & autoArgs = *myArgs.getAutoArgs(state); - worker(state, autoArgs, *to, *from); - } catch (Error & e) { - nlohmann::json err; - auto msg = e.msg(); - err["error"] = filterANSIEscapes(msg, true); - printError(msg); - writeLine(to->get(), err.dump()); - // Don't forget to print it into the STDERR log, this is - // what's shown in the Hydra UI. - writeLine(to->get(), "restart"); - } - }, - ProcessOptions { .allowVfork = false }); - from = std::move(fromPipe.readSide); - to = std::move(toPipe.writeSide); - debug("created worker process %d", pid); - } - - /* Check whether the existing worker process is still there. */ - auto s = readLine(from.get()); - if (s == "restart") { - pid = -1; - continue; - } else if (s != "next") { - auto json = nlohmann::json::parse(s); - throw Error("worker error: %s", (std::string) json["error"]); - } - - /* Wait for a job name to become available. */ - std::string attrPath; - - while (true) { - checkInterrupt(); - auto state(state_.lock()); - if ((state->todo.empty() && state->active.empty()) || state->exc) { - writeLine(to.get(), "exit"); - return; - } - if (!state->todo.empty()) { - attrPath = *state->todo.begin(); - state->todo.erase(state->todo.begin()); - state->active.insert(attrPath); - break; - } else - state.wait(wakeup); - } - - /* Tell the worker to evaluate it. */ - writeLine(to.get(), "do " + attrPath); - - /* Wait for the response. */ - auto response = nlohmann::json::parse(readLine(from.get())); - - /* Handle the response. 
*/ - StringSet newAttrs; - - if (response.find("job") != response.end()) { - auto state(state_.lock()); - state->jobs[attrPath] = response["job"]; - } - - if (response.find("attrs") != response.end()) { - for (auto & i : response["attrs"]) { - auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i; - newAttrs.insert(s); - } - } - - if (response.find("error") != response.end()) { - auto state(state_.lock()); - state->jobs[attrPath]["error"] = response["error"]; - } - - /* Add newly discovered job names to the queue. */ - { - auto state(state_.lock()); - state->active.erase(attrPath); - for (auto & s : newAttrs) - state->todo.insert(s); - wakeup.notify_all(); - } - } - } catch (...) { - check_pid_status_nonblocking(pid); - auto state(state_.lock()); - state->exc = std::current_exception(); - wakeup.notify_all(); - } - }; - - std::vector threads; - for (size_t i = 0; i < nrWorkers; i++) - threads.emplace_back(std::thread(handler)); - - for (auto & thread : threads) - thread.join(); - - auto state(state_.lock()); - - if (state->exc) - std::rethrow_exception(state->exc); - - /* For aggregate jobs that have named consistuents - (i.e. constituents that are a job name rather than a - derivation), look up the referenced job and add it to the - dependencies of the aggregate derivation. 
*/ - auto store = openStore(); - - for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) { - auto jobName = i.key(); - auto & job = i.value(); - - auto named = job.find("namedConstituents"); - if (named == job.end()) continue; - - std::unordered_map brokenJobs; - auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state]( - const std::string & childJobName) -> std::optional { - auto childJob = state->jobs.find(childJobName); - if (childJob == state->jobs.end()) { - printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName); - brokenJobs[childJobName] = "does not exist"; - return std::nullopt; - } - if (childJob->find("error") != childJob->end()) { - std::string error = (*childJob)["error"]; - printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error); - brokenJobs[childJobName] = error; - return std::nullopt; - } - return *childJob; - }; - - if (myArgs.dryRun) { - for (std::string jobName2 : *named) { - auto job2 = getNonBrokenJobOrRecordError(jobName2); - if (!job2) { - continue; - } - std::string drvPath2 = (*job2)["drvPath"]; - job["constituents"].push_back(drvPath2); - } - } else { - auto drvPath = store->parseStorePath((std::string) job["drvPath"]); - auto drv = store->readDerivation(drvPath); - - for (std::string jobName2 : *named) { - auto job2 = getNonBrokenJobOrRecordError(jobName2); - if (!job2) { - continue; - } - auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); - auto drv2 = store->readDerivation(drvPath2); - job["constituents"].push_back(store->printStorePath(drvPath2)); - drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first}; - } - - if (brokenJobs.empty()) { - std::string drvName(drvPath.name()); - assert(hasSuffix(drvName, drvExtension)); - drvName.resize(drvName.size() - drvExtension.size()); - - auto hashModulo = hashDerivationModulo(*store, drv, true); - if (hashModulo.kind != DrvHash::Kind::Regular) continue; - auto h = 
hashModulo.hashes.find("out"); - if (h == hashModulo.hashes.end()) continue; - auto outPath = store->makeOutputPath("out", h->second, drvName); - drv.env["out"] = store->printStorePath(outPath); - drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath }); - auto newDrvPath = store->printStorePath(writeDerivation(*store, drv)); - - debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath); - - job["drvPath"] = newDrvPath; - job["outputs"]["out"] = store->printStorePath(outPath); - } - } - - job.erase("namedConstituents"); - - /* Register the derivation as a GC root. !!! This - registers roots for jobs that we may have already - done. */ - auto localStore = store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - auto drvPath = job["drvPath"].get(); - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root); - } - - if (!brokenJobs.empty()) { - std::stringstream ss; - for (const auto& [jobName, error] : brokenJobs) { - ss << jobName << ": " << error << "\n"; - } - job["error"] = ss.str(); - } - } - - std::cout << state->jobs.dump(2) << "\n"; - }); -} diff --git a/src/hydra-evaluator/Makefile.am b/src/hydra-evaluator/Makefile.am deleted file mode 100644 index 73638cfe..00000000 --- a/src/hydra-evaluator/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -bin_PROGRAMS = hydra-evaluator - -hydra_evaluator_SOURCES = hydra-evaluator.cc -hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx -hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations diff --git a/src/hydra-evaluator/hydra-evaluator.cc b/src/hydra-evaluator/hydra-evaluator.cc index 2d7e68d9..52664188 100644 --- a/src/hydra-evaluator/hydra-evaluator.cc +++ b/src/hydra-evaluator/hydra-evaluator.cc @@ -1,7 +1,8 @@ #include "db.hh" #include "hydra-config.hh" -#include "pool.hh" -#include "shared.hh" +#include +#include 
+#include #include #include @@ -37,7 +38,7 @@ class JobsetId { friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs); std::string display() const { - return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id); + return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id); } }; bool operator==(const JobsetId & lhs, const JobsetId & rhs) @@ -366,6 +367,9 @@ struct Evaluator printInfo("received jobset event"); } + } catch (pqxx::broken_connection & e) { + printError("Database connection broken: %s", e.what()); + std::_Exit(1); } catch (std::exception & e) { printError("exception in database monitor thread: %s", e.what()); sleep(30); @@ -473,6 +477,9 @@ struct Evaluator while (true) { try { loop(); + } catch (pqxx::broken_connection & e) { + printError("Database connection broken: %s", e.what()); + std::_Exit(1); } catch (std::exception & e) { printError("exception in main loop: %s", e.what()); sleep(30); diff --git a/src/hydra-evaluator/meson.build b/src/hydra-evaluator/meson.build new file mode 100644 index 00000000..53ddc354 --- /dev/null +++ b/src/hydra-evaluator/meson.build @@ -0,0 +1,10 @@ +hydra_evaluator = executable('hydra-evaluator', + 'hydra-evaluator.cc', + dependencies: [ + libhydra_dep, + nix_util_dep, + nix_main_dep, + pqxx_dep, + ], + install: true, +) diff --git a/src/hydra-queue-runner/Makefile.am b/src/hydra-queue-runner/Makefile.am deleted file mode 100644 index 117112f6..00000000 --- a/src/hydra-queue-runner/Makefile.am +++ /dev/null @@ -1,8 +0,0 @@ -bin_PROGRAMS = hydra-queue-runner - -hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \ - builder.cc build-result.cc build-remote.cc \ - hydra-build-result.hh counter.hh state.hh db.hh \ - nar-extractor.cc nar-extractor.hh -hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core -hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations diff --git 
a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 57a5f0df..b372e7dd 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -5,107 +5,77 @@ #include #include -#include "build-result.hh" -#include "serve-protocol.hh" +#include +#include +#include +#include +#include #include "state.hh" -#include "util.hh" -#include "worker-protocol.hh" -#include "finally.hh" -#include "url.hh" +#include +#include +#include +#include +#include +#include +#include +#include using namespace nix; - -struct Child +bool ::Machine::isLocalhost() const { - Pid pid; - AutoCloseFD to, from; -}; - - -static void append(Strings & dst, const Strings & src) -{ - dst.insert(dst.end(), src.begin(), src.end()); + return storeUri.params.empty() && std::visit(overloaded { + [](const StoreReference::Auto &) { + return true; + }, + [](const StoreReference::Specified & s) { + return + (s.scheme == "local" || s.scheme == "unix") || + ((s.scheme == "ssh" || s.scheme == "ssh-ng") && + s.authority == "localhost"); + }, + }, storeUri.variant); } -static Strings extraStoreArgs(std::string & machine) +namespace nix::build_remote { + +static std::unique_ptr openConnection( + ::Machine::ptr machine, SSHMaster & master) { - Strings result; - try { - auto parsed = parseURL(machine); - if (parsed.scheme != "ssh") { - throw SysError("Currently, only (legacy-)ssh stores are supported!"); - } - machine = parsed.authority.value_or(""); - auto remoteStore = parsed.query.find("remote-store"); - if (remoteStore != parsed.query.end()) { - result = {"--store", shellEscape(remoteStore->second)}; - } - } catch (BadURL &) { - // We just try to continue with `machine->sshName` here for backwards compat. 
- } - - return result; -} - -static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child) -{ - std::string pgmName; - Pipe to, from; - to.create(); - from.create(); - - Strings argv; + Strings command = {"nix-store", "--serve", "--write"}; if (machine->isLocalhost()) { - pgmName = "nix-store"; - argv = {"nix-store", "--builders", "", "--serve", "--write"}; + command.push_back("--builders"); + command.push_back(""); } else { - pgmName = "ssh"; - auto sshName = machine->sshName; - Strings extraArgs = extraStoreArgs(sshName); - argv = {"ssh", sshName}; - if (machine->sshKey != "") append(argv, {"-i", machine->sshKey}); - if (machine->sshPublicHostKey != "") { - Path fileName = tmpDir + "/host-key"; - auto p = machine->sshName.find("@"); - std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName; - writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n"); - append(argv, {"-oUserKnownHostsFile=" + fileName}); + auto remoteStore = machine->storeUri.params.find("remote-store"); + if (remoteStore != machine->storeUri.params.end()) { + command.push_back("--store"); + command.push_back(escapeShellArgAlways(remoteStore->second)); } - append(argv, - { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes" - , "--", "nix-store", "--serve", "--write" }); - append(argv, extraArgs); } - child.pid = startProcess([&]() { - restoreProcessContext(); - - if (dup2(to.readSide.get(), STDIN_FILENO) == -1) - throw SysError("cannot dup input pipe to stdin"); - - if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1) - throw SysError("cannot dup output pipe to stdout"); - - if (dup2(stderrFD, STDERR_FILENO) == -1) - throw SysError("cannot dup stderr"); - - execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast - - throw SysError("cannot start %s", pgmName); + auto ret = master.startCommand(std::move(command), { + "-a", "-oBatchMode=yes", 
"-oConnectTimeout=60", "-oTCPKeepAlive=yes" }); - to.readSide = -1; - from.writeSide = -1; + // XXX: determine the actual max value we can use from /proc. - child.to = to.writeSide.release(); - child.from = from.readSide.release(); + // FIXME: Should this be upstreamed into `startCommand` in Nix? + + int pipesize = 1024 * 1024; + + fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize); + fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize); + + return ret; } -static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore, - FdSource & from, FdSink & to, const StorePathSet & paths, - bool useSubstitutes = false) +static void copyClosureTo( + ::Machine::Connection & conn, + Store & destStore, + const StorePathSet & paths, + SubstituteFlag useSubstitutes = NoSubstitute) { StorePathSet closure; destStore.computeFSClosure(paths, closure); @@ -115,13 +85,10 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore, garbage-collect paths that are already there. Optionally, ask the remote host to substitute missing paths. */ // FIXME: substitute output pollutes our build log - to << cmdQueryValidPaths << 1 << useSubstitutes; - worker_proto::write(destStore, to, closure); - to.flush(); - /* Get back the set of paths that are already valid on the remote host. 
*/ - auto present = worker_proto::read(destStore, from, Phantom {}); + auto present = conn.queryValidPaths( + destStore, true, closure, useSubstitutes); if (present.size() == closure.size()) return; @@ -133,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore, printMsg(lvlDebug, "sending %d missing paths", missing.size()); - std::unique_lock sendLock(sendMutex, + std::unique_lock sendLock(conn.machine->state->sendLock, std::chrono::seconds(600)); - to << cmdImportPaths; - destStore.exportPaths(missing, to); - to.flush(); + conn.to << ServeProto::Command::ImportPaths; + destStore.exportPaths(missing, conn.to); + conn.to.flush(); - if (readInt(from) != 1) + if (readInt(conn.from) != 1) throw Error("remote machine failed to import closure"); } // FIXME: use Store::topoSortPaths(). -StorePaths reverseTopoSortPaths(const std::map & paths) +static StorePaths reverseTopoSortPaths(const std::map & paths) { StorePaths sorted; StorePathSet visited; @@ -174,40 +141,304 @@ StorePaths reverseTopoSortPaths(const std::map & paths return sorted; } +static std::pair openLogFile(const std::string & logDir, const StorePath & drvPath) +{ + std::string base(drvPath.to_string()); + auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2); + + createDirs(dirOf(logFile)); + + AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666); + if (!logFD) throw SysError("creating log file ‘%s’", logFile); + + return {std::move(logFile), std::move(logFD)}; +} + +static BasicDerivation sendInputs( + State & state, + Step & step, + Store & localStore, + Store & destStore, + ::Machine::Connection & conn, + unsigned int & overhead, + counter & nrStepsWaiting, + counter & nrStepsCopyingTo +) +{ + /* Replace the input derivations by their output paths to send a + minimal closure to the builder. + + `tryResolve` currently does *not* rewrite input addresses, so it + is safe to do this in all cases. 
(It should probably have a mode + to do that, however, but we would not use it here.) + */ + BasicDerivation basicDrv = ({ + auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore); + if (!maybeBasicDrv) + throw Error( + "the derivation '%s' can’t be resolved. It’s probably " + "missing some outputs", + localStore.printStorePath(step.drvPath)); + *maybeBasicDrv; + }); + + /* Ensure that the inputs exist in the destination store. This is + a no-op for regular stores, but for the binary cache store, + this will copy the inputs to the binary cache from the local + store. */ + if (&localStore != &destStore) { + copyClosure(localStore, destStore, + step.drv->inputSrcs, + NoRepair, NoCheckSigs, NoSubstitute); + } + + { + auto mc1 = std::make_shared>(nrStepsWaiting); + mc1.reset(); + MaintainCount mc2(nrStepsCopyingTo); + + printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’", + localStore.printStorePath(step.drvPath), conn.machine->storeUri.render()); + + auto now1 = std::chrono::steady_clock::now(); + + /* Copy the input closure. 
*/ + if (conn.machine->isLocalhost()) { + StorePathSet closure; + destStore.computeFSClosure(basicDrv.inputSrcs, closure); + copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute); + } else { + copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute); + } + + auto now2 = std::chrono::steady_clock::now(); + + overhead += std::chrono::duration_cast(now2 - now1).count(); + } + + return basicDrv; +} + +static BuildResult performBuild( + ::Machine::Connection & conn, + Store & localStore, + StorePath drvPath, + const BasicDerivation & drv, + const ServeProto::BuildOptions & options, + counter & nrStepsBuilding +) +{ + conn.putBuildDerivationRequest(localStore, drvPath, drv, options); + + BuildResult result; + + time_t startTime, stopTime; + + startTime = time(0); + { + MaintainCount mc(nrStepsBuilding); + result = ServeProto::Serialise::read(localStore, conn); + } + stopTime = time(0); + + if (!result.startTime) { + // If the builder gave `startTime = 0`, use our measurements + // instead of the builder's. + // + // Note: this represents the duration of a single round, rather + // than all rounds. + result.startTime = startTime; + result.stopTime = stopTime; + } + + // If the protocol was too old to give us `builtOutputs`, initialize + // it manually by introspecting the derivation. + if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6) + { + // If the remote is too old to handle CA derivations, we can’t get this + // far anyways + assert(drv.type().hasKnownOutputPaths()); + DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore); + // Since this a `BasicDerivation`, `staticOutputHashes` will not + // do any real work. 
+ auto outputHashes = staticOutputHashes(localStore, drv); + for (auto & [outputName, output] : drvOutputs) { + auto outputPath = output.second; + // We’ve just asserted that the output paths of the derivation + // were known + assert(outputPath); + auto outputHash = outputHashes.at(outputName); + auto drvOutput = DrvOutput { outputHash, outputName }; + result.builtOutputs.insert_or_assign( + std::move(outputName), + Realisation { drvOutput, *outputPath }); + } + } + + return result; +} + +static void copyPathFromRemote( + ::Machine::Connection & conn, + NarMemberDatas & narMembers, + Store & localStore, + Store & destStore, + const ValidPathInfo & info +) +{ + /* Receive the NAR from the remote and add it to the + destination store. Meanwhile, extract all the info from the + NAR that getBuildOutput() needs. */ + auto source2 = sinkToSource([&](Sink & sink) + { + /* Note: we should only send the command to dump the store + path to the remote if the NAR is actually going to get read + by the destination store, which won't happen if this path + is already valid on the destination store. Since this + lambda function only gets executed if someone tries to read + from source2, we will send the command from here rather + than outside the lambda. 
*/ + conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path); + conn.to.flush(); + + TeeSource tee(conn.from, sink); + extractNarData(tee, localStore.printStorePath(info.path), narMembers); + }); + + destStore.addToStore(info, *source2, NoRepair, NoCheckSigs); +} + +static void copyPathsFromRemote( + ::Machine::Connection & conn, + NarMemberDatas & narMembers, + Store & localStore, + Store & destStore, + const std::map & infos +) +{ + auto pathsSorted = reverseTopoSortPaths(infos); + + for (auto & path : pathsSorted) { + auto & info = infos.find(path)->second; + copyPathFromRemote( + conn, narMembers, localStore, destStore, + ValidPathInfo { path, info }); + } + +} + +} + +/* using namespace nix::build_remote; */ + +void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult) +{ + startTime = buildResult.startTime; + stopTime = buildResult.stopTime; + timesBuilt = buildResult.timesBuilt; + errorMsg = buildResult.errorMsg; + isNonDeterministic = buildResult.isNonDeterministic; + + switch ((BuildResult::Status) buildResult.status) { + case BuildResult::Built: + stepStatus = bsSuccess; + break; + case BuildResult::Substituted: + case BuildResult::AlreadyValid: + stepStatus = bsSuccess; + isCached = true; + break; + case BuildResult::PermanentFailure: + stepStatus = bsFailed; + canCache = true; + errorMsg = ""; + break; + case BuildResult::InputRejected: + case BuildResult::OutputRejected: + stepStatus = bsFailed; + canCache = true; + break; + case BuildResult::TransientFailure: + stepStatus = bsFailed; + canRetry = true; + errorMsg = ""; + break; + case BuildResult::TimedOut: + stepStatus = bsTimedOut; + errorMsg = ""; + break; + case BuildResult::MiscFailure: + stepStatus = bsAborted; + canRetry = true; + break; + case BuildResult::LogLimitExceeded: + stepStatus = bsLogLimitExceeded; + break; + case BuildResult::NotDeterministic: + stepStatus = bsNotDeterministic; + canRetry = false; + canCache = true; + break; + 
default: + stepStatus = bsAborted; + break; + } + +} + +/* Utility guard object to auto-release a semaphore on destruction. */ +template +class SemaphoreReleaser { +public: + SemaphoreReleaser(T* s) : sem(s) {} + ~SemaphoreReleaser() { sem->release(); } + +private: + T* sem; +}; void State::buildRemote(ref destStore, - Machine::ptr machine, Step::ptr step, - unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats, + std::unique_ptr reservation, + ::Machine::ptr machine, Step::ptr step, + const ServeProto::BuildOptions & buildOptions, RemoteResult & result, std::shared_ptr activeStep, std::function updateStep, NarMemberDatas & narMembers) { assert(BuildResult::TimedOut == 8); - std::string base(step->drvPath.to_string()); - result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2); - AutoDelete autoDelete(result.logFile, false); - - createDirs(dirOf(result.logFile)); - - AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666); - if (!logFD) throw SysError("creating log file ‘%s’", result.logFile); - - nix::Path tmpDir = createTempDir(); - AutoDelete tmpDirDel(tmpDir, true); + auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath); + AutoDelete logFileDel(logFile, false); + result.logFile = logFile; try { updateStep(ssConnecting); + auto storeRef = machine->completeStoreReference(); + + auto * pSpecified = std::get_if(&storeRef.variant); + if (!pSpecified || pSpecified->scheme != "ssh") { + throw Error("Currently, only (legacy-)ssh stores are supported!"); + } + + LegacySSHStoreConfig storeConfig { + pSpecified->scheme, + pSpecified->authority, + storeRef.params + }; + + auto master = storeConfig.createSSHMaster( + false, // no SSH master yet + logFD.get()); + // FIXME: rewrite to use Store. 
- Child child; - openConnection(machine, tmpDir, logFD.get(), child); + auto child = build_remote::openConnection(machine, master); { auto activeStepState(activeStep->state_.lock()); if (activeStepState->cancelled) throw Error("step cancelled"); - activeStepState->pid = child.pid; + activeStepState->pid = child->sshPid; } Finally clearPid([&]() { @@ -222,34 +453,33 @@ void State::buildRemote(ref destStore, process. Meh. */ }); - FdSource from(child.from.get()); - FdSink to(child.to.get()); + ::Machine::Connection conn { + { + .to = child->in.get(), + .from = child->out.get(), + /* Handshake. */ + .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize + }, + /*.machine =*/ machine, + }; Finally updateStats([&]() { - bytesReceived += from.read; - bytesSent += to.written; + bytesReceived += conn.from.read; + bytesSent += conn.to.written; }); - /* Handshake. */ - unsigned int remoteVersion; + constexpr ServeProto::Version our_version = 0x206; try { - to << SERVE_MAGIC_1 << 0x204; - to.flush(); - - unsigned int magic = readInt(from); - if (magic != SERVE_MAGIC_2) - throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName); - remoteVersion = readInt(from); - if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) - throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName); - if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0) - throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName); - + conn.remoteVersion = decltype(conn)::handshake( + conn.to, + conn.from, + our_version, + machine->storeUri.render()); } catch (EndOfFile & e) { - child.pid.wait(); + child->sshPid.wait(); std::string s = chomp(readFile(result.logFile)); - throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s); + throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s); } { @@ -263,62 +493,12 @@ void State::buildRemote(ref destStore, copy the immediate sources 
of the derivation and the required outputs of the input derivations. */ updateStep(ssSendingInputs); + BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo); - StorePathSet inputs; - BasicDerivation basicDrv(*step->drv); - - for (auto & p : step->drv->inputSrcs) - inputs.insert(p); - - for (auto & input : step->drv->inputDrvs) { - auto drv2 = localStore->readDerivation(input.first); - for (auto & name : input.second) { - if (auto i = get(drv2.outputs, name)) { - auto outPath = i->path(*localStore, drv2.name, name); - inputs.insert(*outPath); - basicDrv.inputSrcs.insert(*outPath); - } - } - } - - /* Ensure that the inputs exist in the destination store. This is - a no-op for regular stores, but for the binary cache store, - this will copy the inputs to the binary cache from the local - store. */ - if (localStore != std::shared_ptr(destStore)) { - copyClosure(*localStore, *destStore, - step->drv->inputSrcs, - NoRepair, NoCheckSigs, NoSubstitute); - } - - { - auto mc1 = std::make_shared>(nrStepsWaiting); - mc1.reset(); - MaintainCount mc2(nrStepsCopyingTo); - - printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’", - localStore->printStorePath(step->drvPath), machine->sshName); - - auto now1 = std::chrono::steady_clock::now(); - - /* Copy the input closure. */ - if (machine->isLocalhost()) { - StorePathSet closure; - destStore->computeFSClosure(inputs, closure); - copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute); - } else { - copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true); - } - - auto now2 = std::chrono::steady_clock::now(); - - result.overhead += std::chrono::duration_cast(now2 - now1).count(); - } - - autoDelete.cancel(); + logFileDel.cancel(); /* Truncate the log to get rid of messages about substitutions - etc. on the remote system. */ + etc. on the remote system. 
*/ if (lseek(logFD.get(), SEEK_SET, 0) != 0) throw SysError("seeking to the start of log file ‘%s’", result.logFile); @@ -330,89 +510,21 @@ void State::buildRemote(ref destStore, /* Do the build. */ printMsg(lvlDebug, "building ‘%s’ on ‘%s’", localStore->printStorePath(step->drvPath), - machine->sshName); + machine->storeUri.render()); updateStep(ssBuilding); - to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); - writeDerivation(to, *localStore, basicDrv); - to << maxSilentTime << buildTimeout; - if (GET_PROTOCOL_MINOR(remoteVersion) >= 2) - to << maxLogSize; - if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) { - to << repeats // == build-repeat - << step->isDeterministic; // == enforce-determinism - } - to.flush(); + BuildResult buildResult = build_remote::performBuild( + conn, + *localStore, + step->drvPath, + resolvedDrv, + buildOptions, + nrStepsBuilding + ); - result.startTime = time(0); - int res; - { - MaintainCount mc(nrStepsBuilding); - res = readInt(from); - } - result.stopTime = time(0); + result.updateWithBuildResult(buildResult); - result.errorMsg = readString(from); - if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) { - result.timesBuilt = readInt(from); - result.isNonDeterministic = readInt(from); - auto start = readInt(from); - auto stop = readInt(from); - if (start && start) { - /* Note: this represents the duration of a single - round, rather than all rounds. 
*/ - result.startTime = start; - result.stopTime = stop; - } - } - if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) { - worker_proto::read(*localStore, from, Phantom {}); - } - switch ((BuildResult::Status) res) { - case BuildResult::Built: - result.stepStatus = bsSuccess; - break; - case BuildResult::Substituted: - case BuildResult::AlreadyValid: - result.stepStatus = bsSuccess; - result.isCached = true; - break; - case BuildResult::PermanentFailure: - result.stepStatus = bsFailed; - result.canCache = true; - result.errorMsg = ""; - break; - case BuildResult::InputRejected: - case BuildResult::OutputRejected: - result.stepStatus = bsFailed; - result.canCache = true; - break; - case BuildResult::TransientFailure: - result.stepStatus = bsFailed; - result.canRetry = true; - result.errorMsg = ""; - break; - case BuildResult::TimedOut: - result.stepStatus = bsTimedOut; - result.errorMsg = ""; - break; - case BuildResult::MiscFailure: - result.stepStatus = bsAborted; - result.canRetry = true; - break; - case BuildResult::LogLimitExceeded: - result.stepStatus = bsLogLimitExceeded; - break; - case BuildResult::NotDeterministic: - result.stepStatus = bsNotDeterministic; - result.canRetry = false; - result.canCache = true; - break; - default: - result.stepStatus = bsAborted; - break; - } if (result.stepStatus != bsSuccess) return; result.errorMsg = ""; @@ -421,11 +533,32 @@ void State::buildRemote(ref destStore, get a build log. */ if (result.isCached) { printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’", - localStore->printStorePath(step->drvPath), machine->sshName); + localStore->printStorePath(step->drvPath), machine->storeUri.render()); unlink(result.logFile.c_str()); result.logFile = ""; } + /* Throttle CPU-bound work. Opportunistically skip updating the current + * step, since this requires a DB roundtrip. 
*/ + if (!localWorkThrottler.try_acquire()) { + MaintainCount mc(nrStepsWaitingForDownloadSlot); + updateStep(ssWaitingForLocalSlot); + localWorkThrottler.acquire(); + } + SemaphoreReleaser releaser(&localWorkThrottler); + + /* Once we've started copying outputs, release the machine reservation + * so further builds can happen. We do not release the machine earlier + * to avoid situations where the queue runner is bottlenecked on + * copying outputs and we end up building too many things that we + * haven't been able to allow copy slots for. */ + reservation.reset(); + wakeDispatcher(); + + StorePathSet outputs; + for (auto & [_, realisation] : buildResult.builtOutputs) + outputs.insert(realisation.outPath); + /* Copy the output paths. */ if (!machine->isLocalhost() || localStore != std::shared_ptr(destStore)) { updateStep(ssReceivingOutputs); @@ -434,39 +567,10 @@ void State::buildRemote(ref destStore, auto now1 = std::chrono::steady_clock::now(); - StorePathSet outputs; - for (auto & i : step->drv->outputsAndOptPaths(*localStore)) { - if (i.second.second) - outputs.insert(*i.second.second); - } + auto infos = conn.queryPathInfos(*localStore, outputs); - /* Get info about each output path. 
*/ - std::map infos; size_t totalNarSize = 0; - to << cmdQueryPathInfos; - worker_proto::write(*localStore, to, outputs); - to.flush(); - while (true) { - auto storePathS = readString(from); - if (storePathS == "") break; - auto deriver = readString(from); // deriver - auto references = worker_proto::read(*localStore, from, Phantom {}); - readLongLong(from); // download size - auto narSize = readLongLong(from); - auto narHash = Hash::parseAny(readString(from), htSHA256); - auto ca = parseContentAddressOpt(readString(from)); - readStrings(from); // sigs - ValidPathInfo info(localStore->parseStorePath(storePathS), narHash); - assert(outputs.count(info.path)); - info.references = references; - info.narSize = narSize; - totalNarSize += info.narSize; - info.narHash = narHash; - info.ca = ca; - if (deriver != "") - info.deriver = localStore->parseStorePath(deriver); - infos.insert_or_assign(info.path, info); - } + for (auto & [_, info] : infos) totalNarSize += info.narSize; if (totalNarSize > maxOutputSize) { result.stepStatus = bsNarSizeLimitExceeded; @@ -475,43 +579,32 @@ void State::buildRemote(ref destStore, /* Copy each path. */ printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)", - localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize); - - auto pathsSorted = reverseTopoSortPaths(infos); - - for (auto & path : pathsSorted) { - auto & info = infos.find(path)->second; - - /* Receive the NAR from the remote and add it to the - destination store. Meanwhile, extract all the info from the - NAR that getBuildOutput() needs. */ - auto source2 = sinkToSource([&](Sink & sink) - { - /* Note: we should only send the command to dump the store - path to the remote if the NAR is actually going to get read - by the destination store, which won't happen if this path - is already valid on the destination store. 
Since this - lambda function only gets executed if someone tries to read - from source2, we will send the command from here rather - than outside the lambda. */ - to << cmdDumpStorePath << localStore->printStorePath(path); - to.flush(); - - TeeSource tee(from, sink); - extractNarData(tee, localStore->printStorePath(path), narMembers); - }); - - destStore->addToStore(info, *source2, NoRepair, NoCheckSigs); - } + localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize); + build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos); auto now2 = std::chrono::steady_clock::now(); result.overhead += std::chrono::duration_cast(now2 - now1).count(); } + /* Register the outputs of the newly built drv */ + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { + auto outputHashes = staticOutputHashes(*localStore, *step->drv); + for (auto & [outputName, realisation] : buildResult.builtOutputs) { + // Register the resolved drv output + destStore->registerDrvOutput(realisation); + + // Also register the unresolved one + auto unresolvedRealisation = realisation; + unresolvedRealisation.signatures.clear(); + unresolvedRealisation.id.drvHash = outputHashes.at(outputName); + destStore->registerDrvOutput(unresolvedRealisation); + } + } + /* Shut down the connection. 
*/ - child.to = -1; - child.pid.wait(); + child->in = -1; + child->sshPid.wait(); } catch (Error & e) { /* Disable this machine until a certain period of time has @@ -525,7 +618,7 @@ void State::buildRemote(ref destStore, info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4); info->lastFailure = now; int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30); - printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta); + printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta); info->disabledUntil = now + std::chrono::seconds(delta); } throw; diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc index ea8b4a6a..b0695e8b 100644 --- a/src/hydra-queue-runner/build-result.cc +++ b/src/hydra-queue-runner/build-result.cc @@ -1,7 +1,7 @@ #include "hydra-build-result.hh" -#include "store-api.hh" -#include "util.hh" -#include "fs-accessor.hh" +#include +#include +#include #include @@ -11,18 +11,18 @@ using namespace nix; BuildOutput getBuildOutput( nix::ref store, NarMemberDatas & narMembers, - const Derivation & drv) + const OutputPathMap derivationOutputs) { BuildOutput res; /* Compute the closure size. 
*/ StorePathSet outputs; StorePathSet closure; - for (auto & i : drv.outputsAndOptPaths(*store)) - if (i.second.second) { - store->computeFSClosure(*i.second.second, closure); - outputs.insert(*i.second.second); - } + for (auto& [outputName, outputPath] : derivationOutputs) { + store->computeFSClosure(outputPath, closure); + outputs.insert(outputPath); + res.outputs.insert({outputName, outputPath}); + } for (auto & path : closure) { auto info = store->queryPathInfo(path); res.closureSize += info->narSize; @@ -63,7 +63,7 @@ BuildOutput getBuildOutput( auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products"); if (productsFile == narMembers.end() || - productsFile->second.type != FSAccessor::Type::tRegular) + productsFile->second.type != SourceAccessor::Type::tRegular) continue; assert(productsFile->second.contents); @@ -94,7 +94,7 @@ BuildOutput getBuildOutput( product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path); - if (file->second.type == FSAccessor::Type::tRegular) { + if (file->second.type == SourceAccessor::Type::tRegular) { product.isRegular = true; product.fileSize = file->second.fileSize.value(); product.sha256hash = file->second.sha256.value(); @@ -107,17 +107,16 @@ BuildOutput getBuildOutput( /* If no build products were explicitly declared, then add all outputs as a product of type "nix-build". */ if (!explicitProducts) { - for (auto & [name, output] : drv.outputs) { + for (auto & [name, output] : derivationOutputs) { BuildProduct product; - auto outPath = output.path(*store, drv.name, name); - product.path = store->printStorePath(*outPath); + product.path = store->printStorePath(output); product.type = "nix-build"; product.subtype = name == "out" ? 
"" : name; - product.name = outPath->name(); + product.name = output.name(); auto file = narMembers.find(product.path); assert(file != narMembers.end()); - if (file->second.type == FSAccessor::Type::tDirectory) + if (file->second.type == SourceAccessor::Type::tDirectory) res.products.push_back(product); } } @@ -126,7 +125,7 @@ BuildOutput getBuildOutput( for (auto & output : outputs) { auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name"); if (file == narMembers.end() || - file->second.type != FSAccessor::Type::tRegular) + file->second.type != SourceAccessor::Type::tRegular) continue; res.releaseName = trim(file->second.contents.value()); // FIXME: validate release name @@ -136,7 +135,7 @@ BuildOutput getBuildOutput( for (auto & output : outputs) { auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics"); if (file == narMembers.end() || - file->second.type != FSAccessor::Type::tRegular) + file->second.type != SourceAccessor::Type::tRegular) continue; for (auto & line : tokenizeString(file->second.contents.value(), "\n")) { auto fields = tokenizeString>(line); diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index 37022522..ff0634b1 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -2,8 +2,8 @@ #include "state.hh" #include "hydra-build-result.hh" -#include "finally.hh" -#include "binary-cache-store.hh" +#include +#include using namespace nix; @@ -16,7 +16,7 @@ void setThreadName(const std::string & name) } -void State::builder(MachineReservation::ptr reservation) +void State::builder(std::unique_ptr reservation) { setThreadName("bld~" + std::string(reservation->step->drvPath.to_string())); @@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation) activeSteps_.lock()->erase(activeStep); }); + std::string machine = reservation->machine->storeUri.render(); + try { auto destStore = getDestStore(); - res = 
doBuildStep(destStore, reservation, activeStep); + // Might release the reservation. + res = doBuildStep(destStore, std::move(reservation), activeStep); } catch (std::exception & e) { printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s", - localStore->printStorePath(reservation->step->drvPath), - reservation->machine->sshName, + localStore->printStorePath(activeStep->step->drvPath), + machine, e.what()); } } - /* Release the machine and wake up the dispatcher. */ - assert(reservation.unique()); - reservation = 0; - wakeDispatcher(); - /* If there was a temporary failure, retry the step after an exponentially increasing interval. */ Step::ptr step = wstep.lock(); @@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation) State::StepResult State::doBuildStep(nix::ref destStore, - MachineReservation::ptr reservation, + std::unique_ptr reservation, std::shared_ptr activeStep) { - auto & step(reservation->step); - auto & machine(reservation->machine); + auto step(reservation->step); + auto machine(reservation->machine); { auto step_(step->state.lock()); @@ -98,8 +96,13 @@ State::StepResult State::doBuildStep(nix::ref destStore, it). */ BuildID buildId; std::optional buildDrvPath; - unsigned int maxSilentTime, buildTimeout; - unsigned int repeats = step->isDeterministic ? 1 : 0; + // Other fields set below + nix::ServeProto::BuildOptions buildOptions { + .maxLogSize = maxLogSize, + .nrRepeats = step->isDeterministic ? 
1u : 0u, + .enforceDeterminism = step->isDeterministic, + .keepFailed = false, + }; auto conn(dbPool.get()); @@ -134,18 +137,18 @@ State::StepResult State::doBuildStep(nix::ref destStore, { auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName)); if (i != jobsetRepeats.end()) - repeats = std::max(repeats, i->second); + buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second); } } if (!build) build = *dependents.begin(); buildId = build->id; buildDrvPath = build->drvPath; - maxSilentTime = build->maxSilentTime; - buildTimeout = build->buildTimeout; + buildOptions.maxSilentTime = build->maxSilentTime; + buildOptions.buildTimeout = build->buildTimeout; printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)", - localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1)); + localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1)); } if (!buildOneDone) @@ -173,7 +176,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, unlink(result.logFile.c_str()); } } catch (...) { - ignoreException(); + ignoreExceptionInDestructor(); } } }); @@ -191,7 +194,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, { auto mc = startDbUpdate(); pqxx::work txn(*conn); - stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy); + stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy); txn.commit(); } @@ -206,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, try { /* FIXME: referring builds may have conflicting timeouts. 
*/ - buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers); + buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers); } catch (Error & e) { if (activeStep->state_.lock()->cancelled) { printInfo("marking step %d of build %d as cancelled", stepNr, buildId); @@ -221,7 +224,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, if (result.stepStatus == bsSuccess) { updateStep(ssPostProcessing); - res = getBuildOutput(destStore, narMembers, *step->drv); + res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore)); } } @@ -248,7 +251,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, /* Finish the step in the database. */ if (stepNr) { pqxx::work txn(*conn); - finishBuildStep(txn, result, buildId, stepNr, machine->sshName); + finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render()); txn.commit(); } @@ -256,7 +259,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, issue). Retry a number of times. 
*/ if (result.canRetry) { printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s", - localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg); + localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg); assert(stepNr); bool retry; { @@ -275,9 +278,12 @@ State::StepResult State::doBuildStep(nix::ref destStore, assert(stepNr); - for (auto & i : step->drv->outputsAndOptPaths(*localStore)) { - if (i.second.second) - addRoot(*i.second.second); + for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) { + if (!optOutputPath) + throw Error( + "Missing output %s for derivation %d which was supposed to have succeeded", + outputName, localStore->printStorePath(step->drvPath)); + addRoot(*optOutputPath); } /* Register success in the database for all Build objects that @@ -323,7 +329,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, pqxx::work txn(*conn); for (auto & b : direct) { - printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id); + printInfo("marking build %1% as succeeded", b->id); markSucceededBuild(txn, b, res, buildId != b->id || result.isCached, result.startTime, result.stopTime); } @@ -398,7 +404,7 @@ void State::failStep( Step::ptr step, BuildID buildId, const RemoteResult & result, - Machine::ptr machine, + ::Machine::ptr machine, bool & stepFinished) { /* Register failure in the database for all Build objects that @@ -444,14 +450,14 @@ void State::failStep( build->finishedInDB) continue; createBuildStep(txn, - 0, build->id, step, machine ? machine->sshName : "", + 0, build->id, step, machine ? machine->storeUri.render() : "", result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId); } /* Mark all builds that depend on this derivation as failed. 
*/ for (auto & build : indirect) { if (build->finishedInDB) continue; - printMsg(lvlError, format("marking build %1% as failed") % build->id); + printError("marking build %1% as failed", build->id); txn.exec_params0 ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0", build->id, diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index d2bb3c90..ada25dc6 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -2,6 +2,7 @@ #include #include #include +#include #include "state.hh" @@ -39,28 +40,34 @@ void State::dispatcher() printMsg(lvlDebug, "dispatcher woken up"); nrDispatcherWakeups++; - auto now1 = std::chrono::steady_clock::now(); + auto t_before_work = std::chrono::steady_clock::now(); auto sleepUntil = doDispatch(); - auto now2 = std::chrono::steady_clock::now(); + auto t_after_work = std::chrono::steady_clock::now(); - dispatchTimeMs += std::chrono::duration_cast(now2 - now1).count(); + prom.dispatcher_time_spent_running.Increment( + std::chrono::duration_cast(t_after_work - t_before_work).count()); + dispatchTimeMs += std::chrono::duration_cast(t_after_work - t_before_work).count(); /* Sleep until we're woken up (either because a runnable build is added, or because a build finishes). 
*/ { auto dispatcherWakeup_(dispatcherWakeup.lock()); if (!*dispatcherWakeup_) { - printMsg(lvlDebug, format("dispatcher sleeping for %1%s") % + debug("dispatcher sleeping for %1%s", std::chrono::duration_cast(sleepUntil - std::chrono::system_clock::now()).count()); dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil); } *dispatcherWakeup_ = false; } + auto t_after_sleep = std::chrono::steady_clock::now(); + prom.dispatcher_time_spent_waiting.Increment( + std::chrono::duration_cast(t_after_sleep - t_after_work).count()); + } catch (std::exception & e) { - printMsg(lvlError, format("dispatcher: %1%") % e.what()); + printError("dispatcher: %s", e.what()); sleep(1); } @@ -80,17 +87,124 @@ system_time State::doDispatch() jobset.second->pruneSteps(); auto s2 = jobset.second->shareUsed(); if (s1 != s2) - printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%") - % jobset.first.first % jobset.first.second % s1 % s2); + debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%", + jobset.first.first, jobset.first.second, s1, s2); } } + system_time now = std::chrono::system_clock::now(); + /* Start steps until we're out of steps or slots. */ auto sleepUntil = system_time::max(); bool keepGoing; + /* Sort the runnable steps by priority. Priority is establised + as follows (in order of precedence): + + - The global priority of the builds that depend on the + step. This allows admins to bump a build to the front of + the queue. + + - The lowest used scheduling share of the jobsets depending + on the step. + + - The local priority of the build, as set via the build's + meta.schedulingPriority field. Note that this is not + quite correct: the local priority should only be used to + establish priority between builds in the same jobset, but + here it's used between steps in different jobsets if they + happen to have the same lowest used scheduling share. But + that's not very likely. 
+ + - The lowest ID of the builds depending on the step; + i.e. older builds take priority over new ones. + + FIXME: O(n lg n); obviously, it would be better to keep a + runnable queue sorted by priority. */ + struct StepInfo + { + Step::ptr step; + bool alreadyScheduled = false; + + /* The lowest share used of any jobset depending on this + step. */ + double lowestShareUsed = 1e9; + + /* Info copied from step->state to ensure that the + comparator is a partial ordering (see MachineInfo). */ + int highestGlobalPriority; + int highestLocalPriority; + size_t numRequiredSystemFeatures; + size_t numRevDeps; + BuildID lowestBuildID; + + StepInfo(Step::ptr step, Step::State & step_) : step(step) + { + for (auto & jobset : step_.jobsets) + lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed()); + highestGlobalPriority = step_.highestGlobalPriority; + highestLocalPriority = step_.highestLocalPriority; + numRequiredSystemFeatures = step->requiredSystemFeatures.size(); + numRevDeps = step_.rdeps.size(); + lowestBuildID = step_.lowestBuildID; + } + }; + + std::vector runnableSorted; + + struct RunnablePerType + { + unsigned int count{0}; + std::chrono::seconds waitTime{0}; + }; + + std::unordered_map runnablePerType; + + { + auto runnable_(runnable.lock()); + runnableSorted.reserve(runnable_->size()); + for (auto i = runnable_->begin(); i != runnable_->end(); ) { + auto step = i->lock(); + + /* Remove dead steps. */ + if (!step) { + i = runnable_->erase(i); + continue; + } + + ++i; + + auto & r = runnablePerType[step->systemType]; + r.count++; + + /* Skip previously failed steps that aren't ready + to be retried. 
*/ + auto step_(step->state.lock()); + r.waitTime += std::chrono::duration_cast(now - step_->runnableSince); + if (step_->tries > 0 && step_->after > now) { + if (step_->after < sleepUntil) + sleepUntil = step_->after; + continue; + } + + runnableSorted.emplace_back(step, *step_); + } + } + + sort(runnableSorted.begin(), runnableSorted.end(), + [](const StepInfo & a, const StepInfo & b) + { + return + a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority : + a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed : + a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority : + a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures : + a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps : + a.lowestBuildID < b.lowestBuildID; + }); + do { - system_time now = std::chrono::system_clock::now(); + now = std::chrono::system_clock::now(); /* Copy the currentJobs field of each machine. This is necessary to ensure that the sort comparator below is @@ -98,7 +212,7 @@ system_time State::doDispatch() filter out temporarily disabled machines. */ struct MachineInfo { - Machine::ptr machine; + ::Machine::ptr machine; unsigned long currentJobs; }; std::vector machinesSorted; @@ -138,104 +252,6 @@ system_time State::doDispatch() a.currentJobs > b.currentJobs; }); - /* Sort the runnable steps by priority. Priority is establised - as follows (in order of precedence): - - - The global priority of the builds that depend on the - step. This allows admins to bump a build to the front of - the queue. - - - The lowest used scheduling share of the jobsets depending - on the step. - - - The local priority of the build, as set via the build's - meta.schedulingPriority field. 
Note that this is not - quite correct: the local priority should only be used to - establish priority between builds in the same jobset, but - here it's used between steps in different jobsets if they - happen to have the same lowest used scheduling share. But - that's not very likely. - - - The lowest ID of the builds depending on the step; - i.e. older builds take priority over new ones. - - FIXME: O(n lg n); obviously, it would be better to keep a - runnable queue sorted by priority. */ - struct StepInfo - { - Step::ptr step; - - /* The lowest share used of any jobset depending on this - step. */ - double lowestShareUsed = 1e9; - - /* Info copied from step->state to ensure that the - comparator is a partial ordering (see MachineInfo). */ - int highestGlobalPriority; - int highestLocalPriority; - BuildID lowestBuildID; - - StepInfo(Step::ptr step, Step::State & step_) : step(step) - { - for (auto & jobset : step_.jobsets) - lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed()); - highestGlobalPriority = step_.highestGlobalPriority; - highestLocalPriority = step_.highestLocalPriority; - lowestBuildID = step_.lowestBuildID; - } - }; - - std::vector runnableSorted; - - struct RunnablePerType - { - unsigned int count{0}; - std::chrono::seconds waitTime{0}; - }; - - std::unordered_map runnablePerType; - - { - auto runnable_(runnable.lock()); - runnableSorted.reserve(runnable_->size()); - for (auto i = runnable_->begin(); i != runnable_->end(); ) { - auto step = i->lock(); - - /* Remove dead steps. */ - if (!step) { - i = runnable_->erase(i); - continue; - } - - ++i; - - auto & r = runnablePerType[step->systemType]; - r.count++; - - /* Skip previously failed steps that aren't ready - to be retried. 
*/ - auto step_(step->state.lock()); - r.waitTime += std::chrono::duration_cast(now - step_->runnableSince); - if (step_->tries > 0 && step_->after > now) { - if (step_->after < sleepUntil) - sleepUntil = step_->after; - continue; - } - - runnableSorted.emplace_back(step, *step_); - } - } - - sort(runnableSorted.begin(), runnableSorted.end(), - [](const StepInfo & a, const StepInfo & b) - { - return - a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority : - a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed : - a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority : - a.lowestBuildID < b.lowestBuildID; - }); - /* Find a machine with a free slot and find a step to run on it. Once we find such a pair, we restart the outer loop because the machine sorting will have changed. */ @@ -245,12 +261,14 @@ system_time State::doDispatch() if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue; for (auto & stepInfo : runnableSorted) { + if (stepInfo.alreadyScheduled) continue; + auto & step(stepInfo.step); /* Can this machine do this step? */ if (!mi.machine->supportsStep(step)) { debug("machine '%s' does not support step '%s' (system type '%s')", - mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform); + mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform); continue; } @@ -271,10 +289,12 @@ system_time State::doDispatch() r.count--; } + stepInfo.alreadyScheduled = true; + /* Make a slot reservation and start a thread to do the build. */ auto builderThread = std::thread(&State::builder, this, - std::make_shared(*this, step, mi.machine)); + std::make_unique(*this, step, mi.machine)); builderThread.detach(); // FIXME? 
keepGoing = true; @@ -428,7 +448,7 @@ void Jobset::pruneSteps() } -State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine) +State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine) : state(state), step(step), machine(machine) { machine->state->currentJobs++; diff --git a/src/hydra-queue-runner/hydra-build-result.hh b/src/hydra-queue-runner/hydra-build-result.hh index a3f71ae9..654bf1be 100644 --- a/src/hydra-queue-runner/hydra-build-result.hh +++ b/src/hydra-queue-runner/hydra-build-result.hh @@ -2,9 +2,9 @@ #include -#include "hash.hh" -#include "derivations.hh" -#include "store-api.hh" +#include +#include +#include #include "nar-extractor.hh" struct BuildProduct @@ -36,10 +36,12 @@ struct BuildOutput std::list products; + std::map outputs; + std::map metrics; }; BuildOutput getBuildOutput( nix::ref store, NarMemberDatas & narMembers, - const nix::Derivation & drv); + const nix::OutputPathMap derivationOutputs); diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 723bf223..a4a7f0a7 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -8,27 +9,21 @@ #include +#include + +#include #include "state.hh" #include "hydra-build-result.hh" -#include "store-api.hh" -#include "remote-store.hh" +#include +#include -#include "globals.hh" +#include #include "hydra-config.hh" -#include "json.hh" -#include "s3-binary-cache-store.hh" -#include "shared.hh" +#include +#include using namespace nix; - - -namespace nix { - -template<> void toJSON>(std::ostream & str, const std::atomic & n) { str << n; } -template<> void toJSON>(std::ostream & str, const std::atomic & n) { str << n; } -template<> void toJSON(std::ostream & str, const double & n) { str << n; } - -} +using nlohmann::json; std::string getEnvOrDie(const 
std::string & key) @@ -75,10 +70,31 @@ State::PromMetrics::PromMetrics() .Register(*registry) .Add({}) ) - , queue_max_id( - prometheus::BuildGauge() - .Name("hydraqueuerunner_queue_max_build_id_info") - .Help("Maximum build record ID in the queue") + , dispatcher_time_spent_running( + prometheus::BuildCounter() + .Name("hydraqueuerunner_dispatcher_time_spent_running") + .Help("Time (in micros) spent running the dispatcher") + .Register(*registry) + .Add({}) + ) + , dispatcher_time_spent_waiting( + prometheus::BuildCounter() + .Name("hydraqueuerunner_dispatcher_time_spent_waiting") + .Help("Time (in micros) spent waiting for the dispatcher to obtain work") + .Register(*registry) + .Add({}) + ) + , queue_monitor_time_spent_running( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_monitor_time_spent_running") + .Help("Time (in micros) spent running the queue monitor") + .Register(*registry) + .Add({}) + ) + , queue_monitor_time_spent_waiting( + prometheus::BuildCounter() + .Name("hydraqueuerunner_queue_monitor_time_spent_waiting") + .Help("Time (in micros) spent waiting for the queue monitor to obtain work") .Register(*registry) .Add({}) ) @@ -90,6 +106,7 @@ State::State(std::optional metricsAddrOpt) : config(std::make_unique()) , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0)) , dbPool(config->getIntOption("max_db_connections", 128)) + , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2))) , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30)) , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20)) , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false)) @@ -140,50 +157,29 @@ void State::parseMachines(const std::string & contents) oldMachines = *machines_; } - for (auto line : tokenizeString(contents, "\n")) { - line = trim(std::string(line, 0, line.find('#'))); - auto tokens = 
tokenizeString>(line); - if (tokens.size() < 3) continue; - tokens.resize(8); - - auto machine = std::make_shared(); - machine->sshName = tokens[0]; - machine->systemTypes = tokenizeString(tokens[1], ","); - machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2]; - if (tokens[3] != "") - machine->maxJobs = string2IntmaxJobs)>(tokens[3]).value(); - else - machine->maxJobs = 1; - machine->speedFactor = atof(tokens[4].c_str()); - if (tokens[5] == "-") tokens[5] = ""; - machine->supportedFeatures = tokenizeString(tokens[5], ","); - if (tokens[6] == "-") tokens[6] = ""; - machine->mandatoryFeatures = tokenizeString(tokens[6], ","); - for (auto & f : machine->mandatoryFeatures) - machine->supportedFeatures.insert(f); - if (tokens[7] != "" && tokens[7] != "-") - machine->sshPublicHostKey = base64Decode(tokens[7]); + for (auto && machine_ : nix::Machine::parseConfig({}, contents)) { + auto machine = std::make_shared<::Machine>(std::move(machine_)); /* Re-use the State object of the previous machine with the same name. */ - auto i = oldMachines.find(machine->sshName); + auto i = oldMachines.find(machine->storeUri.variant); if (i == oldMachines.end()) - printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName); + printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render()); else - printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName); + printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render()); machine->state = i == oldMachines.end() - ? std::make_shared() + ? 
std::make_shared<::Machine::State>() : i->second->state; - newMachines[machine->sshName] = machine; + newMachines[machine->storeUri.variant] = machine; } for (auto & m : oldMachines) if (newMachines.find(m.first) == newMachines.end()) { if (m.second->enabled) - printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first); - /* Add a disabled Machine object to make sure stats are + printInfo("removing machine ‘%1%’", m.second->storeUri.render()); + /* Add a disabled ::Machine object to make sure stats are maintained. */ - auto machine = std::make_shared(*(m.second)); + auto machine = std::make_shared<::Machine>(*(m.second)); machine->enabled = false; newMachines[m.first] = machine; } @@ -211,7 +207,7 @@ void State::monitorMachinesFile() parseMachines("localhost " + (settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get()) + " - " + std::to_string(settings.maxBuildJobs) + " 1 " - + concatStringsSep(",", settings.systemFeatures.get())); + + concatStringsSep(",", StoreConfig::getDefaultSystemFeatures())); machinesReadyLock.unlock(); return; } @@ -318,10 +314,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID if (r.affected_rows() == 0) goto restart; - for (auto & [name, output] : step->drv->outputs) + for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) txn.exec_params0 ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)", - buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name))); + buildId, stepNr, name, + output + ? 
std::optional { localStore->printStorePath(*output)} + : std::nullopt); if (status == bsBusy) txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr)); @@ -358,11 +357,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result, assert(result.logFile.find('\t') == std::string::npos); txn.exec(fmt("notify step_finished, '%d\t%d\t%s'", buildId, stepNr, result.logFile)); + + if (result.stepStatus == bsSuccess) { + // Update the corresponding `BuildStepOutputs` row to add the output path + auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr); + assert(res.size()); + StorePath drvPath = localStore->parseStorePath(res[0].as()); + // If we've finished building, all the paths should be known + for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore)) + txn.exec_params0 + ("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3", + buildId, stepNr, name, localStore->printStorePath(output)); + } } int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime, - Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath) + Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath) { restart: auto stepNr = allocBuildStep(txn, build->id); @@ -463,6 +474,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build, res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt, isCachedBuild ? 
1 : 0); + for (auto & [outputName, outputPath] : res.outputs) { + txn.exec_params0 + ("update BuildOutputs set path = $3 where build = $1 and name = $2", + build->id, + outputName, + localStore->printStorePath(outputPath) + ); + } + txn.exec_params0("delete from BuildProducts where build = $1", build->id); unsigned int productNr = 1; @@ -474,7 +494,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build, product.type, product.subtype, product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt, - product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt, + product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt, product.path, product.name, product.defaultPath); @@ -542,182 +562,174 @@ std::shared_ptr State::acquireGlobalLock() void State::dumpStatus(Connection & conn) { - std::ostringstream out; + time_t now = time(0); + json statusJson = { + {"status", "up"}, + {"time", time(0)}, + {"uptime", now - startedAt}, + {"pid", getpid()}, + {"nrQueuedBuilds", builds.lock()->size()}, + {"nrActiveSteps", activeSteps_.lock()->size()}, + {"nrStepsBuilding", nrStepsBuilding.load()}, + {"nrStepsCopyingTo", nrStepsCopyingTo.load()}, + {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()}, + {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()}, + {"nrStepsWaiting", nrStepsWaiting.load()}, + {"nrUnsupportedSteps", nrUnsupportedSteps.load()}, + {"bytesSent", bytesSent.load()}, + {"bytesReceived", bytesReceived.load()}, + {"nrBuildsRead", nrBuildsRead.load()}, + {"buildReadTimeMs", buildReadTimeMs.load()}, + {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 
0.0 : (float) buildReadTimeMs / nrBuildsRead}, + {"nrBuildsDone", nrBuildsDone.load()}, + {"nrStepsStarted", nrStepsStarted.load()}, + {"nrStepsDone", nrStepsDone.load()}, + {"nrRetries", nrRetries.load()}, + {"maxNrRetries", maxNrRetries.load()}, + {"nrQueueWakeups", nrQueueWakeups.load()}, + {"nrDispatcherWakeups", nrDispatcherWakeups.load()}, + {"dispatchTimeMs", dispatchTimeMs.load()}, + {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups}, + {"nrDbConnections", dbPool.count()}, + {"nrActiveDbUpdates", nrActiveDbUpdates.load()}, + }; { - JSONObject root(out); - time_t now = time(0); - root.attr("status", "up"); - root.attr("time", time(0)); - root.attr("uptime", now - startedAt); - root.attr("pid", getpid()); - { - auto builds_(builds.lock()); - root.attr("nrQueuedBuilds", builds_->size()); - } { auto steps_(steps.lock()); for (auto i = steps_->begin(); i != steps_->end(); ) if (i->second.lock()) ++i; else i = steps_->erase(i); - root.attr("nrUnfinishedSteps", steps_->size()); + statusJson["nrUnfinishedSteps"] = steps_->size(); } { auto runnable_(runnable.lock()); for (auto i = runnable_->begin(); i != runnable_->end(); ) if (i->lock()) ++i; else i = runnable_->erase(i); - root.attr("nrRunnableSteps", runnable_->size()); + statusJson["nrRunnableSteps"] = runnable_->size(); } - root.attr("nrActiveSteps", activeSteps_.lock()->size()); - root.attr("nrStepsBuilding", nrStepsBuilding); - root.attr("nrStepsCopyingTo", nrStepsCopyingTo); - root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom); - root.attr("nrStepsWaiting", nrStepsWaiting); - root.attr("nrUnsupportedSteps", nrUnsupportedSteps); - root.attr("bytesSent", bytesSent); - root.attr("bytesReceived", bytesReceived); - root.attr("nrBuildsRead", nrBuildsRead); - root.attr("buildReadTimeMs", buildReadTimeMs); - root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 
0.0 : (float) buildReadTimeMs / nrBuildsRead); - root.attr("nrBuildsDone", nrBuildsDone); - root.attr("nrStepsStarted", nrStepsStarted); - root.attr("nrStepsDone", nrStepsDone); - root.attr("nrRetries", nrRetries); - root.attr("maxNrRetries", maxNrRetries); if (nrStepsDone) { - root.attr("totalStepTime", totalStepTime); - root.attr("totalStepBuildTime", totalStepBuildTime); - root.attr("avgStepTime", (float) totalStepTime / nrStepsDone); - root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone); + statusJson["totalStepTime"] = totalStepTime.load(); + statusJson["totalStepBuildTime"] = totalStepBuildTime.load(); + statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone; + statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone; } - root.attr("nrQueueWakeups", nrQueueWakeups); - root.attr("nrDispatcherWakeups", nrDispatcherWakeups); - root.attr("dispatchTimeMs", dispatchTimeMs); - root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups); - root.attr("nrDbConnections", dbPool.count()); - root.attr("nrActiveDbUpdates", nrActiveDbUpdates); { - auto nested = root.object("machines"); + auto machines_json = json::object(); auto machines_(machines.lock()); for (auto & i : *machines_) { auto & m(i.second); auto & s(m->state); - auto nested2 = nested.object(m->sshName); - nested2.attr("enabled", m->enabled); - - { - auto list = nested2.list("systemTypes"); - for (auto & s : m->systemTypes) - list.elem(s); - } - - { - auto list = nested2.list("supportedFeatures"); - for (auto & s : m->supportedFeatures) - list.elem(s); - } - - { - auto list = nested2.list("mandatoryFeatures"); - for (auto & s : m->mandatoryFeatures) - list.elem(s); - } - - nested2.attr("currentJobs", s->currentJobs); - if (s->currentJobs == 0) - nested2.attr("idleSince", s->idleSince); - nested2.attr("nrStepsDone", s->nrStepsDone); - if (m->state->nrStepsDone) { - nested2.attr("totalStepTime", s->totalStepTime); - 
nested2.attr("totalStepBuildTime", s->totalStepBuildTime); - nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone); - nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone); - } - auto info(m->state->connectInfo.lock()); - nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)); - nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)); - nested2.attr("consecutiveFailures", info->consecutiveFailures); + json machine = { + {"enabled", m->enabled}, + {"systemTypes", m->systemTypes}, + {"supportedFeatures", m->supportedFeatures}, + {"mandatoryFeatures", m->mandatoryFeatures}, + {"nrStepsDone", s->nrStepsDone.load()}, + {"currentJobs", s->currentJobs.load()}, + {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)}, + {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)}, + {"consecutiveFailures", info->consecutiveFailures}, + }; + + if (s->currentJobs == 0) + machine["idleSince"] = s->idleSince.load(); + if (m->state->nrStepsDone) { + machine["totalStepTime"] = s->totalStepTime.load(); + machine["totalStepBuildTime"] = s->totalStepBuildTime.load(); + machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone; + machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone; + } + machines_json[m->storeUri.render()] = machine; } + statusJson["machines"] = machines_json; } { - auto nested = root.object("jobsets"); + auto jobsets_json = json::object(); auto jobsets_(jobsets.lock()); for (auto & jobset : *jobsets_) { - auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second); - nested2.attr("shareUsed", jobset.second->shareUsed()); - nested2.attr("seconds", jobset.second->getSeconds()); + jobsets_json[jobset.first.first + ":" + jobset.first.second] = { + {"shareUsed", jobset.second->shareUsed()}, + {"seconds", jobset.second->getSeconds()}, + }; } + statusJson["jobsets"] = jobsets_json; } 
{ - auto nested = root.object("machineTypes"); + auto machineTypesJson = json::object(); auto machineTypes_(machineTypes.lock()); for (auto & i : *machineTypes_) { - auto nested2 = nested.object(i.first); - nested2.attr("runnable", i.second.runnable); - nested2.attr("running", i.second.running); + auto machineTypeJson = machineTypesJson[i.first] = { + {"runnable", i.second.runnable}, + {"running", i.second.running}, + }; if (i.second.runnable > 0) - nested2.attr("waitTime", i.second.waitTime.count() + - i.second.runnable * (time(0) - lastDispatcherCheck)); + machineTypeJson["waitTime"] = i.second.waitTime.count() + + i.second.runnable * (time(0) - lastDispatcherCheck); if (i.second.running == 0) - nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive)); + machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive); } + statusJson["machineTypes"] = machineTypesJson; } auto store = getDestStore(); - auto nested = root.object("store"); - auto & stats = store->getStats(); - nested.attr("narInfoRead", stats.narInfoRead); - nested.attr("narInfoReadAverted", stats.narInfoReadAverted); - nested.attr("narInfoMissing", stats.narInfoMissing); - nested.attr("narInfoWrite", stats.narInfoWrite); - nested.attr("narInfoCacheSize", stats.pathInfoCacheSize); - nested.attr("narRead", stats.narRead); - nested.attr("narReadBytes", stats.narReadBytes); - nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes); - nested.attr("narWrite", stats.narWrite); - nested.attr("narWriteAverted", stats.narWriteAverted); - nested.attr("narWriteBytes", stats.narWriteBytes); - nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes); - nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs); - nested.attr("narCompressionSavings", - stats.narWriteBytes - ? 
1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes - : 0.0); - nested.attr("narCompressionSpeed", // MiB/s + statusJson["store"] = { + {"narInfoRead", stats.narInfoRead.load()}, + {"narInfoReadAverted", stats.narInfoReadAverted.load()}, + {"narInfoMissing", stats.narInfoMissing.load()}, + {"narInfoWrite", stats.narInfoWrite.load()}, + {"narInfoCacheSize", stats.pathInfoCacheSize.load()}, + {"narRead", stats.narRead.load()}, + {"narReadBytes", stats.narReadBytes.load()}, + {"narReadCompressedBytes", stats.narReadCompressedBytes.load()}, + {"narWrite", stats.narWrite.load()}, + {"narWriteAverted", stats.narWriteAverted.load()}, + {"narWriteBytes", stats.narWriteBytes.load()}, + {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()}, + {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()}, + {"narCompressionSavings", + stats.narWriteBytes + ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes + : 0.0}, + {"narCompressionSpeed", // MiB/s stats.narWriteCompressionTimeMs ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0) - : 0.0); + : 0.0}, + }; +#if NIX_WITH_S3_SUPPORT auto s3Store = dynamic_cast(&*store); if (s3Store) { - auto nested2 = nested.object("s3"); auto & s3Stats = s3Store->getS3Stats(); - nested2.attr("put", s3Stats.put); - nested2.attr("putBytes", s3Stats.putBytes); - nested2.attr("putTimeMs", s3Stats.putTimeMs); - nested2.attr("putSpeed", - s3Stats.putTimeMs - ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0) - : 0.0); - nested2.attr("get", s3Stats.get); - nested2.attr("getBytes", s3Stats.getBytes); - nested2.attr("getTimeMs", s3Stats.getTimeMs); - nested2.attr("getSpeed", - s3Stats.getTimeMs - ? 
(double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0) - : 0.0); - nested2.attr("head", s3Stats.head); - nested2.attr("costDollarApprox", - (s3Stats.get + s3Stats.head) / 10000.0 * 0.004 - + s3Stats.put / 1000.0 * 0.005 + - + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09); + auto jsonS3 = statusJson["s3"] = { + {"put", s3Stats.put.load()}, + {"putBytes", s3Stats.putBytes.load()}, + {"putTimeMs", s3Stats.putTimeMs.load()}, + {"putSpeed", + s3Stats.putTimeMs + ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0) + : 0.0}, + {"get", s3Stats.get.load()}, + {"getBytes", s3Stats.getBytes.load()}, + {"getTimeMs", s3Stats.getTimeMs.load()}, + {"getSpeed", + s3Stats.getTimeMs + ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0) + : 0.0}, + {"head", s3Stats.head.load()}, + {"costDollarApprox", + (s3Stats.get + s3Stats.head) / 10000.0 * 0.004 + + s3Stats.put / 1000.0 * 0.005 + + + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09}, + }; } +#endif } { @@ -725,7 +737,7 @@ void State::dumpStatus(Connection & conn) pqxx::work txn(conn); // FIXME: use PostgreSQL 9.5 upsert. 
txn.exec("delete from SystemStatus where what = 'queue-runner'"); - txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str()); + txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump()); txn.exec("notify status_dumped"); txn.commit(); } @@ -820,7 +832,7 @@ void State::run(BuildID buildOne) << metricsAddr << "/metrics (port " << exposerPort << ")" << std::endl; - Store::Params localParams; + Store::Config::Params localParams; localParams["max-connections"] = "16"; localParams["max-connection-age"] = "600"; localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams); @@ -902,10 +914,17 @@ void State::run(BuildID buildOne) while (true) { try { auto conn(dbPool.get()); - receiver dumpStatus_(*conn, "dump_status"); - while (true) { - conn->await_notification(); - dumpStatus(*conn); + try { + receiver dumpStatus_(*conn, "dump_status"); + while (true) { + conn->await_notification(); + dumpStatus(*conn); + } + } catch (pqxx::broken_connection & connEx) { + printMsg(lvlError, "main thread: %s", connEx.what()); + printMsg(lvlError, "main thread: Reconnecting in 10s"); + conn.markBad(); + sleep(10); } } catch (std::exception & e) { printMsg(lvlError, "main thread: %s", e.what()); @@ -950,7 +969,6 @@ int main(int argc, char * * argv) }); settings.verboseBuild = true; - settings.lockCPU = false; State state{metricsAddrOpt}; if (status) diff --git a/src/hydra-queue-runner/meson.build b/src/hydra-queue-runner/meson.build new file mode 100644 index 00000000..27dad2c0 --- /dev/null +++ b/src/hydra-queue-runner/meson.build @@ -0,0 +1,24 @@ +srcs = files( + 'builder.cc', + 'build-remote.cc', + 'build-result.cc', + 'dispatcher.cc', + 'hydra-queue-runner.cc', + 'nar-extractor.cc', + 'queue-monitor.cc', +) + +hydra_queue_runner = executable('hydra-queue-runner', + 'hydra-queue-runner.cc', + srcs, + dependencies: [ + libhydra_dep, + nix_util_dep, + nix_store_dep, + nix_main_dep, + pqxx_dep, + prom_cpp_core_dep, + 
prom_cpp_pull_dep, + ], + install: true, +) diff --git a/src/hydra-queue-runner/nar-extractor.cc b/src/hydra-queue-runner/nar-extractor.cc index 9f0eb431..3bf06ef3 100644 --- a/src/hydra-queue-runner/nar-extractor.cc +++ b/src/hydra-queue-runner/nar-extractor.cc @@ -1,12 +1,51 @@ #include "nar-extractor.hh" -#include "archive.hh" +#include #include using namespace nix; -struct Extractor : ParseSink + +struct NarMemberConstructor : CreateRegularFileSink +{ + NarMemberData & curMember; + + HashSink hashSink = HashSink { HashAlgorithm::SHA256 }; + + std::optional expectedSize; + + NarMemberConstructor(NarMemberData & curMember) + : curMember(curMember) + { } + + void isExecutable() override + { + } + + void preallocateContents(uint64_t size) override + { + expectedSize = size; + } + + void operator () (std::string_view data) override + { + assert(expectedSize); + *curMember.fileSize += data.size(); + hashSink(data); + if (curMember.contents) { + curMember.contents->append(data); + } + assert(curMember.fileSize <= expectedSize); + if (curMember.fileSize == expectedSize) { + auto [hash, len] = hashSink.finish(); + assert(curMember.fileSize == len); + curMember.sha256 = hash; + } + } +}; + +struct Extractor : FileSystemObjectSink { std::unordered_set filesToKeep { "/nix-support/hydra-build-products", @@ -15,58 +54,40 @@ struct Extractor : ParseSink }; NarMemberDatas & members; - NarMemberData * curMember = nullptr; - Path prefix; + std::filesystem::path prefix; + + Path toKey(const CanonPath & path) + { + std::filesystem::path p = prefix; + // Conditional to avoid trailing slash + if (!path.isRoot()) p /= path.rel(); + return p; + } Extractor(NarMemberDatas & members, const Path & prefix) : members(members), prefix(prefix) { } - void createDirectory(const Path & path) override + void createDirectory(const CanonPath & path) override { - members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory }); + members.insert_or_assign(toKey(path), 
NarMemberData { .type = SourceAccessor::Type::tDirectory }); } - void createRegularFile(const Path & path) override + void createRegularFile(const CanonPath & path, std::function func) override { - curMember = &members.insert_or_assign(prefix + path, NarMemberData { - .type = FSAccessor::Type::tRegular, - .fileSize = 0, - .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt, - }).first->second; + NarMemberConstructor nmc { + members.insert_or_assign(toKey(path), NarMemberData { + .type = SourceAccessor::Type::tRegular, + .fileSize = 0, + .contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt, + }).first->second, + }; + func(nmc); } - std::optional expectedSize; - std::unique_ptr hashSink; - - void preallocateContents(uint64_t size) override + void createSymlink(const CanonPath & path, const std::string & target) override { - expectedSize = size; - hashSink = std::make_unique(htSHA256); - } - - void receiveContents(std::string_view data) override - { - assert(expectedSize); - assert(curMember); - assert(hashSink); - *curMember->fileSize += data.size(); - (*hashSink)(data); - if (curMember->contents) { - curMember->contents->append(data); - } - assert(curMember->fileSize <= expectedSize); - if (curMember->fileSize == expectedSize) { - auto [hash, len] = hashSink->finish(); - assert(curMember->fileSize == len); - curMember->sha256 = hash; - hashSink.reset(); - } - } - - void createSymlink(const Path & path, const std::string & target) override - { - members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink }); + members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink }); } }; diff --git a/src/hydra-queue-runner/nar-extractor.hh b/src/hydra-queue-runner/nar-extractor.hh index 45b2706c..0060efe2 100644 --- a/src/hydra-queue-runner/nar-extractor.hh +++ b/src/hydra-queue-runner/nar-extractor.hh @@ -1,13 +1,13 @@ #pragma once -#include "fs-accessor.hh" -#include 
"types.hh" -#include "serialise.hh" -#include "hash.hh" +#include +#include +#include +#include struct NarMemberData { - nix::FSAccessor::Type type; + nix::SourceAccessor::Type type; std::optional fileSize; std::optional contents; std::optional sha256; diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 3bde0d99..0785be6f 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -1,6 +1,8 @@ #include "state.hh" #include "hydra-build-result.hh" -#include "globals.hh" +#include +#include +#include #include @@ -10,63 +12,74 @@ using namespace nix; void State::queueMonitor() { while (true) { + auto conn(dbPool.get()); try { - queueMonitorLoop(); + queueMonitorLoop(*conn); + } catch (pqxx::broken_connection & e) { + printMsg(lvlError, "queue monitor: %s", e.what()); + printMsg(lvlError, "queue monitor: Reconnecting in 10s"); + conn.markBad(); + sleep(10); } catch (std::exception & e) { - printMsg(lvlError, format("queue monitor: %1%") % e.what()); + printError("queue monitor: %s", e.what()); sleep(10); // probably a DB problem, so don't retry right away } } } -void State::queueMonitorLoop() +void State::queueMonitorLoop(Connection & conn) { - auto conn(dbPool.get()); - - receiver buildsAdded(*conn, "builds_added"); - receiver buildsRestarted(*conn, "builds_restarted"); - receiver buildsCancelled(*conn, "builds_cancelled"); - receiver buildsDeleted(*conn, "builds_deleted"); - receiver buildsBumped(*conn, "builds_bumped"); - receiver jobsetSharesChanged(*conn, "jobset_shares_changed"); + receiver buildsAdded(conn, "builds_added"); + receiver buildsRestarted(conn, "builds_restarted"); + receiver buildsCancelled(conn, "builds_cancelled"); + receiver buildsDeleted(conn, "builds_deleted"); + receiver buildsBumped(conn, "builds_bumped"); + receiver jobsetSharesChanged(conn, "jobset_shares_changed"); auto destStore = getDestStore(); - unsigned int lastBuildId = 0; - bool quit = false; 
while (!quit) { + auto t_before_work = std::chrono::steady_clock::now(); + localStore->clearPathInfoCache(); - bool done = getQueuedBuilds(*conn, destStore, lastBuildId); + bool done = getQueuedBuilds(conn, destStore); if (buildOne && buildOneDone) quit = true; + auto t_after_work = std::chrono::steady_clock::now(); + + prom.queue_monitor_time_spent_running.Increment( + std::chrono::duration_cast(t_after_work - t_before_work).count()); + /* Sleep until we get notification from the database about an event. */ if (done && !quit) { - conn->await_notification(); + conn.await_notification(); nrQueueWakeups++; } else - conn->get_notifs(); + conn.get_notifs(); if (auto lowestId = buildsAdded.get()) { - lastBuildId = std::min(lastBuildId, static_cast(std::stoul(*lowestId) - 1)); printMsg(lvlTalkative, "got notification: new builds added to the queue"); } if (buildsRestarted.get()) { printMsg(lvlTalkative, "got notification: builds restarted"); - lastBuildId = 0; // check all builds } if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) { printMsg(lvlTalkative, "got notification: builds cancelled or bumped"); - processQueueChange(*conn); + processQueueChange(conn); } if (jobsetSharesChanged.get()) { printMsg(lvlTalkative, "got notification: jobset shares changed"); - processJobsetSharesChange(*conn); + processJobsetSharesChange(conn); } + + auto t_after_sleep = std::chrono::steady_clock::now(); + prom.queue_monitor_time_spent_waiting.Increment( + std::chrono::duration_cast(t_after_sleep - t_after_work).count()); } exit(0); @@ -80,20 +93,18 @@ struct PreviousFailure : public std::exception { bool State::getQueuedBuilds(Connection & conn, - ref destStore, unsigned int & lastBuildId) + ref destStore) { prom.queue_checks_started.Increment(); - printInfo("checking the queue for builds > %d...", lastBuildId); + printInfo("checking the queue for builds..."); /* Grab the queued builds from the database, but don't process them yet (since we don't want a 
long-running transaction). */ std::vector newIDs; - std::map newBuildsByID; + std::unordered_map newBuildsByID; std::multimap newBuildsByPath; - unsigned int newLastBuildId = lastBuildId; - { pqxx::work txn(conn); @@ -102,17 +113,12 @@ bool State::getQueuedBuilds(Connection & conn, "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, " "globalPriority, priority from Builds " "inner join jobsets on builds.jobset_id = jobsets.id " - "where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id", - lastBuildId); + "where finished = 0 order by globalPriority desc, random()"); for (auto const & row : res) { auto builds_(builds.lock()); BuildID id = row["id"].as(); if (buildOne && id != buildOne) continue; - if (id > newLastBuildId) { - newLastBuildId = id; - prom.queue_max_id.Set(id); - } if (builds_->count(id)) continue; auto build = std::make_shared( @@ -142,13 +148,13 @@ bool State::getQueuedBuilds(Connection & conn, createBuild = [&](Build::ptr build) { prom.queue_build_loads.Increment(); - printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName()); + printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName()); nrAdded++; newBuildsByID.erase(build->id); if (!localStore->isValidPath(build->drvPath)) { /* Derivation has been GC'ed prematurely. 
*/ - printMsg(lvlError, format("aborting GC'ed build %1%") % build->id); + printError("aborting GC'ed build %1%", build->id); if (!build->finishedInDB) { auto mc = startDbUpdate(); pqxx::work txn(conn); @@ -192,15 +198,19 @@ bool State::getQueuedBuilds(Connection & conn, if (!res[0].is_null()) propagatedFrom = res[0].as(); if (!propagatedFrom) { - for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) { - if (i.second.second) { - auto res = txn.exec_params - ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1", - localStore->printStorePath(*i.second.second)); - if (!res[0][0].is_null()) { - propagatedFrom = res[0][0].as(); - break; - } + for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) { + constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1"; + auto res = optOutputPath + ? txn.exec_params( + std::string { common } + " and path = $1", + localStore->printStorePath(*optOutputPath)) + : txn.exec_params( + std::string { common } + " and drvPath = $1 and name = $2", + localStore->printStorePath(ex.step->drvPath), + outputName); + if (!res[0][0].is_null()) { + propagatedFrom = res[0][0].as(); + break; } } } @@ -236,12 +246,10 @@ bool State::getQueuedBuilds(Connection & conn, /* If we didn't get a step, it means the step's outputs are all valid. So we mark this as a finished, cached build. 
*/ if (!step) { - auto drv = localStore->readDerivation(build->drvPath); - BuildOutput res = getBuildOutputCached(conn, destStore, drv); + BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath); - for (auto & i : drv.outputsAndOptPaths(*localStore)) - if (i.second.second) - addRoot(*i.second.second); + for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore)) + addRoot(i.second); { auto mc = startDbUpdate(); @@ -292,7 +300,7 @@ bool State::getQueuedBuilds(Connection & conn, try { createBuild(build); } catch (Error & e) { - e.addTrace({}, hintfmt("while loading build %d: ", build->id)); + e.addTrace({}, HintFmt("while loading build %d: ", build->id)); throw; } @@ -302,7 +310,7 @@ bool State::getQueuedBuilds(Connection & conn, /* Add the new runnable build steps to ‘runnable’ and wake up the builder threads. */ - printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded); + printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded); for (auto & r : newRunnable) makeRunnable(r); @@ -312,15 +320,13 @@ bool State::getQueuedBuilds(Connection & conn, /* Stop after a certain time to allow priority bumps to be processed. */ - if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) { + if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) { prom.queue_checks_early_exits.Increment(); break; - } + } } prom.queue_checks_finished.Increment(); - - lastBuildId = newBuildsByID.empty() ? 
newLastBuildId : newBuildsByID.begin()->first - 1; return newBuildsByID.empty(); } @@ -358,13 +364,13 @@ void State::processQueueChange(Connection & conn) for (auto i = builds_->begin(); i != builds_->end(); ) { auto b = currentIds.find(i->first); if (b == currentIds.end()) { - printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first); + printInfo("discarding cancelled build %1%", i->first); i = builds_->erase(i); // FIXME: ideally we would interrupt active build steps here. continue; } if (i->second->globalPriority < b->second) { - printMsg(lvlInfo, format("priority of build %1% increased") % i->first); + printInfo("priority of build %1% increased", i->first); i->second->globalPriority = b->second; i->second->propagatePriorities(); } @@ -399,6 +405,34 @@ void State::processQueueChange(Connection & conn) } +std::map> State::getMissingRemotePaths( + ref destStore, + const std::map> & paths) +{ + Sync>> missing_; + ThreadPool tp; + + for (auto & [output, maybeOutputPath] : paths) { + if (!maybeOutputPath) { + auto missing(missing_.lock()); + missing->insert({output, maybeOutputPath}); + } else { + tp.enqueue([&] { + if (!destStore->isValidPath(*maybeOutputPath)) { + auto missing(missing_.lock()); + missing->insert({output, maybeOutputPath}); + } + }); + } + } + + tp.process(); + + auto missing(missing_.lock()); + return *missing; +} + + Step::ptr State::createStep(ref destStore, Connection & conn, Build::ptr build, const StorePath & drvPath, Build::ptr referringBuild, Step::ptr referringStep, std::set & finishedDrvs, @@ -457,17 +491,23 @@ Step::ptr State::createStep(ref destStore, it's not runnable yet, and other threads won't make it runnable while step->created == false. 
*/ step->drv = std::make_unique(localStore->readDerivation(drvPath)); - step->parsedDrv = std::make_unique(drvPath, *step->drv); + { + auto parsedOpt = StructuredAttrs::tryParse(step->drv->env); + try { + step->drvOptions = std::make_unique( + DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr)); + } catch (Error & e) { + e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath)); + throw; + } + } - step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore); - step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1"; + step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv); + step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1"; step->systemType = step->drv->platform; { - auto i = step->drv->env.find("requiredSystemFeatures"); - StringSet features; - if (i != step->drv->env.end()) - features = step->requiredSystemFeatures = tokenizeString>(i->second); + StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv); if (step->preferLocalBuild) features.insert("local"); if (!features.empty()) { @@ -481,26 +521,40 @@ Step::ptr State::createStep(ref destStore, throw PreviousFailure{step}; /* Are all outputs valid? 
*/ - bool valid = true; - DerivationOutputs missing; - for (auto & i : step->drv->outputs) - if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) { - valid = false; - missing.insert_or_assign(i.first, i.second); - } + auto outputHashes = staticOutputHashes(*localStore, *(step->drv)); + std::map> paths; + for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) { + auto outputHash = outputHashes.at(outputName); + paths.insert({{outputHash, outputName}, maybeOutputPath}); + } + + auto missing = getMissingRemotePaths(destStore, paths); + bool valid = missing.empty(); /* Try to copy the missing paths from the local store or from substitutes. */ if (!missing.empty()) { size_t avail = 0; - for (auto & i : missing) { - auto path = i.second.path(*localStore, step->drv->name, i.first); - if (/* localStore != destStore && */ localStore->isValidPath(*path)) + for (auto & [i, pathOpt] : missing) { + // If we don't know the output path from the destination + // store, see if the local store can tell us. + if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) + if (auto maybeRealisation = localStore->queryRealisation(i)) + pathOpt = maybeRealisation->outPath; + + if (!pathOpt) { + // No hope of getting the store object if we don't know + // the path. 
+ continue; + } + auto & path = *pathOpt; + + if (/* localStore != destStore && */ localStore->isValidPath(path)) avail++; else if (useSubstitutes) { SubstitutablePathInfos infos; - localStore->querySubstitutablePathInfos({{*path, {}}}, infos); + localStore->querySubstitutablePathInfos({{path, {}}}, infos); if (infos.size() == 1) avail++; } @@ -508,26 +562,29 @@ Step::ptr State::createStep(ref destStore, if (missing.size() == avail) { valid = true; - for (auto & i : missing) { - auto path = i.second.path(*localStore, step->drv->name, i.first); + for (auto & [i, pathOpt] : missing) { + // If we found everything, then we should know the path + // to every missing store object now. + assert(pathOpt); + auto & path = *pathOpt; try { time_t startTime = time(0); - if (localStore->isValidPath(*path)) + if (localStore->isValidPath(path)) printInfo("copying output ‘%1%’ of ‘%2%’ from local store", - localStore->printStorePath(*path), + localStore->printStorePath(path), localStore->printStorePath(drvPath)); else { printInfo("substituting output ‘%1%’ of ‘%2%’", - localStore->printStorePath(*path), + localStore->printStorePath(path), localStore->printStorePath(drvPath)); - localStore->ensurePath(*path); + localStore->ensurePath(path); // FIXME: should copy directly from substituter to destStore. 
} copyClosure(*localStore, *destStore, - StorePathSet { *path }, + StorePathSet { path }, NoRepair, CheckSigs, NoSubstitute); time_t stopTime = time(0); @@ -535,13 +592,13 @@ Step::ptr State::createStep(ref destStore, { auto mc = startDbUpdate(); pqxx::work txn(conn); - createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path); + createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path); txn.commit(); } } catch (Error & e) { printError("while copying/substituting output ‘%s’ of ‘%s’: %s", - localStore->printStorePath(*path), + localStore->printStorePath(path), localStore->printStorePath(drvPath), e.what()); valid = false; @@ -561,7 +618,7 @@ Step::ptr State::createStep(ref destStore, printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath)); /* Create steps for the dependencies. */ - for (auto & i : step->drv->inputDrvs) { + for (auto & i : step->drv->inputDrvs.map) { auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable); if (dep) { auto step_(step->state.lock()); @@ -640,21 +697,23 @@ void State::processJobsetSharesChange(Connection & conn) } -BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref destStore, const nix::Derivation & drv) +BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref destStore, const nix::StorePath & drvPath) { + auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore); + { pqxx::work txn(conn); - for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) { + for (auto & [name, output] : derivationOutputs) { auto r = txn.exec_params ("select id, buildStatus, releaseName, closureSize, size from Builds b " "join BuildOutputs o on b.id = o.build " "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1", - localStore->printStorePath(*output.second)); + localStore->printStorePath(output)); if (r.empty()) continue; BuildID id = 
r[0][0].as(); - printMsg(lvlInfo, format("reusing build %d") % id); + printInfo("reusing build %d", id); BuildOutput res; res.failed = r[0][1].as() == bsFailedWithOutput; @@ -677,7 +736,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref product.fileSize = row[2].as(); } if (!row[3].is_null()) - product.sha256hash = Hash::parseAny(row[3].as(), htSHA256); + product.sha256hash = Hash::parseAny(row[3].as(), HashAlgorithm::SHA256); if (!row[4].is_null()) product.path = row[4].as(); product.name = row[5].as(); @@ -704,5 +763,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref } NarMemberDatas narMembers; - return getBuildOutput(destStore, narMembers, drv); + return getBuildOutput(destStore, narMembers, derivationOutputs); } diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 47e74f55..f7ab7de3 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include @@ -13,13 +15,18 @@ #include "db.hh" -#include "parsed-derivations.hh" -#include "pathlocks.hh" -#include "pool.hh" -#include "build-result.hh" -#include "store-api.hh" -#include "sync.hh" +#include +#include +#include +#include +#include +#include +#include #include "nar-extractor.hh" +#include +#include +#include +#include typedef unsigned int BuildID; @@ -53,6 +60,7 @@ typedef enum { ssConnecting = 10, ssSendingInputs = 20, ssBuilding = 30, + ssWaitingForLocalSlot = 35, ssReceivingOutputs = 40, ssPostProcessing = 50, } StepState; @@ -77,6 +85,8 @@ struct RemoteResult { return stepStatus == bsCachedFailure ? 
bsFailed : stepStatus; } + + void updateWithBuildResult(const nix::BuildResult &); }; @@ -161,8 +171,8 @@ struct Step nix::StorePath drvPath; std::unique_ptr drv; - std::unique_ptr parsedDrv; - std::set requiredSystemFeatures; + std::unique_ptr drvOptions; + nix::StringSet requiredSystemFeatures; bool preferLocalBuild; bool isDeterministic; std::string systemType; // concatenation of drv.platform and requiredSystemFeatures @@ -230,18 +240,10 @@ void getDependents(Step::ptr step, std::set & builds, std::set visitor, Step::ptr step); -struct Machine +struct Machine : nix::Machine { typedef std::shared_ptr ptr; - bool enabled{true}; - - std::string sshName, sshKey; - std::set systemTypes, supportedFeatures, mandatoryFeatures; - unsigned int maxJobs = 1; - float speedFactor = 1.0; - std::string sshPublicHostKey; - struct State { typedef std::shared_ptr ptr; counter currentJobs{0}; @@ -291,10 +293,13 @@ struct Machine return true; } - bool isLocalhost() - { - return sshName == "localhost"; - } + bool isLocalhost() const; + + // A connection to a machine + struct Connection : nix::ServeProto::BasicClientConnection { + // Backpointer to the machine + ptr machine; + }; }; @@ -348,9 +353,13 @@ private: /* The build machines. */ std::mutex machinesReadyLock; - typedef std::map Machines; + typedef std::map Machines; nix::Sync machines; // FIXME: use atomic_shared_ptr + /* Throttler for CPU-bound local work. */ + static constexpr unsigned int maxSupportedLocalWorkers = 1024; + std::counting_semaphore localWorkThrottler; + /* Various stats. 
*/ time_t startedAt; counter nrBuildsRead{0}; @@ -360,6 +369,7 @@ private: counter nrStepsDone{0}; counter nrStepsBuilding{0}; counter nrStepsCopyingTo{0}; + counter nrStepsWaitingForDownloadSlot{0}; counter nrStepsCopyingFrom{0}; counter nrStepsWaiting{0}; counter nrUnsupportedSteps{0}; @@ -390,7 +400,6 @@ private: struct MachineReservation { - typedef std::shared_ptr ptr; State & state; Step::ptr step; Machine::ptr machine; @@ -428,7 +437,7 @@ private: /* How often the build steps of a jobset should be repeated in order to detect non-determinism. */ - std::map, unsigned int> jobsetRepeats; + std::map, size_t> jobsetRepeats; bool uploadLogsToBinaryCache; @@ -448,7 +457,12 @@ private: prometheus::Counter& queue_steps_created; prometheus::Counter& queue_checks_early_exits; prometheus::Counter& queue_checks_finished; - prometheus::Gauge& queue_max_id; + + prometheus::Counter& dispatcher_time_spent_running; + prometheus::Counter& dispatcher_time_spent_waiting; + + prometheus::Counter& queue_monitor_time_spent_running; + prometheus::Counter& queue_monitor_time_spent_waiting; PromMetrics(); }; @@ -483,23 +497,28 @@ private: const std::string & machine); int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime, - Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath); + Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath); void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status); void queueMonitor(); - void queueMonitorLoop(); + void queueMonitorLoop(Connection & conn); /* Check the queue for new builds. */ - bool getQueuedBuilds(Connection & conn, - nix::ref destStore, unsigned int & lastBuildId); + bool getQueuedBuilds(Connection & conn, nix::ref destStore); /* Handle cancellation, deletion and priority bumps. 
*/ void processQueueChange(Connection & conn); BuildOutput getBuildOutputCached(Connection & conn, nix::ref destStore, - const nix::Derivation & drv); + const nix::StorePath & drvPath); + + /* Returns paths missing from the remote store. Paths are processed in + * parallel to work around the possible latency of remote stores. */ + std::map> getMissingRemotePaths( + nix::ref destStore, + const std::map> & paths); Step::ptr createStep(nix::ref store, Connection & conn, Build::ptr build, const nix::StorePath & drvPath, @@ -530,19 +549,19 @@ private: void abortUnsupported(); - void builder(MachineReservation::ptr reservation); + void builder(std::unique_ptr reservation); /* Perform the given build step. Return true if the step is to be retried. */ enum StepResult { sDone, sRetry, sMaybeCancelled }; StepResult doBuildStep(nix::ref destStore, - MachineReservation::ptr reservation, + std::unique_ptr reservation, std::shared_ptr activeStep); void buildRemote(nix::ref destStore, + std::unique_ptr reservation, Machine::ptr machine, Step::ptr step, - unsigned int maxSilentTime, unsigned int buildTimeout, - unsigned int repeats, + const nix::ServeProto::BuildOptions & buildOptions, RemoteResult & result, std::shared_ptr activeStep, std::function updateStep, NarMemberDatas & narMembers); diff --git a/src/lib/Hydra/Base/Controller/NixChannel.pm b/src/lib/Hydra/Base/Controller/NixChannel.pm index 3f8e9609..a5bc2784 100644 --- a/src/lib/Hydra/Base/Controller/NixChannel.pm +++ b/src/lib/Hydra/Base/Controller/NixChannel.pm @@ -4,7 +4,6 @@ use strict; use warnings; use base 'Hydra::Base::Controller::REST'; use List::SomeUtils qw(any); -use Nix::Store; use Hydra::Helper::Nix; use Hydra::Helper::CatalystUtils; @@ -30,7 +29,7 @@ sub getChannelData { my $outputs = {}; foreach my $output (@outputs) { my $outPath = $output->get_column("outpath"); - next if $checkValidity && !isValidPath($outPath); + next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath); 
$outputs->{$output->get_column("outname")} = $outPath; push @storePaths, $outPath; # Put the system type in the manifest (for top-level diff --git a/src/lib/Hydra/Config.pm b/src/lib/Hydra/Config.pm index af686fca..6aae5a5e 100644 --- a/src/lib/Hydra/Config.pm +++ b/src/lib/Hydra/Config.pm @@ -95,6 +95,7 @@ sub get_legacy_ldap_config { "hydra_bump-to-front" => [ "bump-to-front" ], "hydra_cancel-build" => [ "cancel-build" ], "hydra_create-projects" => [ "create-projects" ], + "hydra_eval-jobset" => [ "eval-jobset" ], "hydra_restart-jobs" => [ "restart-jobs" ], }, }; @@ -159,6 +160,7 @@ sub valid_roles { "bump-to-front", "cancel-build", "create-projects", + "eval-jobset", "restart-jobs", ]; } diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 6f10ef57..9f8b7cba 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) { } elsif ($type eq "git") { my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri); die if ! 
-d $clonePath; - $diff .= `(cd $clonePath; git log $rev1..$rev2)`; - $diff .= `(cd $clonePath; git diff $rev1..$rev2)`; + $diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`; + $diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`; } $c->stash->{'plain'} = { data => (scalar $diff) || " " }; @@ -239,6 +239,8 @@ sub triggerJobset { sub push : Chained('api') PathPart('push') Args(0) { my ($self, $c) = @_; + requirePost($c); + $c->{stash}->{json}->{jobsetsTriggered} = []; my $force = exists $c->request->query_params->{force}; @@ -246,19 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) { foreach my $s (@jobsets) { my ($p, $j) = parseJobsetName($s); my $jobset = $c->model('DB::Jobsets')->find($p, $j); + requireEvalJobsetPrivileges($c, $jobset->project); next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled)); triggerJobset($self, $c, $jobset, $force); } my @repos = split /,/, ($c->request->query_params->{repos} // ""); foreach my $r (@repos) { - triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search( + my @jobsets = $c->model('DB::Jobsets')->search( { 'project.enabled' => 1, 'me.enabled' => 1 }, { join => 'project', where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ], order_by => 'me.id DESC' }); + foreach my $jobset (@jobsets) { + requireEvalJobsetPrivileges($c, $jobset->project); + triggerJobset($self, $c, $jobset, $force) + } } $self->status_ok( @@ -285,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) { $c->response->body(""); } +sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) { + my ($self, $c) = @_; + + $c->{stash}->{json}->{jobsetsTriggered} = []; + + my $in = $c->request->{data}; + my $url = $in->{repository}->{clone_url} or die; + $url =~ s/.git$//; + print STDERR "got push from Gitea repository $url\n"; + + triggerJobset($self, $c, $_, 0) 
foreach $c->model('DB::Jobsets')->search( + { 'project.enabled' => 1, 'me.enabled' => 1 }, + { join => 'project' + , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ] + }); + $c->response->body(""); +} 1; diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm index 552f31af..5e7b6f24 100644 --- a/src/lib/Hydra/Controller/Build.pm +++ b/src/lib/Hydra/Controller/Build.pm @@ -7,15 +7,15 @@ use base 'Hydra::Base::Controller::NixChannel'; use Hydra::Helper::Nix; use Hydra::Helper::CatalystUtils; use File::Basename; +use File::LibMagic; use File::stat; use Data::Dump qw(dump); -use Nix::Store; -use Nix::Config; use List::SomeUtils qw(all); use Encode; -use MIME::Types; use JSON::PP; +use WWW::Form::UrlEncoded::PP qw(); +use feature 'state'; sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) { my ($self, $c, $id) = @_; @@ -77,14 +77,16 @@ sub build_GET { $c->stash->{template} = 'build.tt'; $c->stash->{isLocalStore} = isLocalStore(); + # XXX: If the derivation is content-addressed then this will always return + # false because `$_->path` will be empty $c->stash->{available} = $c->stash->{isLocalStore} - ? all { isValidPath($_->path) } $build->buildoutputs->all + ? 
all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all : 1; - $c->stash->{drvAvailable} = isValidPath $build->drvpath; + $c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath); if ($build->finished && $build->iscachedbuild) { - my $path = ($build->buildoutputs)[0]->path or die; + my $path = ($build->buildoutputs)[0]->path or undef; my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path); if (defined $cachedBuildStep) { $c->stash->{cachedBuild} = $cachedBuildStep->build; @@ -138,7 +140,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') { $c->stash->{step} = $step; my $drvPath = $step->drvpath; - my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]); + my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]); showLog($c, $mode, $log_uri); } @@ -147,7 +149,7 @@ sub view_log : Chained('buildChain') PathPart('log') { my ($self, $c, $mode) = @_; my $drvPath = $c->stash->{build}->drvpath; - my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]); + my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]); showLog($c, $mode, $log_uri); } @@ -232,17 +234,24 @@ sub serveFile { } elsif ($ls->{type} eq "regular") { + # Have the hosted data considered its own origin to avoid being a giant + # XSS hole. + $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts'); - $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command", - "cat-store", "--store", getStoreUri(), "$path"]) }; + $c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command", + "store", "cat", "--store", getStoreUri(), "$path"]) }; - # Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple. + # Detect MIME type. 
my $type = "text/plain"; if ($path =~ /.*\.(\S{1,})$/xms) { my $ext = $1; my $mimeTypes = MIME::Types->new(only_complete => 1); my $t = $mimeTypes->mimeTypeOf($ext); $type = ref $t ? $t->type : $t if $t; + } else { + state $magic = File::LibMagic->new(follow_symlinks => 1); + my $info = $magic->info_from_filename($path); + $type = $info->{mime_with_encoding}; } $c->response->content_type($type); $c->forward('Hydra::View::Plain'); @@ -288,29 +297,7 @@ sub download : Chained('buildChain') PathPart { my $path = $product->path; $path .= "/" . join("/", @path) if scalar @path > 0; - if (isLocalStore) { - - notFound($c, "File '" . $product->path . "' does not exist.") unless -e $product->path; - - # Make sure the file is in the Nix store. - $path = checkPath($self, $c, $path); - - # If this is a directory but no "/" is attached, then redirect. - if (-d $path && substr($c->request->uri, -1) ne "/") { - return $c->res->redirect($c->request->uri . "/"); - } - - $path = "$path/index.html" if -d $path && -e "$path/index.html"; - - notFound($c, "File '$path' does not exist.") if !-e $path; - - notFound($c, "Path '$path' is a directory.") if -d $path; - - $c->serve_static_file($path); - - } else { - serveFile($c, $path); - } + serveFile($c, $path); $c->response->headers->last_modified($c->stash->{build}->stoptime); } @@ -323,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) { error($c, "This build is not finished yet.") unless $build->finished; my $output = $build->buildoutputs->find({name => $outputName}); notFound($c, "This build has no output named ‘$outputName’") unless defined $output; - gone($c, "Output is no longer available.") unless isValidPath $output->path; + gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path); $c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\""); $c->stash->{current_view} = 'NixNAR'; @@ -366,7 +353,7 @@ sub contents : 
Chained('buildChain') PathPart Args(1) { # FIXME: don't use shell invocations below. - # FIXME: use nix cat-store + # FIXME: use nix store cat my $res; @@ -440,7 +427,7 @@ sub getDependencyGraph { }; $$done{$path} = $node; my @refs; - foreach my $ref (queryReferences($path)) { + foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) { next if $ref eq $path; next unless $runtime || $ref =~ /\.drv$/; getDependencyGraph($self, $c, $runtime, $done, $ref); @@ -448,7 +435,7 @@ sub getDependencyGraph { } # Show in reverse topological order to flatten the graph. # Should probably do a proper BFS. - my @sorted = reverse topoSortPaths(@refs); + my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs); $node->{refs} = [map { $$done{$_} } @sorted]; } @@ -461,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') { my $build = $c->stash->{build}; my $drvPath = $build->drvpath; - error($c, "Derivation no longer available.") unless isValidPath $drvPath; + error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath); $c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath); @@ -476,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') { requireLocalStore($c); - error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths; + error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths; my $done = {}; $c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ]; @@ -496,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) { if (isLocalStore) { foreach my $out ($build->buildoutputs) { notFound($c, "Path " . $out->path . 
" is no longer available.") - unless isValidPath($out->path); + unless $MACHINE_LOCAL_STORE->isValidPath($out->path); } } diff --git a/src/lib/Hydra/Controller/Job.pm b/src/lib/Hydra/Controller/Job.pm index 378887ea..b392e8e1 100644 --- a/src/lib/Hydra/Controller/Job.pm +++ b/src/lib/Hydra/Controller/Job.pm @@ -69,7 +69,7 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) { my $lastBuild = $c->stash->{jobset}->builds->find( { job => $c->stash->{job}, finished => 1 }, - { order_by => 'id DESC', rows => 1, columns => [@buildListColumns] } + { order_by => 'id DESC', rows => 1, columns => ["stoptime", "buildstatus", "closuresize", "size"] } ); $prometheus->new_counter( @@ -92,6 +92,26 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) { $c->stash->{job}, )->inc($lastBuild->buildstatus > 0); + $prometheus->new_gauge( + name => "hydra_build_closure_size", + help => "Closure size of the last job's build in bytes", + labels => [ "project", "jobset", "job" ] + )->labels( + $c->stash->{project}->name, + $c->stash->{jobset}->name, + $c->stash->{job}, + )->inc($lastBuild->closuresize); + + $prometheus->new_gauge( + name => "hydra_build_output_size", + help => "Output size of the last job's build in bytes", + labels => [ "project", "jobset", "job" ] + )->labels( + $c->stash->{project}->name, + $c->stash->{jobset}->name, + $c->stash->{job}, + )->inc($lastBuild->size); + $c->stash->{'plain'} = { data => $prometheus->render }; $c->forward('Hydra::View::Plain'); } diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index eeb4232a..bc7d7444 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -364,6 +364,21 @@ sub evals_GET { ); } +sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { } + +sub errors_GET { + my ($self, $c) = @_; + + $c->stash->{template} = 'eval-error.tt'; + + my $jobsetName = $c->stash->{params}->{name}; + $c->stash->{jobset} = 
$c->stash->{project}->jobsets->find( + { name => $jobsetName }, + { '+columns' => { 'errormsg' => 'errormsg' } } + ); + + $self->status_ok($c, entity => $c->stash->{jobset}); +} # Redirect to the latest finished evaluation of this jobset. sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') { diff --git a/src/lib/Hydra/Controller/JobsetEval.pm b/src/lib/Hydra/Controller/JobsetEval.pm index 30179d49..77c01a84 100644 --- a/src/lib/Hydra/Controller/JobsetEval.pm +++ b/src/lib/Hydra/Controller/JobsetEval.pm @@ -76,7 +76,9 @@ sub view_GET { $c->stash->{removed} = $diff->{removed}; $c->stash->{unfinished} = $diff->{unfinished}; $c->stash->{aborted} = $diff->{aborted}; - $c->stash->{failed} = $diff->{failed}; + $c->stash->{totalAborted} = $diff->{totalAborted}; + $c->stash->{totalFailed} = $diff->{totalFailed}; + $c->stash->{totalQueued} = $diff->{totalQueued}; $c->stash->{full} = ($c->req->params->{full} || "0") eq "1"; @@ -86,6 +88,17 @@ sub view_GET { ); } +sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { } + +sub errors_GET { + my ($self, $c) = @_; + + $c->stash->{template} = 'eval-error.tt'; + + $c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' }); + + $self->status_ok($c, entity => $c->stash->{eval}); +} sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) { my ($self, $c) = @_; diff --git a/src/lib/Hydra/Controller/Root.pm b/src/lib/Hydra/Controller/Root.pm index c6843d29..0a5d05e5 100644 --- a/src/lib/Hydra/Controller/Root.pm +++ b/src/lib/Hydra/Controller/Root.pm @@ -16,8 +16,11 @@ use List::Util qw[min max]; use List::SomeUtils qw{any}; use Net::Prometheus; use Types::Standard qw/StrMatch/; +use WWW::Form::UrlEncoded::PP qw(); use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$}; +# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi +use constant 
REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$}; # Put this controller at top-level. __PACKAGE__->config->{namespace} = ''; @@ -32,6 +35,7 @@ sub noLoginNeeded { return $whitelisted || $c->request->path eq "api/push-github" || + $c->request->path eq "api/push-gitea" || $c->request->path eq "google-login" || $c->request->path eq "github-redirect" || $c->request->path eq "github-login" || @@ -47,11 +51,13 @@ sub begin :Private { $c->stash->{curUri} = $c->request->uri; $c->stash->{version} = $ENV{"HYDRA_RELEASE"} || ""; $c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || ""; + $c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || ""; $c->stash->{curTime} = time; $c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : ""; $c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : ""; $c->stash->{flashMsg} = $c->flash->{flashMsg}; $c->stash->{successMsg} = $c->flash->{successMsg}; + $c->stash->{localStore} = isLocalStore; $c->stash->{isPrivateHydra} = $c->config->{private} // "0" ne "0"; @@ -77,7 +83,7 @@ sub begin :Private { $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins}; # XSRF protection: require POST requests to have the same origin. - if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") { + if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") { my $referer = $c->req->header('Referer'); $referer //= $c->req->header('Origin'); my $base = $c->req->base; @@ -157,7 +163,7 @@ sub status_GET { { "buildsteps.busy" => { '!=', 0 } }, { order_by => ["globalpriority DESC", "id"], join => "buildsteps", - columns => [@buildListColumns] + columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type'] })] ); } @@ -326,7 +332,7 @@ sub nar :Local :Args(1) { else { $path = $Nix::Config::storeDir . "/$path"; - gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path); + gone($c, "Path " . $path . 
" is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path); $c->stash->{current_view} = 'NixNAR'; $c->stash->{storePath} = $path; @@ -355,6 +361,33 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) { } +sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) { + my ($self, $c, $realisation) = @_; + + if (!isLocalStore) { + notFound($c, "There is no binary cache here."); + } + + else { + my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX; + my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput); + + if (!$rawRealisation) { + $c->response->status(404); + $c->response->content_type('text/plain'); + $c->stash->{plain}->{data} = "does not exist\n"; + $c->forward('Hydra::View::Plain'); + setCacheHeaders($c, 60 * 60); + return; + } + + $c->response->content_type('text/plain'); + $c->stash->{plain}->{data} = $rawRealisation; + $c->forward('Hydra::View::Plain'); + } +} + + sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) { my ($self, $c, $narinfo) = @_; @@ -366,7 +399,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) { my ($hash) = $narinfo =~ NARINFO_REGEX; die("Hash length was not 32") if length($hash) != 32; - my $path = queryPathFromHashPart($hash); + my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash); if (!$path) { $c->response->status(404); @@ -524,7 +557,7 @@ sub log :Local :Args(1) { my $logPrefix = $c->config->{log_prefix}; if (defined $logPrefix) { - $c->res->redirect($logPrefix . "log/" . basename($drvPath)); + $c->res->redirect($logPrefix . "log/" . 
WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))); } else { notFound($c, "The build log of $drvPath is not available."); } diff --git a/src/lib/Hydra/Controller/User.pm b/src/lib/Hydra/Controller/User.pm index 2a8affae..9e7d96e5 100644 --- a/src/lib/Hydra/Controller/User.pm +++ b/src/lib/Hydra/Controller/User.pm @@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) { , "jobset.enabled" => 1 }, { order_by => ["project", "jobset", "job"] - , join => ["project", "jobset"] + , join => {"jobset" => "project"} })]; } diff --git a/src/lib/Hydra/Helper/AddBuilds.pm b/src/lib/Hydra/Helper/AddBuilds.pm index 9e3ddfd2..a6373be5 100644 --- a/src/lib/Hydra/Helper/AddBuilds.pm +++ b/src/lib/Hydra/Helper/AddBuilds.pm @@ -67,7 +67,7 @@ sub validateDeclarativeJobset { my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 1 : 0; if ($enable_dynamic_run_command && !($config->{dynamicruncommand}->{enable} - && $project->{enable_dynamic_run_command})) + && $project->enable_dynamic_run_command)) { die "Dynamic RunCommand is not enabled by the server or the parent project."; } diff --git a/src/lib/Hydra/Helper/BuildDiff.pm b/src/lib/Hydra/Helper/BuildDiff.pm index cd8c7691..be8525d6 100644 --- a/src/lib/Hydra/Helper/BuildDiff.pm +++ b/src/lib/Hydra/Helper/BuildDiff.pm @@ -32,12 +32,26 @@ sub buildDiff { removed => [], unfinished => [], aborted => [], - failed => [], + + # These summary counters cut across the categories to determine whether + # actions such as "Restart all failed" or "Bump queue" are available. 
+ totalAborted => 0, + totalFailed => 0, + totalQueued => 0, }; my $n = 0; foreach my $build (@{$builds}) { - my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4); + my $aborted = $build->finished != 0 && ( + # aborted + $build->buildstatus == 3 + # cancelled + || $build->buildstatus == 4 + # timeout + || $build->buildstatus == 7 + # log limit exceeded + || $build->buildstatus == 10 + ); my $d; my $found = 0; while ($n < scalar(@{$builds2})) { @@ -71,12 +85,19 @@ sub buildDiff { } else { push @{$ret->{new}}, $build if !$found; } - if (defined $build->buildstatus && $build->buildstatus != 0) { - push @{$ret->{failed}}, $build; + + if ($build->finished != 0 && $build->buildstatus != 0) { + if ($aborted) { + ++$ret->{totalAborted}; + } else { + ++$ret->{totalFailed}; + } + } elsif ($build->finished == 0) { + ++$ret->{totalQueued}; } } return $ret; } -1; \ No newline at end of file +1; diff --git a/src/lib/Hydra/Helper/CatalystUtils.pm b/src/lib/Hydra/Helper/CatalystUtils.pm index 2a2ad86f..6ccdbc4d 100644 --- a/src/lib/Hydra/Helper/CatalystUtils.pm +++ b/src/lib/Hydra/Helper/CatalystUtils.pm @@ -15,6 +15,7 @@ our @EXPORT = qw( forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner requireBumpPrivileges requireCancelBuildPrivileges + requireEvalJobsetPrivileges trim getLatestFinishedEval getFirstEval paramToList @@ -186,6 +187,27 @@ sub isProjectOwner { defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username })); } +sub hasEvalJobsetRole { + my ($c) = @_; + return $c->user_exists && $c->check_user_roles("eval-jobset"); +} + +sub mayEvalJobset { + my ($c, $project) = @_; + return + $c->user_exists && + (isAdmin($c) || + hasEvalJobsetRole($c) || + isProjectOwner($c, $project)); +} + +sub requireEvalJobsetPrivileges { + my ($c, $project) = @_; + requireUser($c); + accessDenied($c, "Only the project members, administrators, and 
accounts with eval-jobset privileges can perform this operation.") + unless mayEvalJobset($c, $project); +} + sub hasCancelBuildRole { my ($c) = @_; return $c->user_exists && $c->check_user_roles('cancel-build'); @@ -272,7 +294,7 @@ sub requireAdmin { sub requirePost { my ($c) = @_; - error($c, "Request must be POSTed.") if $c->request->method ne "POST"; + error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST"; } diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm index 514fb439..134b8b7e 100644 --- a/src/lib/Hydra/Helper/Nix.pm +++ b/src/lib/Hydra/Helper/Nix.pm @@ -36,12 +36,16 @@ our @EXPORT = qw( jobsetOverview jobsetOverview_ pathIsInsidePrefix + readIntoSocket readNixFile registerRoot restartBuilds run + $MACHINE_LOCAL_STORE ); +our $MACHINE_LOCAL_STORE = Nix::Store->new(); + sub getHydraHome { my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n"; @@ -171,6 +175,9 @@ sub getDrvLogPath { for ($fn . $bucketed, $fn . $bucketed . ".bz2") { return $_ if -f $_; } + for ($fn . $bucketed, $fn . $bucketed . ".zst") { + return $_ if -f $_; + } return undef; } @@ -187,6 +194,10 @@ sub findLog { return undef if scalar @outPaths == 0; + # Filter out any NULLs. Content-addressed derivations + # that haven't built yet or failed to build may have a NULL outPath. 
+ @outPaths = grep {defined} @outPaths; + my @steps = $c->model('DB::BuildSteps')->search( { path => { -in => [@outPaths] } }, { select => ["drvpath"] @@ -286,8 +297,7 @@ sub getEvals { my @evals = $evals_result_set->search( { hasnewbuilds => 1 }, - { order_by => "$me.id DESC", rows => $rows, offset => $offset - , prefetch => { evaluationerror => [ ] } }); + { order_by => "$me.id DESC", rows => $rows, offset => $offset }); my @res = (); my $cache = {}; @@ -407,6 +417,16 @@ sub pathIsInsidePrefix { return $cur; } +sub readIntoSocket{ + my (%args) = @_; + my $sock; + + eval { + open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x); + }; + + return $sock; +} @@ -494,7 +514,7 @@ sub restartBuilds { $builds = $builds->search({ finished => 1 }); foreach my $build ($builds->search({}, { columns => ["drvpath"] })) { - next if !isValidPath($build->drvpath); + next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath); registerRoot $build->drvpath; } @@ -537,7 +557,7 @@ sub getStoreUri { sub readNixFile { my ($path) = @_; return grab(cmd => ["nix", "--experimental-features", "nix-command", - "cat-store", "--store", getStoreUri(), "$path"]); + "store", "cat", "--store", getStoreUri(), "$path"]); } diff --git a/src/lib/Hydra/Plugin/BazaarInput.pm b/src/lib/Hydra/Plugin/BazaarInput.pm index 230d108b..b35ed7c8 100644 --- a/src/lib/Hydra/Plugin/BazaarInput.pm +++ b/src/lib/Hydra/Plugin/BazaarInput.pm @@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex); use File::Path; use Hydra::Helper::Exec; use Hydra::Helper::Nix; -use Nix::Store; sub supportedInputTypes { my ($self, $inputTypes) = @_; @@ -38,9 +37,9 @@ sub fetchInput { (my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search( {uri => $uri, revision => $revision}); - addTempRoot($cachedInput->storepath) if defined $cachedInput; + $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput; - if (defined $cachedInput && isValidPath($cachedInput->storepath)) { + if 
(defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) { $storePath = $cachedInput->storepath; $sha256 = $cachedInput->sha256hash; } else { @@ -58,7 +57,7 @@ sub fetchInput { ($sha256, $storePath) = split ' ', $stdout; # FIXME: time window between nix-prefetch-bzr and addTempRoot. - addTempRoot($storePath); + $MACHINE_LOCAL_STORE->addTempRoot($storePath); $self->{db}->txn_do(sub { $self->{db}->resultset('CachedBazaarInputs')->create( diff --git a/src/lib/Hydra/Plugin/CompressLog.pm b/src/lib/Hydra/Plugin/CompressLog.pm index 10e8f6cc..fe4d33b0 100644 --- a/src/lib/Hydra/Plugin/CompressLog.pm +++ b/src/lib/Hydra/Plugin/CompressLog.pm @@ -9,11 +9,24 @@ use Hydra::Helper::CatalystUtils; sub stepFinished { my ($self, $step, $logPath) = @_; - my $doCompress = $self->{config}->{'compress_build_logs'} // "1"; + my $doCompress = $self->{config}->{'compress_build_logs'} // '1'; + my $silent = $self->{config}->{'compress_build_logs_silent'} // '0'; + my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2'; - if ($doCompress eq "1" && -e $logPath) { - print STDERR "compressing ‘$logPath’...\n"; - system("bzip2", "--force", $logPath); + if (not -e $logPath or $doCompress ne "1") { + return; + } + + if ($silent ne '1') { + print STDERR "compressing '$logPath' with $compression...\n"; + } + + if ($compression eq 'bzip2') { + system('bzip2', '--force', $logPath); + } elsif ($compression eq 'zstd') { + system('zstd', '--rm', '--quiet', '-T0', $logPath); + } else { + print STDERR "unknown compression type '$compression'\n"; } } diff --git a/src/lib/Hydra/Plugin/DarcsInput.pm b/src/lib/Hydra/Plugin/DarcsInput.pm index b7f3db55..a8df6396 100644 --- a/src/lib/Hydra/Plugin/DarcsInput.pm +++ b/src/lib/Hydra/Plugin/DarcsInput.pm @@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex); use File::Path; use Hydra::Helper::Exec; use Hydra::Helper::Nix; -use Nix::Store; sub supportedInputTypes { my ($self, $inputTypes) = @_; @@ -58,7 +57,7 @@ sub 
fetchInput { {uri => $uri, revision => $revision}, {rows => 1}); - if (defined $cachedInput && isValidPath($cachedInput->storepath)) { + if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) { $storePath = $cachedInput->storepath; $sha256 = $cachedInput->sha256hash; $revision = $cachedInput->revision; @@ -75,8 +74,8 @@ sub fetchInput { die "darcs changes --count failed" if $? != 0; system "rm", "-rf", "$tmpDir/export/_darcs"; - $storePath = addToStore("$tmpDir/export", 1, "sha256"); - $sha256 = queryPathHash($storePath); + $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256"); + $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://; $self->{db}->txn_do(sub { diff --git a/src/lib/Hydra/Plugin/GitInput.pm b/src/lib/Hydra/Plugin/GitInput.pm index aca35c30..0de02128 100644 --- a/src/lib/Hydra/Plugin/GitInput.pm +++ b/src/lib/Hydra/Plugin/GitInput.pm @@ -186,9 +186,9 @@ sub fetchInput { {uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0}, {rows => 1}); - addTempRoot($cachedInput->storepath) if defined $cachedInput; + $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput; - if (defined $cachedInput && isValidPath($cachedInput->storepath)) { + if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) { $storePath = $cachedInput->storepath; $sha256 = $cachedInput->sha256hash; $revision = $cachedInput->revision; @@ -217,7 +217,7 @@ sub fetchInput { ($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1); # FIXME: time window between nix-prefetch-git and addTempRoot. - addTempRoot($storePath); + $MACHINE_LOCAL_STORE->addTempRoot($storePath); $self->{db}->txn_do(sub { $self->{db}->resultset('CachedGitInputs')->update_or_create( @@ -261,7 +261,7 @@ sub getCommits { my $clonePath = getSCMCacheDir . "/git/" . 
sha256_hex($uri); - my $out = grab(cmd => ["git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath); + my $out = grab(cmd => ["git", "--git-dir=.git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath); my $res = []; foreach my $line (split /\n/, $out) { diff --git a/src/lib/Hydra/Plugin/GiteaStatus.pm b/src/lib/Hydra/Plugin/GiteaStatus.pm index 426c93f5..f3498941 100644 --- a/src/lib/Hydra/Plugin/GiteaStatus.pm +++ b/src/lib/Hydra/Plugin/GiteaStatus.pm @@ -88,10 +88,6 @@ sub buildQueued { common(@_, [], 0); } -sub buildStarted { - common(@_, [], 1); -} - sub buildFinished { common(@_, 2); } diff --git a/src/lib/Hydra/Plugin/GithubPulls.pm b/src/lib/Hydra/Plugin/GithubPulls.pm index db0e8d25..9d8412c3 100644 --- a/src/lib/Hydra/Plugin/GithubPulls.pm +++ b/src/lib/Hydra/Plugin/GithubPulls.pm @@ -30,7 +30,7 @@ sub _iterate { $pulls->{$pull->{number}} = $pull; } # TODO Make Link header parsing more robust!!! - my @links = split ',', $res->header("Link"); + my @links = split ',', ($res->header("Link") // ""); my $next = ""; foreach my $link (@links) { my ($url, $rel) = split ";", $link; diff --git a/src/lib/Hydra/Plugin/HipChatNotification.pm b/src/lib/Hydra/Plugin/HipChatNotification.pm deleted file mode 100644 index 61cea57a..00000000 --- a/src/lib/Hydra/Plugin/HipChatNotification.pm +++ /dev/null @@ -1,89 +0,0 @@ -package Hydra::Plugin::HipChatNotification; - -use strict; -use warnings; -use parent 'Hydra::Plugin'; -use LWP::UserAgent; -use Hydra::Helper::CatalystUtils; - -sub isEnabled { - my ($self) = @_; - return defined $self->{config}->{hipchat}; -} - -sub buildFinished { - my ($self, $topbuild, $dependents) = @_; - - my $cfg = $self->{config}->{hipchat}; - my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : (); - - my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000"; - - # Figure out to which rooms to send notification. 
For each email - # room, we send one aggregate message. - my %rooms; - foreach my $build ($topbuild, @{$dependents}) { - my $prevBuild = getPreviousBuild($build); - my $jobName = showJobName $build; - - foreach my $room (@config) { - my $force = $room->{force}; - next unless $jobName =~ /^$room->{jobs}$/; - - # If build is cancelled or aborted, do not send email. - next if ! $force && ($build->buildstatus == 4 || $build->buildstatus == 3); - - # If there is a previous (that is not cancelled or aborted) build - # with same buildstatus, do not send email. - next if ! $force && defined $prevBuild && ($build->buildstatus == $prevBuild->buildstatus); - - $rooms{$room->{room}} //= { room => $room, builds => [] }; - push @{$rooms{$room->{room}}->{builds}}, $build; - } - } - - return if scalar keys %rooms == 0; - - my ($authors, $nrCommits) = getResponsibleAuthors($topbuild, $self->{plugins}); - - # Send a message to each room. - foreach my $roomId (keys %rooms) { - my $room = $rooms{$roomId}; - my @deps = grep { $_->id != $topbuild->id } @{$room->{builds}}; - - my $img = - $topbuild->buildstatus == 0 ? "$baseurl/static/images/checkmark_16.png" : - $topbuild->buildstatus == 2 ? "$baseurl/static/images/dependency_16.png" : - $topbuild->buildstatus == 4 ? "$baseurl/static/images/cancelled_16.png" : - "$baseurl/static/images/error_16.png"; - - my $msg = ""; - $msg .= " "; - $msg .= "Job jobset->get_column('name')}/${\$topbuild->get_column('job')}'>${\showJobName($topbuild)}"; - $msg .= " (and ${\scalar @deps} others)" if scalar @deps > 0; - $msg .= ": " . showStatus($topbuild) . ""; - - if (scalar keys %{$authors} > 0) { - # FIXME: HTML escaping - my @x = map { "$_" } (sort keys %{$authors}); - $msg .= ", likely due to "; - $msg .= "$nrCommits commits by " if $nrCommits > 1; - $msg .= join(" or ", scalar @x > 1 ? 
join(", ", @x[0..scalar @x - 2]) : (), $x[-1]); - } - - print STDERR "sending hipchat notification to room $roomId: $msg\n"; - - my $ua = LWP::UserAgent->new(); - my $resp = $ua->post('https://api.hipchat.com/v1/rooms/message?format=json&auth_token=' . $room->{room}->{token}, { - room_id => $roomId, - from => 'Hydra', - message => $msg, - message_format => 'html', - notify => $room->{room}->{notify} || 0, - color => $topbuild->buildstatus == 0 ? 'green' : 'red' }); - - print STDERR $resp->status_line, ": ", $resp->decoded_content,"\n" if !$resp->is_success; - } -} - -1; diff --git a/src/lib/Hydra/Plugin/MercurialInput.pm b/src/lib/Hydra/Plugin/MercurialInput.pm index 921262ad..85bd2c70 100644 --- a/src/lib/Hydra/Plugin/MercurialInput.pm +++ b/src/lib/Hydra/Plugin/MercurialInput.pm @@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex); use File::Path; use Hydra::Helper::Nix; use Hydra::Helper::Exec; -use Nix::Store; use Fcntl qw(:flock); sub supportedInputTypes { @@ -68,9 +67,9 @@ sub fetchInput { (my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search( {uri => $uri, branch => $branch, revision => $revision}); - addTempRoot($cachedInput->storepath) if defined $cachedInput; + $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput; - if (defined $cachedInput && isValidPath($cachedInput->storepath)) { + if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) { $storePath = $cachedInput->storepath; $sha256 = $cachedInput->sha256hash; } else { @@ -85,7 +84,7 @@ sub fetchInput { ($sha256, $storePath) = split ' ', $stdout; # FIXME: time window between nix-prefetch-hg and addTempRoot. 
- addTempRoot($storePath); + $MACHINE_LOCAL_STORE->addTempRoot($storePath); $self->{db}->txn_do(sub { $self->{db}->resultset('CachedHgInputs')->update_or_create( diff --git a/src/lib/Hydra/Plugin/PathInput.pm b/src/lib/Hydra/Plugin/PathInput.pm index d122ff57..c923a03c 100644 --- a/src/lib/Hydra/Plugin/PathInput.pm +++ b/src/lib/Hydra/Plugin/PathInput.pm @@ -5,7 +5,6 @@ use warnings; use parent 'Hydra::Plugin'; use POSIX qw(strftime); use Hydra::Helper::Nix; -use Nix::Store; sub supportedInputTypes { my ($self, $inputTypes) = @_; @@ -30,7 +29,7 @@ sub fetchInput { {srcpath => $uri, lastseen => {">", $timestamp - $timeout}}, {rows => 1, order_by => "lastseen DESC"}); - if (defined $cachedInput && isValidPath($cachedInput->storepath)) { + if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) { $storePath = $cachedInput->storepath; $sha256 = $cachedInput->sha256hash; $timestamp = $cachedInput->timestamp; @@ -46,7 +45,7 @@ sub fetchInput { } chomp $storePath; - $sha256 = (queryPathInfo($storePath, 0))[1] or die; + $sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die; ($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search( {srcpath => $uri, sha256hash => $sha256}); diff --git a/src/lib/Hydra/Plugin/S3Backup.pm b/src/lib/Hydra/Plugin/S3Backup.pm index 98e79747..f1f50754 100644 --- a/src/lib/Hydra/Plugin/S3Backup.pm +++ b/src/lib/Hydra/Plugin/S3Backup.pm @@ -14,6 +14,7 @@ use Nix::Config; use Nix::Store; use Hydra::Model::DB; use Hydra::Helper::CatalystUtils; +use Hydra::Helper::Nix; sub isEnabled { my ($self) = @_; @@ -92,7 +93,7 @@ sub buildFinished { my $hash = substr basename($path), 0, 32; my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0); my $system; - if (defined $deriver and isValidPath($deriver)) { + if (defined $deriver and $MACHINE_LOCAL_STORE->isValidPath($deriver)) { $system = derivationFromPath($deriver)->{platform}; } foreach my $reference (@{$refs}) { diff --git 
a/src/lib/Hydra/Plugin/SubversionInput.pm b/src/lib/Hydra/Plugin/SubversionInput.pm index 456c6892..d3579c40 100644 --- a/src/lib/Hydra/Plugin/SubversionInput.pm +++ b/src/lib/Hydra/Plugin/SubversionInput.pm @@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex); use Hydra::Helper::Exec; use Hydra::Helper::Nix; use IPC::Run; -use Nix::Store; sub supportedInputTypes { my ($self, $inputTypes) = @_; @@ -45,9 +44,9 @@ sub fetchInput { (my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search( {uri => $uri, revision => $revision}); - addTempRoot($cachedInput->storepath) if defined $cachedInput; + $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput; - if (defined $cachedInput && isValidPath($cachedInput->storepath)) { + if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) { $storePath = $cachedInput->storepath; $sha256 = $cachedInput->sha256hash; } else { @@ -62,16 +61,16 @@ sub fetchInput { die "error checking out Subversion repo at `$uri':\n$stderr" if $res; if ($type eq "svn-checkout") { - $storePath = addToStore($wcPath, 1, "sha256"); + $storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256"); } else { # Hm, if the Nix Perl bindings supported filters in # addToStore(), then we wouldn't need to make a copy here. 
my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die; (system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed"; - $storePath = addToStore("$tmpDir/source", 1, "sha256"); + $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256"); } - $sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://; + $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://; $self->{db}->txn_do(sub { $self->{db}->resultset('CachedSubversionInputs')->update_or_create( diff --git a/src/lib/Hydra/Schema/Result/BuildOutputs.pm b/src/lib/Hydra/Schema/Result/BuildOutputs.pm index 9fc4f7c7..3997b497 100644 --- a/src/lib/Hydra/Schema/Result/BuildOutputs.pm +++ b/src/lib/Hydra/Schema/Result/BuildOutputs.pm @@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs"); =head2 path data_type: 'text' - is_nullable: 0 + is_nullable: 1 =cut @@ -59,7 +59,7 @@ __PACKAGE__->add_columns( "name", { data_type => "text", is_nullable => 0 }, "path", - { data_type => "text", is_nullable => 0 }, + { data_type => "text", is_nullable => 1 }, ); =head1 PRIMARY KEY @@ -94,8 +94,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:Jsabm3YTcI7YvCuNdKP5Ng my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/Result/BuildStepOutputs.pm b/src/lib/Hydra/Schema/Result/BuildStepOutputs.pm index 016a35fe..6d997a8c 100644 --- a/src/lib/Hydra/Schema/Result/BuildStepOutputs.pm +++ b/src/lib/Hydra/Schema/Result/BuildStepOutputs.pm @@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs"); =head2 path data_type: 'text' - is_nullable: 0 + is_nullable: 1 =cut @@ -67,7 +67,7 @@ __PACKAGE__->add_columns( "name", { data_type => "text", is_nullable => 0 }, "path", - { data_type => "text", is_nullable => 0 }, + { data_type => "text", is_nullable => 1 }, ); =head1 PRIMARY KEY @@ -119,8 +119,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/Result/EvaluationErrors.pm b/src/lib/Hydra/Schema/Result/EvaluationErrors.pm index 7033fa5e..f6cc48db 100644 --- a/src/lib/Hydra/Schema/Result/EvaluationErrors.pm +++ b/src/lib/Hydra/Schema/Result/EvaluationErrors.pm @@ -105,4 +105,6 @@ __PACKAGE__->add_column( "+id" => { retrieve_on_insert => 1 } ); +__PACKAGE__->mk_group_accessors('column' => 'has_error'); + 1; diff --git a/src/lib/Hydra/Schema/Result/Jobsets.pm b/src/lib/Hydra/Schema/Result/Jobsets.pm index cd704ac8..aee87e00 100644 --- a/src/lib/Hydra/Schema/Result/Jobsets.pm +++ b/src/lib/Hydra/Schema/Result/Jobsets.pm @@ -386,6 +386,8 @@ __PACKAGE__->add_column( "+id" => { retrieve_on_insert => 1 } ); +__PACKAGE__->mk_group_accessors('column' => 'has_error'); + sub supportsDynamicRunCommand { my ($self) = @_; diff --git a/src/lib/Hydra/Schema/Result/Users.pm b/src/lib/Hydra/Schema/Result/Users.pm index 
b3de6543..c28ae931 100644 --- a/src/lib/Hydra/Schema/Result/Users.pm +++ b/src/lib/Hydra/Schema/Result/Users.pm @@ -216,7 +216,7 @@ sub json_hint { sub _authenticator() { my $authenticator = Crypt::Passphrase->new( - encoder => 'Argon2', + encoder => { module => 'Argon2', output_size => 16 }, validators => [ (sub { my ($password, $hash) = @_; diff --git a/src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm b/src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm new file mode 100644 index 00000000..a4c6d955 --- /dev/null +++ b/src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm @@ -0,0 +1,30 @@ +package Hydra::Schema::ResultSet::EvaluationErrors; + +use strict; +use utf8; +use warnings; + +use parent 'DBIx::Class::ResultSet'; + +use Storable qw(dclone); + +__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns'); + +# Exclude expensive error message values unless explicitly requested, and +# replace them with a summary field describing their presence/absence. +sub search_rs { + my ( $class, $query, $attrs ) = @_; + + if ($attrs) { + $attrs = dclone($attrs); + } + + unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) { + $attrs->{'+columns'}->{'has_error'} = "errormsg != ''"; + } + unless (exists $attrs->{'+columns'}->{'errormsg'}) { + push @{ $attrs->{'remove_columns'} }, 'errormsg'; + } + + return $class->next::method($query, $attrs); +} diff --git a/src/lib/Hydra/Schema/ResultSet/Jobsets.pm b/src/lib/Hydra/Schema/ResultSet/Jobsets.pm new file mode 100644 index 00000000..1b2a12e3 --- /dev/null +++ b/src/lib/Hydra/Schema/ResultSet/Jobsets.pm @@ -0,0 +1,30 @@ +package Hydra::Schema::ResultSet::Jobsets; + +use strict; +use utf8; +use warnings; + +use parent 'DBIx::Class::ResultSet'; + +use Storable qw(dclone); + +__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns'); + +# Exclude expensive error message values unless explicitly requested, and +# replace them with a summary field describing their presence/absence. 
+sub search_rs { + my ( $class, $query, $attrs ) = @_; + + if ($attrs) { + $attrs = dclone($attrs); + } + + unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) { + $attrs->{'+columns'}->{'has_error'} = "errormsg != ''"; + } + unless (exists $attrs->{'+columns'}->{'errormsg'}) { + push @{ $attrs->{'remove_columns'} }, 'errormsg'; + } + + return $class->next::method($query, $attrs); +} diff --git a/src/lib/Hydra/View/NARInfo.pm b/src/lib/Hydra/View/NARInfo.pm index 44db78b1..801fc06a 100644 --- a/src/lib/Hydra/View/NARInfo.pm +++ b/src/lib/Hydra/View/NARInfo.pm @@ -8,6 +8,7 @@ use MIME::Base64; use Nix::Manifest; use Nix::Store; use Nix::Utils; +use Hydra::Helper::Nix; use base qw/Catalyst::View/; sub process { @@ -17,7 +18,7 @@ sub process { $c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type - my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1); + my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1); my $info; $info .= "StorePath: $storePath\n"; @@ -28,8 +29,8 @@ sub process { $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n"; if (defined $deriver) { $info .= "Deriver: " . basename $deriver . "\n"; - if (isValidPath($deriver)) { - my $drv = derivationFromPath($deriver); + if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) { + my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver); $info .= "System: $drv->{platform}\n"; } } diff --git a/src/lib/Hydra/View/NixLog.pm b/src/lib/Hydra/View/NixLog.pm index 7f37ae78..fe37d900 100644 --- a/src/lib/Hydra/View/NixLog.pm +++ b/src/lib/Hydra/View/NixLog.pm @@ -16,7 +16,10 @@ sub process { my $tail = int($c->stash->{tail} // "0"); - if ($logPath =~ /\.bz2$/) { + if ($logPath =~ /\.zst$/) { + my $doTail = $tail ? "| tail -n '$tail'" : ""; + open($fh, "-|", "zstd -dc < '$logPath' $doTail") or die; + } elsif ($logPath =~ /\.bz2$/) { my $doTail = $tail ? 
"| tail -n '$tail'" : ""; open($fh, "-|", "bzip2 -dc < '$logPath' $doTail") or die; } else { diff --git a/src/lib/Hydra/View/TT.pm b/src/lib/Hydra/View/TT.pm index 84fcf3e9..241787e0 100644 --- a/src/lib/Hydra/View/TT.pm +++ b/src/lib/Hydra/View/TT.pm @@ -6,6 +6,7 @@ use base 'Catalyst::View::TT'; use Template::Plugin::HTML; use Hydra::Helper::Nix; use Time::Seconds; +use Digest::SHA qw(sha1_hex); __PACKAGE__->config( TEMPLATE_EXTENSION => '.tt', @@ -25,8 +26,14 @@ __PACKAGE__->config( makeNameTextForJobset relativeDuration stripSSHUser + metricDivId /]); +sub metricDivId { + my ($self, $c, $text) = @_; + return "metric-" . sha1_hex($text); +} + sub buildLogExists { my ($self, $c, $build) = @_; return 1 if defined $c->config->{log_prefix}; diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am deleted file mode 100644 index 434868e0..00000000 --- a/src/lib/Makefile.am +++ /dev/null @@ -1,22 +0,0 @@ -PERL_MODULES = \ - $(wildcard *.pm) \ - $(wildcard Hydra/*.pm) \ - $(wildcard Hydra/Helper/*.pm) \ - $(wildcard Hydra/Model/*.pm) \ - $(wildcard Hydra/View/*.pm) \ - $(wildcard Hydra/Schema/*.pm) \ - $(wildcard Hydra/Schema/Result/*.pm) \ - $(wildcard Hydra/Schema/ResultSet/*.pm) \ - $(wildcard Hydra/Controller/*.pm) \ - $(wildcard Hydra/Base/*.pm) \ - $(wildcard Hydra/Base/Controller/*.pm) \ - $(wildcard Hydra/Script/*.pm) \ - $(wildcard Hydra/Component/*.pm) \ - $(wildcard Hydra/Event/*.pm) \ - $(wildcard Hydra/Plugin/*.pm) - -EXTRA_DIST = \ - $(PERL_MODULES) - -hydradir = $(libexecdir)/hydra/lib -nobase_hydra_DATA = $(PERL_MODULES) diff --git a/src/libhydra/db.hh b/src/libhydra/db.hh index 00e8f406..c664a01d 100644 --- a/src/libhydra/db.hh +++ b/src/libhydra/db.hh @@ -2,7 +2,8 @@ #include -#include "util.hh" +#include +#include struct Connection : pqxx::connection diff --git a/src/libhydra/hydra-config.hh b/src/libhydra/hydra-config.hh index 1688c278..85c58746 100644 --- a/src/libhydra/hydra-config.hh +++ b/src/libhydra/hydra-config.hh @@ -2,7 +2,8 @@ #include 
-#include "util.hh" +#include +#include struct HydraConfig { diff --git a/src/libhydra/meson.build b/src/libhydra/meson.build new file mode 100644 index 00000000..1866233c --- /dev/null +++ b/src/libhydra/meson.build @@ -0,0 +1,5 @@ +libhydra_inc = include_directories('.') + +libhydra_dep = declare_dependency( + include_directories: [libhydra_inc], +) diff --git a/src/meson.build b/src/meson.build new file mode 100644 index 00000000..52b821bc --- /dev/null +++ b/src/meson.build @@ -0,0 +1,85 @@ +# Native code +subdir('libhydra') +subdir('hydra-evaluator') +subdir('hydra-queue-runner') + +hydra_libexecdir = get_option('libexecdir') / 'hydra' + +# Data and interpreted +foreach dir : ['lib', 'root'] + install_subdir(dir, + install_dir: hydra_libexecdir, + ) +endforeach +subdir('sql') +subdir('ttf') + +# Static files for website + +hydra_libexecdir_static = hydra_libexecdir / 'root' / 'static' + +## Bootstrap + +bootstrap_name = 'bootstrap-4.3.1-dist' +bootstrap = custom_target( + 'extract-bootstrap', + input: 'root' / (bootstrap_name + '.zip'), + output: bootstrap_name, + command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'], +) +custom_target( + 'name-bootstrap', + input: bootstrap, + output: 'bootstrap', + command: ['cp', '-r', '@INPUT@' , '@OUTPUT@'], + install: true, + install_dir: hydra_libexecdir_static, +) + +## Flot + +custom_target( + 'extract-flot', + input: 'root' / 'flot-0.8.3.zip', + output: 'flot', + command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'], + install: true, + install_dir: hydra_libexecdir_static / 'js', +) + +## Fontawesome + +fontawesome_name = 'fontawesome-free-5.10.2-web' +fontawesome = custom_target( + 'extract-fontawesome', + input: 'root' / (fontawesome_name + '.zip'), + output: fontawesome_name, + command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'], +) +custom_target( + 'name-fontawesome-css', + input: fontawesome, + output: 'css', + command: ['cp', '-r', '@INPUT@/css', '@OUTPUT@'], + install: true, + install_dir: 
hydra_libexecdir_static / 'fontawesome', +) +custom_target( + 'name-fontawesome-webfonts', + input: fontawesome, + output: 'webfonts', + command: ['cp', '-r', '@INPUT@/webfonts', '@OUTPUT@'], + install: true, + install_dir: hydra_libexecdir_static / 'fontawesome', +) + +# Scripts + +install_subdir('script', + install_dir: get_option('bindir'), + exclude_files: [ + 'hydra-dev-server', + ], + install_mode: 'rwxr-xr-x', + strip_directory: true, +) diff --git a/src/root/Makefile.am b/src/root/Makefile.am deleted file mode 100644 index 163b96e0..00000000 --- a/src/root/Makefile.am +++ /dev/null @@ -1,39 +0,0 @@ -TEMPLATES = $(wildcard *.tt) -STATIC = \ - $(wildcard static/images/*) \ - $(wildcard static/css/*) \ - static/js/bootbox.min.js \ - static/js/popper.min.js \ - static/js/common.js \ - static/js/jquery/jquery-3.4.1.min.js \ - static/js/jquery/jquery-ui-1.10.4.min.js - -FLOT = flot-0.8.3.zip -BOOTSTRAP = bootstrap-4.3.1-dist.zip -FONTAWESOME = fontawesome-free-5.10.2-web.zip - -ZIPS = $(FLOT) $(BOOTSTRAP) $(FONTAWESOME) - -EXTRA_DIST = $(TEMPLATES) $(STATIC) $(ZIPS) - -hydradir = $(libexecdir)/hydra/root -nobase_hydra_DATA = $(EXTRA_DIST) - -all: - mkdir -p $(srcdir)/static/js - unzip -u -d $(srcdir)/static $(BOOTSTRAP) - rm -rf $(srcdir)/static/bootstrap - mv $(srcdir)/static/$(basename $(BOOTSTRAP)) $(srcdir)/static/bootstrap - unzip -u -d $(srcdir)/static/js $(FLOT) - unzip -u -d $(srcdir)/static $(FONTAWESOME) - rm -rf $(srcdir)/static/fontawesome - mv $(srcdir)/static/$(basename $(FONTAWESOME)) $(srcdir)/static/fontawesome - -install-data-local: $(ZIPS) - mkdir -p $(hydradir)/static/js - cp -prvd $(srcdir)/static/js/* $(hydradir)/static/js - mkdir -p $(hydradir)/static/bootstrap - cp -prvd $(srcdir)/static/bootstrap/* $(hydradir)/static/bootstrap - mkdir -p $(hydradir)/static/fontawesome/{css,webfonts} - cp -prvd $(srcdir)/static/fontawesome/css/* $(hydradir)/static/fontawesome/css - cp -prvd $(srcdir)/static/fontawesome/webfonts/* 
$(hydradir)/static/fontawesome/webfonts diff --git a/src/root/auth.tt b/src/root/auth.tt index 360904d9..d49ba5bd 100644 --- a/src/root/auth.tt +++ b/src/root/auth.tt @@ -33,7 +33,7 @@