diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 3ba4aba6..613e3ef9 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,14 +1,17 @@
name: "Test"
on:
pull_request:
+ merge_group:
push:
+ branches:
+ - master
jobs:
tests:
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- - uses: cachix/install-nix-action@v16
+ - uses: cachix/install-nix-action@v31
#- run: nix flake check
- run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
diff --git a/.github/workflows/update-flakes.yml b/.github/workflows/update-flakes.yml
new file mode 100644
index 00000000..b5c0c2dd
--- /dev/null
+++ b/.github/workflows/update-flakes.yml
@@ -0,0 +1,28 @@
+name: "Update Flakes"
+on:
+ schedule:
+ # Run weekly on Monday at 00:00 UTC
+ - cron: '0 0 * * 1'
+ workflow_dispatch:
+jobs:
+ update-flakes:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+ steps:
+ - uses: actions/checkout@v3
+ - uses: cachix/install-nix-action@v31
+ - name: Update flake inputs
+ run: nix flake update
+ - name: Create Pull Request
+ uses: peter-evans/create-pull-request@v5
+ with:
+ commit-message: "flake.lock: Update"
+ title: "Update flake inputs"
+ body: |
+ Automated flake input updates.
+
+ This PR was automatically created by the update-flakes workflow.
+ branch: update-flakes
+ delete-branch: true
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 799db665..12df926f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,47 +1,9 @@
-/.pls_cache
-*.o
*~
-Makefile
-Makefile.in
-.deps
-.hydra-data
-/config.guess
-/config.log
-/config.status
-/config.sub
-/configure
-/depcomp
-/libtool
-/ltmain.sh
-/autom4te.cache
-/aclocal.m4
-/missing
-/install-sh
+.test_info.*
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
-/src/hydra-eval-jobs/hydra-eval-jobs
-/src/root/static/bootstrap
-/src/root/static/js/flot
-/tests
-/doc/manual/images
-/doc/manual/manual.html
-/doc/manual/manual.pdf
-/t/.bzr*
-/t/.git*
-/t/.hg*
-/t/nix
-/t/data
-/t/jobs/config.nix
-t/jobs/declarative/project.json
-/inst
-hydra-config.h
-hydra-config.h.in
+.hydra-data
result
+result-*
outputs
-config
-stamp-h1
-src/hydra-evaluator/hydra-evaluator
-src/hydra-queue-runner/hydra-queue-runner
-src/root/static/fontawesome/
-src/root/static/bootstrap*/
diff --git a/.yath.rc b/.yath.rc
deleted file mode 100644
index 19bb35af..00000000
--- a/.yath.rc
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
--I=rel(t/lib)
diff --git a/Makefile.am b/Makefile.am
deleted file mode 100644
index e744cc33..00000000
--- a/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-SUBDIRS = src t doc
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
-EXTRA_DIST = hydra-module.nix
-
-install-data-local: hydra-module.nix
- $(INSTALL) -d $(DESTDIR)$(datadir)/nix
- $(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
diff --git a/README.md b/README.md
index 54cb9a93..54b95549 100644
--- a/README.md
+++ b/README.md
@@ -39,16 +39,16 @@ In order to evaluate and build anything you need to create _projects_ that conta
#### Creating A Project
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:
-- **Identifier**: `hello`
+- **Identifier**: `hello-project`
- **Display name**: `hello`
- **Description**: `hello project`
Click "_Create project_".
#### Creating A Jobset
-After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values:
+After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values:
-- **Identifier**: `hello`
+- **Identifier**: `hello-project`
- **Nix expression**: `examples/hello.nix` in `hydra`
- **Check interval**: 60
- **Scheduling shares**: 1
@@ -57,7 +57,7 @@ We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_
- **Input name**: `nixpkgs`
- **Type**: `Git checkout`
-- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03`
+- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`
- **Input name**: `hydra`
- **Type**: `Git checkout`
@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
```
-$ nix-build
+$ nix build
```
### Development Environment
You can use the provided shell.nix to get a working development environment:
```
-$ nix-shell
-$ ./bootstrap
-$ configurePhase # NOTE: not ./configure
-$ make
+$ nix develop
+$ mesonConfigurePhase
+$ ninja
```
### Executing Hydra During Development
@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
can be done using [foreman](https://github.com/ddollar/foreman):
```
-$ nix-shell
+$ nix develop
$ # hack hack
-$ make
+$ ninja -C build
$ foreman start
```
@@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environme
Then, you can run the tests and the perlcritic linter together with:
```console
-$ nix-shell
-$ make check
+$ nix develop
+$ ninja -C build test
```
You can run a single test with:
```
-$ nix-shell
-$ yath test ./t/foo/bar.t
+$ nix develop
+$ cd build
+$ meson test --test-args=../t/Hydra/Event.t testsuite
```
And you can run just perlcritic with:
```
-$ nix-shell
-$ make perlcritic
+$ nix develop
+$ cd build
+$ meson test perlcritic
```
### JSON API
@@ -140,7 +141,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
## Additional Resources
- [Hydra User's Guide](https://nixos.org/hydra/manual/)
-- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
+- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
diff --git a/bootstrap b/bootstrap
deleted file mode 100755
index 091b0ee4..00000000
--- a/bootstrap
+++ /dev/null
@@ -1,2 +0,0 @@
-#! /bin/sh -e
-exec autoreconf -vfi
diff --git a/configure.ac b/configure.ac
deleted file mode 100644
index 0c823696..00000000
--- a/configure.ac
+++ /dev/null
@@ -1,85 +0,0 @@
-AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])])
-AC_CONFIG_AUX_DIR(config)
-AM_INIT_AUTOMAKE([foreign serial-tests])
-
-AC_LANG([C++])
-
-AC_PROG_CC
-AC_PROG_INSTALL
-AC_PROG_LN_S
-AC_PROG_LIBTOOL
-AC_PROG_CXX
-
-CXXFLAGS+=" -std=c++17"
-
-AC_PATH_PROG([XSLTPROC], [xsltproc])
-
-AC_ARG_WITH([docbook-xsl],
- [AS_HELP_STRING([--with-docbook-xsl=PATH],
- [path of the DocBook XSL stylesheets])],
- [docbookxsl="$withval"],
- [docbookxsl="/docbook-xsl-missing"])
-AC_SUBST([docbookxsl])
-
-
-AC_DEFUN([NEED_PROG],
-[
-AC_PATH_PROG($1, $2)
-if test -z "$$1"; then
- AC_MSG_ERROR([$2 is required])
-fi
-])
-
-NEED_PROG(perl, perl)
-
-NEED_PROG([NIX_STORE_PROGRAM], [nix-store])
-
-AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
-if test -n "$NIX_STORE" -a -n "$TMPDIR"
-then
- # This may be executed from within a build chroot, so pacify
- # `nix-store' instead of letting it choke while trying to mkdir
- # /nix/var.
- NIX_STATE_DIR="$TMPDIR"
- export NIX_STATE_DIR
-fi
-if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
- AC_MSG_RESULT([yes])
-else
- AC_MSG_RESULT([no])
- AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
-fi
-
-PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
-
-testPath="$(dirname $(type -p expr))"
-AC_SUBST(testPath)
-
-jobsPath="$(realpath ./t/jobs)"
-AC_SUBST(jobsPath)
-
-CXXFLAGS+=" -include nix/config.h"
-
-AC_CONFIG_FILES([
- Makefile
- doc/Makefile
- doc/manual/Makefile
- src/Makefile
- src/hydra-evaluator/Makefile
- src/hydra-eval-jobs/Makefile
- src/hydra-queue-runner/Makefile
- src/sql/Makefile
- src/ttf/Makefile
- src/lib/Makefile
- src/root/Makefile
- src/script/Makefile
- t/Makefile
- t/jobs/config.nix
- t/jobs/declarative/project.json
-])
-
-AC_CONFIG_COMMANDS([executable-scripts], [])
-
-AC_CONFIG_HEADER([hydra-config.h])
-
-AC_OUTPUT
diff --git a/default.nix b/default.nix
index d4c7ec29..b81119c3 100644
--- a/default.nix
+++ b/default.nix
@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`
-(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
+(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
src = ./.;
}).defaultNix
diff --git a/doc/Makefile.am b/doc/Makefile.am
deleted file mode 100644
index 9ac91d24..00000000
--- a/doc/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-SUBDIRS = manual
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
-
diff --git a/doc/manual/Makefile.am b/doc/manual/Makefile.am
deleted file mode 100644
index ec732166..00000000
--- a/doc/manual/Makefile.am
+++ /dev/null
@@ -1,6 +0,0 @@
-MD_FILES = src/*.md
-
-EXTRA_DIST = $(MD_FILES)
-
-install: $(MD_FILES)
- mdbook build . -d $(docdir)
diff --git a/doc/manual/meson.build b/doc/manual/meson.build
new file mode 100644
index 00000000..11178809
--- /dev/null
+++ b/doc/manual/meson.build
@@ -0,0 +1,36 @@
+srcs = files(
+ 'src/SUMMARY.md',
+ 'src/about.md',
+ 'src/api.md',
+ 'src/configuration.md',
+ 'src/hacking.md',
+ 'src/installation.md',
+ 'src/introduction.md',
+ 'src/jobs.md',
+ 'src/monitoring/README.md',
+ 'src/notifications.md',
+ 'src/plugins/README.md',
+ 'src/plugins/RunCommand.md',
+ 'src/plugins/declarative-projects.md',
+ 'src/projects.md',
+ 'src/webhooks.md',
+)
+
+manual = custom_target(
+ 'manual',
+ command: [
+ mdbook,
+ 'build',
+ '@SOURCE_ROOT@/doc/manual',
+ '-d', meson.current_build_dir() / 'html'
+ ],
+ depend_files: srcs,
+ output: ['html'],
+ build_by_default: true,
+)
+
+install_subdir(
+ manual.full_path(),
+ install_dir: get_option('datadir') / 'doc/hydra',
+ strip_directory: true,
+)
diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md
index 2700625d..bd8141a3 100644
--- a/doc/manual/src/configuration.md
+++ b/doc/manual/src/configuration.md
@@ -51,10 +51,12 @@ base_uri example.com
`base_uri` should be your Hydra server's proxied URL. If you are using the
Hydra NixOS module then setting the `hydraURL` option should be enough.
-If you want to serve Hydra with a prefix path, for example
-[http://example.com/hydra]() then you need to configure your reverse
-proxy to pass `X-Request-Base` to hydra, with prefix path as value. For
-example if you are using nginx, then use configuration similar to
+You also need to configure your reverse proxy to pass `X-Request-Base`
+to hydra, with the same value as `base_uri`.
+This also covers the case of serving Hydra with a prefix path,
+as in [http://example.com/hydra]().
+
+For example, if you are using nginx, use a configuration similar to the
following:
server {
@@ -63,8 +65,7 @@ following:
.. other configuration ..
location /hydra/ {
- proxy_pass http://127.0.0.1:3000;
- proxy_redirect http://127.0.0.1:3000 https://example.com/hydra;
+ proxy_pass http://127.0.0.1:3000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@@ -74,6 +75,33 @@ following:
}
}
+Note the trailing slash on the `proxy_pass` directive, which causes nginx to
+strip off the `/hydra/` part of the URL before passing it to hydra.
+
+Populating a Cache
+------------------
+
+A common use for Hydra is to pre-build and cache derivations which
+take a long time to build. While it is possible to directly access the
+Hydra server's store over SSH, a more scalable option is to upload
+built derivations to a remote store like an [S3-compatible object
+store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
+the `store_uri` parameter will cause Hydra to sign and upload
+derivations as they are built:
+
+```
+store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
+```
+
+This example uses [Zstandard](https://github.com/facebook/zstd)
+compression on derivations to reduce CPU usage on the server, but
+[Brotli](https://brotli.org/) compression for derivation listings and
+build logs because it has better browser support.
+
+See [`nix help
+stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
+for a description of the store URI format.
+
Statsd Configuration
--------------------
@@ -131,8 +159,8 @@ use LDAP to manage roles and users.
This is configured by defining the `` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
-The documentation for the available settings can be found [here]
-(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
+The documentation for the available settings can be found
+[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
@@ -179,13 +207,15 @@ Example configuration:
deref = always
+
# Make all users in the hydra_admin group Hydra admins
hydra_admin = admin
- # Allow all users in the dev group to restart jobs and cancel builds
+ # Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
+ dev = eval-jobset
dev = restart-jobs
- dev = cancel-builds
+ dev = cancel-build
```
diff --git a/doc/manual/src/hacking.md b/doc/manual/src/hacking.md
index 9d98b00c..8b2b13ba 100644
--- a/doc/manual/src/hacking.md
+++ b/doc/manual/src/hacking.md
@@ -12,24 +12,26 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
and dependencies can be found:
```console
-$ nix-shell
+$ nix develop
```
To build Hydra, you should then do:
```console
-[nix-shell]$ ./bootstrap
-[nix-shell]$ configurePhase
-[nix-shell]$ make
+$ mesonConfigurePhase
+$ ninja
```
You start a local database, the webserver, and other components with
foreman:
```console
+$ ninja -C build
$ foreman start
```
+The Hydra interface will be available on port 63333, with an admin user named "alice" whose password is "foobar".
+
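+An additional admin account can be created from inside the development shell
+with the `hydra-create-user` script. A minimal sketch, reusing the flags that
+the NixOS tests in this repository pass (the user name and e-mail address are
+only examples):
+
+```console
+$ ./src/script/hydra-create-user bob --email-address 'bob@example.org' --password foobar --role admin
+```
+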
You can run just the Hydra web server in your source tree as follows:
```console
@@ -39,18 +41,11 @@ $ ./src/script/hydra-server
You can run Hydra's test suite with the following:
```console
-[nix-shell]$ make check
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
-[nix-shell]$ # or run yath directly:
-[nix-shell]$ yath test
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ yath test -j $NIX_BUILD_CORES
+$ meson test
+# to run as many tests as you have cores:
+$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
```
-When using `yath` instead of `make check`, ensure you have run `make`
-in the root of the repository at least once.
-
**Warning**: Currently, the tests can fail
if run with high parallelism [due to an issue in
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
@@ -67,7 +62,7 @@ will reload the page every time you save.
To build Hydra and its dependencies:
```console
-$ nix-build release.nix -A build.x86_64-linux
+$ nix build .#packages.x86_64-linux.default
```
## Development Tasks
@@ -92,7 +87,7 @@ On NixOS:
```nix
{
- nix.trustedUsers = [ "YOURUSER" ];
+ nix.settings.trusted-users = [ "YOURUSER" ];
}
```
diff --git a/doc/manual/src/installation.md b/doc/manual/src/installation.md
index cbf3f907..39a86885 100644
--- a/doc/manual/src/installation.md
+++ b/doc/manual/src/installation.md
@@ -48,7 +48,7 @@ Getting Nix
If your server runs NixOS you are all set to continue with installation
of Hydra. Otherwise you first need to install Nix. The latest stable
version can be found on [the Nix web
-site](http://nixos.org/nix/download.html), along with a manual, which
+site](https://nixos.org/download/), along with a manual, which
includes installation instructions.
Installation
diff --git a/doc/manual/src/plugins/README.md b/doc/manual/src/plugins/README.md
index 26ee2649..93aa80b4 100644
--- a/doc/manual/src/plugins/README.md
+++ b/doc/manual/src/plugins/README.md
@@ -42,7 +42,7 @@ Sets CircleCI status.
## Compress build logs
-Compresses build logs after a build with bzip2.
+Compresses build logs after a build with bzip2 or zstd.
### Configuration options
@@ -50,6 +50,14 @@ Compresses build logs after a build with bzip2.
Enable log compression
+- `compress_build_logs_compression`
+
+Which compression format to use. Valid values are bzip2 (default) and zstd.
+
+- `compress_build_logs_silent`
+
+Whether to compress logs silently.
+
### Example
```xml
@@ -172,17 +180,6 @@ Sets Gitlab CI status.
- `gitlab_authorization.`
-## HipChat notification
-
-Sends hipchat chat notifications when a build finish.
-
-### Configuration options
-
-- `hipchat.[].jobs`
-- `hipchat.[].builds`
-- `hipchat.[].token`
-- `hipchat.[].notify`
-
## InfluxDB notification
Writes InfluxDB events when a build finishes.
diff --git a/doc/manual/src/projects.md b/doc/manual/src/projects.md
index 95174f1b..f7c4975f 100644
--- a/doc/manual/src/projects.md
+++ b/doc/manual/src/projects.md
This section describes how it can be implemented for `gitea`, but the approach for other services is
analogous:
* [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication)
-* Add it to your `hydra.conf` like this:
+* Add it to a file which only users in the hydra group can read, like this (see [including files](configuration.md#including-files) for more information):
+ ```
+
+ your_username=your_token
+
+ ```
+
+* Include the file in your `hydra.conf` like this:
``` nix
{
services.hydra-dev.extraConfig = ''
-
- your_username=your_token
-
+ Include /path/to/secret/file
'';
}
```
@@ -399,3 +404,10 @@ analogous:
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
+Content-addressed derivations
+-----------------------------
+
+Hydra can, to a certain extent, use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
+To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.
+
+Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken.
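+
+For example, a minimal sketch of enabling the feature on the machine running
+Hydra (exact placement depends on your configuration; this uses the standard
+NixOS `nix.settings` option):
+
+``` nix
+{
+  # keep any experimental features you already rely on, and add ca-derivations
+  nix.settings.experimental-features = [ "nix-command" "flakes" "ca-derivations" ];
+}
+```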
diff --git a/doc/manual/src/webhooks.md b/doc/manual/src/webhooks.md
index 2b26cd61..674e1064 100644
--- a/doc/manual/src/webhooks.md
+++ b/doc/manual/src/webhooks.md
@@ -1,9 +1,12 @@
# Webhooks
-Hydra can be notified by github's webhook to trigger a new evaluation when a
+Hydra can be notified by GitHub or Gitea webhooks to trigger a new evaluation when a
jobset has a github repo in its input.
-To set up a github webhook go to `https://github.com///settings` and in the `Webhooks` tab
-click on `Add webhook`.
+
+## GitHub
+
+To set up a webhook for a GitHub repository go to `https://github.com///settings`
+and in the `Webhooks` tab click on `Add webhook`.
- In `Payload URL` fill in `https:///api/push-github`.
- In `Content type` switch to `application/json`.
@@ -11,3 +14,14 @@ click on `Add webhook`.
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
Then add the hook with `Add webhook`.
+
+## Gitea
+
+To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
+and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
+
+- In `Target URL` fill in `https:///api/push-gitea`.
+- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
+- Change the branch filter to match the git branch hydra builds.
+
+Then add the hook with `Add webhook`.
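+
+Independent of webhooks, an evaluation can also be requested directly through
+the JSON API described in `hydra-api.yaml`. A rough sketch (the `jobsets`
+query parameter and the host and jobset names here are illustrative
+assumptions):
+
+```console
+$ curl -X POST 'https://your-hydra-instance/api/push?jobsets=hello-project:hello-project'
+```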
diff --git a/examples/hello.nix b/examples/hello.nix
index 5a5d2585..84707025 100644
--- a/examples/hello.nix
+++ b/examples/hello.nix
@@ -1,5 +1,5 @@
#
-# jobset example file. This file canbe referenced as Nix expression
+# jobset example file. This file can be referenced as Nix expression
# in a jobset configuration along with inputs for nixpkgs and the
# repository containing this file.
#
diff --git a/flake.lock b/flake.lock
index 5c726a24..0ca074f3 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,94 +1,59 @@
{
"nodes": {
- "lowdown-src": {
+ "nix": {
"flake": false,
"locked": {
- "lastModified": 1633514407,
- "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
- "owner": "kristapsdz",
- "repo": "lowdown",
- "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
- "type": "github"
- },
- "original": {
- "owner": "kristapsdz",
- "repo": "lowdown",
- "type": "github"
- }
- },
- "newNixpkgs": {
- "locked": {
- "lastModified": 1647380550,
- "narHash": "sha256-909TI9poX7CIUiFx203WL29YON6m/I6k0ExbZvR7bLM=",
- "owner": "NixOS",
- "repo": "nixpkgs",
- "rev": "6e3ee8957637a60f5072e33d78e05c0f65c54366",
- "type": "github"
- },
- "original": {
- "owner": "NixOS",
- "ref": "nixos-unstable-small",
- "repo": "nixpkgs",
- "type": "github"
- }
- },
- "nix": {
- "inputs": {
- "lowdown-src": "lowdown-src",
- "nixpkgs": "nixpkgs",
- "nixpkgs-regression": "nixpkgs-regression"
- },
- "locked": {
- "lastModified": 1649172203,
- "narHash": "sha256-Q3nYaXqbseDOvZrlePKeIrx0/KzqyrtNpxHIUbtFHuI=",
+ "lastModified": 1750777360,
+ "narHash": "sha256-nDWFxwhT+fQNgi4rrr55EKjpxDyVKSl1KaNmSXtYj40=",
"owner": "NixOS",
"repo": "nix",
- "rev": "5fe4fe823c193cbb7bfa05a468de91eeab09058d",
+ "rev": "7bb200199705eddd53cb34660a76567c6f1295d9",
"type": "github"
},
"original": {
- "id": "nix",
- "type": "indirect"
+ "owner": "NixOS",
+ "ref": "2.29-maintenance",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nix-eval-jobs": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1748680938,
+ "narHash": "sha256-TQk6pEMD0mFw7jZXpg7+2qNKGbAluMQgc55OMgEO8bM=",
+ "owner": "nix-community",
+ "repo": "nix-eval-jobs",
+ "rev": "974a4af3d4a8fd242d8d0e2608da4be87a62b83f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-community",
+ "repo": "nix-eval-jobs",
+ "type": "github"
}
},
"nixpkgs": {
"locked": {
- "lastModified": 1645296114,
- "narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=",
+ "lastModified": 1750736827,
+ "narHash": "sha256-UcNP7BR41xMTe0sfHBH8R79+HdCw0OwkC/ZKrQEuMeo=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1",
+ "rev": "b4a30b08433ad7b6e1dfba0833fb0fe69d43dfec",
"type": "github"
},
"original": {
- "id": "nixpkgs",
- "ref": "nixos-21.05-small",
- "type": "indirect"
- }
- },
- "nixpkgs-regression": {
- "locked": {
- "lastModified": 1643052045,
- "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
+ "ref": "nixos-25.05-small",
"repo": "nixpkgs",
- "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
- },
- "original": {
- "id": "nixpkgs",
- "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
- "type": "indirect"
}
},
"root": {
"inputs": {
- "newNixpkgs": "newNixpkgs",
"nix": "nix",
- "nixpkgs": [
- "nix",
- "nixpkgs"
- ]
+ "nix-eval-jobs": "nix-eval-jobs",
+ "nixpkgs": "nixpkgs"
}
}
},
diff --git a/flake.nix b/flake.nix
index 01b0c988..e67a3a99 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,1023 +1,130 @@
{
description = "A Nix-based continuous build system";
- # FIXME: All the pinned versions of nix/nixpkgs have a broken foreman (yes,
- # even 2.7.0's Nixpkgs pin).
- inputs.newNixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small";
- inputs.nixpkgs.follows = "nix/nixpkgs";
- #inputs.nix.url = github:NixOS/nix/2.7.0;
+ inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small";
- outputs = { self, newNixpkgs, nixpkgs, nix }:
+ inputs.nix = {
+ url = "github:NixOS/nix/2.29-maintenance";
+ # We want to control the deps precisely
+ flake = false;
+ };
+
+ inputs.nix-eval-jobs = {
+ url = "github:nix-community/nix-eval-jobs";
+ # We want to control the deps precisely
+ flake = false;
+ };
+
+ outputs = { self, nixpkgs, nix, nix-eval-jobs, ... }:
let
-
- version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";
-
- pkgs = import nixpkgs {
- system = "x86_64-linux";
- overlays = [ self.overlay nix.overlay ];
- };
-
- # NixOS configuration used for VM tests.
- hydraServer =
- { config, pkgs, ... }:
- {
- imports = [ self.nixosModules.hydraTest ];
-
- virtualisation.memorySize = 1024;
- virtualisation.writableStore = true;
-
- environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
-
- nix = {
- # Without this nix tries to fetch packages from the default
- # cache.nixos.org which is not reachable from this sandboxed NixOS test.
- binaryCaches = [ ];
- };
- };
-
+ systems = [ "x86_64-linux" "aarch64-linux" ];
+ forEachSystem = nixpkgs.lib.genAttrs systems;
in
rec {
# A Nixpkgs overlay that provides a 'hydra' package.
- overlay = final: prev: {
-
- # Overlay these packages to use dependencies from the Nixpkgs everything
- # else uses, to side-step the version difference: glibc is 2.32 in the
- # nix-pinned Nixpkgs, but 2.33 in the newNixpkgs commit.
- civetweb = (final.callPackage "${newNixpkgs}/pkgs/development/libraries/civetweb" { }).overrideAttrs
- # Can be dropped once newNixpkgs points to a revision containing
- # https://github.com/NixOS/nixpkgs/pull/167751
- ({ cmakeFlags ? [ ], ... }: {
- cmakeFlags = cmakeFlags ++ [
- "-DCIVETWEB_ENABLE_IPV6=1"
- ];
+ overlays.default = final: prev: {
+ nixDependenciesForHydra = final.lib.makeScope final.newScope
+ (import (nix + "/packaging/dependencies.nix") {
+ pkgs = final;
+ inherit (final) stdenv;
+ inputs = {};
});
- prometheus-cpp = final.callPackage "${newNixpkgs}/pkgs/development/libraries/prometheus-cpp" { };
-
- # Add LDAP dependencies that aren't currently found within nixpkgs.
- perlPackages = prev.perlPackages // {
- TestPostgreSQL = final.perlPackages.buildPerlModule {
- pname = "Test-PostgreSQL";
- version = "1.28-1";
- src = final.fetchFromGitHub {
- owner = "grahamc";
- repo = "Test-postgresql";
- rev = "release-1.28-1";
- hash = "sha256-SFC1C3q3dbcBos18CYd/s0TIcfJW4g04ld0+XQXVToQ=";
- };
- buildInputs = with final.perlPackages; [ ModuleBuildTiny TestSharedFork pkgs.postgresql ];
- propagatedBuildInputs = with final.perlPackages; [ DBDPg DBI FileWhich FunctionParameters Moo TieHashMethod TryTiny TypeTiny ];
-
- makeMakerFlags = "POSTGRES_HOME=${final.postgresql}";
-
- meta = {
- homepage = "https://github.com/grahamc/Test-postgresql/releases/tag/release-1.28-1";
- description = "PostgreSQL runner for tests";
- license = with final.lib.licenses; [ artistic2 ];
- };
- };
-
- FunctionParameters = final.perlPackages.buildPerlPackage {
- pname = "Function-Parameters";
- version = "2.001003";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/M/MA/MAUKE/Function-Parameters-2.001003.tar.gz";
- sha256 = "eaa22c6b43c02499ec7db0758c2dd218a3b2ab47a714b2bdf8010b5ee113c242";
- };
- buildInputs = with final.perlPackages; [ DirSelf TestFatal ];
- meta = {
- description = "Define functions and methods with parameter lists (\"subroutine signatures\")";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- CatalystPluginPrometheusTiny = final.perlPackages.buildPerlPackage {
- pname = "Catalyst-Plugin-PrometheusTiny";
- version = "0.005";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/S/SY/SYSPETE/Catalyst-Plugin-PrometheusTiny-0.005.tar.gz";
- sha256 = "a42ef09efdc3053899ae007c41220d3ed7207582cc86e491b4f534539c992c5a";
- };
- buildInputs = with final.perlPackages; [ HTTPMessage Plack SubOverride TestDeep ];
- propagatedBuildInputs = with final.perlPackages; [ CatalystRuntime Moose PrometheusTiny PrometheusTinyShared ];
- meta = {
- description = "Prometheus metrics for Catalyst";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- CryptArgon2 = final.perlPackages.buildPerlModule {
- pname = "Crypt-Argon2";
- version = "0.010";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Argon2-0.010.tar.gz";
- sha256 = "3ea1c006f10ef66fd417e502a569df15c4cc1c776b084e35639751c41ce6671a";
- };
- nativeBuildInputs = [ pkgs.ld-is-cc-hook ];
- meta = {
- description = "Perl interface to the Argon2 key derivation functions";
- license = final.lib.licenses.cc0;
- };
- };
-
- CryptPassphrase = final.perlPackages.buildPerlPackage {
- pname = "Crypt-Passphrase";
- version = "0.003";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Passphrase-0.003.tar.gz";
- sha256 = "685aa090f8179a86d6896212ccf8ccfde7a79cce857199bb14e2277a10d240ad";
- };
- meta = {
- description = "A module for managing passwords in a cryptographically agile manner";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- CryptPassphraseArgon2 = final.perlPackages.buildPerlPackage {
- pname = "Crypt-Passphrase-Argon2";
- version = "0.002";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/L/LE/LEONT/Crypt-Passphrase-Argon2-0.002.tar.gz";
- sha256 = "3906ff81697d13804ee21bd5ab78ffb1c4408b4822ce020e92ecf4737ba1f3a8";
- };
- propagatedBuildInputs = with final.perlPackages; [ CryptArgon2 CryptPassphrase ];
- meta = {
- description = "An Argon2 encoder for Crypt::Passphrase";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- DataRandom = final.perlPackages.buildPerlPackage {
- pname = "Data-Random";
- version = "0.13";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/B/BA/BAREFOOT/Data-Random-0.13.tar.gz";
- sha256 = "eb590184a8db28a7e49eab09e25f8650c33f1f668b6a472829de74a53256bfc0";
- };
- buildInputs = with final.perlPackages; [ FileShareDirInstall TestMockTime ];
- meta = {
- description = "Perl module to generate random data";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- DirSelf = final.perlPackages.buildPerlPackage {
- pname = "Dir-Self";
- version = "0.11";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/M/MA/MAUKE/Dir-Self-0.11.tar.gz";
- sha256 = "e251a51abc7d9ba3e708f73c2aa208e09d47a0c528d6254710fa78cc8d6885b5";
- };
- meta = {
- homepage = "https://github.com/mauke/Dir-Self";
- description = "A __DIR__ constant for the directory your source file is in";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- HashSharedMem = final.perlPackages.buildPerlModule {
- pname = "Hash-SharedMem";
- version = "0.005";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/Z/ZE/ZEFRAM/Hash-SharedMem-0.005.tar.gz";
- sha256 = "324776808602f7bdc44adaa937895365454029a926fa611f321c9bf6b940bb5e";
- };
- buildInputs = with final.perlPackages; [ ScalarString ];
- meta = {
- description = "Efficient shared mutable hash";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- PrometheusTiny = final.perlPackages.buildPerlPackage {
- pname = "Prometheus-Tiny";
- version = "0.007";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-0.007.tar.gz";
- sha256 = "0ef8b226a2025cdde4df80129dd319aa29e884e653c17dc96f4823d985c028ec";
- };
- buildInputs = with final.perlPackages; [ HTTPMessage Plack TestException ];
- meta = {
- homepage = "https://github.com/robn/Prometheus-Tiny";
- description = "A tiny Prometheus client";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- PrometheusTinyShared = final.perlPackages.buildPerlPackage {
- pname = "Prometheus-Tiny-Shared";
- version = "0.023";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-Shared-0.023.tar.gz";
- sha256 = "7c2c72397be5d8e4839d1bf4033c1800f467f2509689673c6419df48794f2abe";
- };
- buildInputs = with final.perlPackages; [ DataRandom HTTPMessage Plack TestDifferences TestException ];
- propagatedBuildInputs = with final.perlPackages; [ HashSharedMem JSONXS PrometheusTiny ];
- meta = {
- homepage = "https://github.com/robn/Prometheus-Tiny-Shared";
- description = "A tiny Prometheus client with a shared database behind it";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- ReadonlyX = final.perlPackages.buildPerlModule {
- pname = "ReadonlyX";
- version = "1.04";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/S/SA/SANKO/ReadonlyX-1.04.tar.gz";
- sha256 = "81bb97dba93ac6b5ccbce04a42c3590eb04557d75018773ee18d5a30fcf48188";
- };
- buildInputs = with final.perlPackages; [ ModuleBuildTiny TestFatal ];
- meta = {
- homepage = "https://github.com/sanko/readonly";
- description = "Faster facility for creating read-only scalars, arrays, hashes";
- license = final.lib.licenses.artistic2;
- };
- };
-
- TieHashMethod = final.perlPackages.buildPerlPackage {
- pname = "Tie-Hash-Method";
- version = "0.02";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/Y/YV/YVES/Tie-Hash-Method-0.02.tar.gz";
- sha256 = "d513fbb51413f7ca1e64a1bdce6194df7ec6076dea55066d67b950191eec32a9";
- };
- meta = {
- description = "Tied hash with specific methods overriden by callbacks";
- license = with final.lib.licenses; [ artistic1 ];
- };
- };
-
- Test2Harness = final.perlPackages.buildPerlPackage {
- pname = "Test2-Harness";
- version = "1.000042";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Harness-1.000042.tar.gz";
- sha256 = "aaf231a68af1a6ffd6a11188875fcf572e373e43c8285945227b9d687b43db2d";
- };
-
- checkPhase = ''
- patchShebangs ./t ./scripts/yath
- ./scripts/yath test -j $NIX_BUILD_CORES
- '';
-
- propagatedBuildInputs = with final.perlPackages; [ DataUUID Importer LongJump ScopeGuard TermTable Test2PluginMemUsage Test2PluginUUID Test2Suite gotofile ];
- meta = {
- description = "A new and improved test harness with better Test2 integration";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- Test2PluginMemUsage = prev.perlPackages.buildPerlPackage {
- pname = "Test2-Plugin-MemUsage";
- version = "0.002003";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-MemUsage-0.002003.tar.gz";
- sha256 = "5e0662d5a823ae081641f5ce82843111eec1831cd31f883a6c6de54afdf87c25";
- };
- buildInputs = with final.perlPackages; [ Test2Suite ];
- meta = {
- description = "Collect and display memory usage information";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- Test2PluginUUID = prev.perlPackages.buildPerlPackage {
- pname = "Test2-Plugin-UUID";
- version = "0.002001";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/E/EX/EXODIST/Test2-Plugin-UUID-0.002001.tar.gz";
- sha256 = "4c6c8d484d7153d8779dc155a992b203095b5c5aa1cfb1ee8bcedcd0601878c9";
- };
- buildInputs = with final.perlPackages;[ Test2Suite ];
- propagatedBuildInputs = with final.perlPackages; [ DataUUID ];
- meta = {
- description = "Use REAL UUIDs in Test2";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- LongJump = final.perlPackages.buildPerlPackage {
- pname = "Long-Jump";
- version = "0.000001";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/E/EX/EXODIST/Long-Jump-0.000001.tar.gz";
- sha256 = "d5d6456d86992b559d8f66fc90960f919292cd3803c13403faac575762c77af4";
- };
- buildInputs = with final.perlPackages; [ Test2Suite ];
- meta = {
- description = "Mechanism for returning to a specific point from a deeply nested stack";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- gotofile = final.perlPackages.buildPerlPackage {
- pname = "goto-file";
- version = "0.005";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/E/EX/EXODIST/goto-file-0.005.tar.gz";
- sha256 = "c6cdd5ee4a6cdcbdbf314d92a4f9985dbcdf9e4258048cae76125c052aa31f77";
- };
- buildInputs = with final.perlPackages; [ Test2Suite ];
- meta = {
- description = "Stop parsing the current file and move on to a different one";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- NetLDAPServer = prev.perlPackages.buildPerlPackage {
- pname = "Net-LDAP-Server";
- version = "0.43";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/A/AA/AAR/Net-LDAP-Server-0.43.tar.gz";
- sha256 = "0qmh3cri3fpccmwz6bhwp78yskrb3qmalzvqn0a23hqbsfs4qv6x";
- };
- propagatedBuildInputs = with final.perlPackages; [ NetLDAP ConvertASN1 ];
- meta = {
- description = "LDAP server side protocol handling";
- license = with final.lib.licenses; [ artistic1 ];
- };
- };
-
- NetLDAPSID = prev.perlPackages.buildPerlPackage {
- pname = "Net-LDAP-SID";
- version = "0.0001";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-SID-0.001.tar.gz";
- sha256 = "1mnnpkmj8kpb7qw50sm8h4sd8py37ssy2xi5hhxzr5whcx0cvhm8";
- };
- meta = {
- description = "Active Directory Security Identifier manipulation";
- license = with final.lib.licenses; [ artistic2 ];
- };
- };
-
- NetLDAPServerTest = prev.perlPackages.buildPerlPackage {
- pname = "Net-LDAP-Server-Test";
- version = "0.22";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-Server-Test-0.22.tar.gz";
- sha256 = "13idip7jky92v4adw60jn2gcc3zf339gsdqlnc9nnvqzbxxp285i";
- };
- propagatedBuildInputs = with final.perlPackages; [ NetLDAP NetLDAPServer TestMore DataDump NetLDAPSID ];
- meta = {
- description = "test Net::LDAP code";
- license = with final.lib.licenses; [ artistic1 ];
- };
- };
-
- CatalystAuthenticationStoreLDAP = prev.perlPackages.buildPerlPackage {
- pname = "Catalyst-Authentication-Store-LDAP";
- version = "1.016";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/I/IL/ILMARI/Catalyst-Authentication-Store-LDAP-1.016.tar.gz";
- sha256 = "0cm399vxqqf05cjgs1j5v3sk4qc6nmws5nfhf52qvpbwc4m82mq8";
- };
- propagatedBuildInputs = with final.perlPackages; [ NetLDAP CatalystPluginAuthentication ClassAccessorFast ];
- buildInputs = with final.perlPackages; [ TestMore TestMockObject TestException NetLDAPServerTest ];
- meta = {
- description = "Authentication from an LDAP Directory";
- license = with final.lib.licenses; [ artistic1 ];
- };
- };
-
- PerlCriticCommunity = prev.perlPackages.buildPerlModule {
- pname = "Perl-Critic-Community";
- version = "1.0.0";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/D/DB/DBOOK/Perl-Critic-Community-v1.0.0.tar.gz";
- sha256 = "311b775da4193e9de94cf5225e993cc54dd096ae1e7ef60738cdae1d9b8854e7";
- };
- buildInputs = with final.perlPackages; [ ModuleBuildTiny ];
- propagatedBuildInputs = with final.perlPackages; [ PPI PathTiny PerlCritic PerlCriticPolicyVariablesProhibitLoopOnHash PerlCriticPulp ];
- meta = {
- homepage = "https://github.com/Grinnz/Perl-Critic-Freenode";
- description = "Community-inspired Perl::Critic policies";
- license = final.lib.licenses.artistic2;
- };
- };
-
- PerlCriticPolicyVariablesProhibitLoopOnHash = prev.perlPackages.buildPerlPackage {
- pname = "Perl-Critic-Policy-Variables-ProhibitLoopOnHash";
- version = "0.008";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/X/XS/XSAWYERX/Perl-Critic-Policy-Variables-ProhibitLoopOnHash-0.008.tar.gz";
- sha256 = "12f5f0be96ea1bdc7828058577bd1c5c63ca23c17fac9c3709452b3dff5b84e0";
- };
- propagatedBuildInputs = with final.perlPackages; [ PerlCritic ];
- meta = {
- description = "Don't write loops on hashes, only on keys and values of hashes";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- PerlCriticPulp = prev.perlPackages.buildPerlPackage {
- pname = "Perl-Critic-Pulp";
- version = "99";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/K/KR/KRYDE/Perl-Critic-Pulp-99.tar.gz";
- sha256 = "b8fda842fcbed74d210257c0a284b6dc7b1d0554a47a3de5d97e7d542e23e7fe";
- };
- propagatedBuildInputs = with final.perlPackages; [ IOString ListMoreUtils PPI PerlCritic PodMinimumVersion ];
- meta = {
- homepage = "http://user42.tuxfamily.org/perl-critic-pulp/index.html";
- description = "Some add-on policies for Perl::Critic";
- license = final.lib.licenses.gpl3Plus;
- };
- };
-
- PodMinimumVersion = prev.perlPackages.buildPerlPackage {
- pname = "Pod-MinimumVersion";
- version = "50";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/K/KR/KRYDE/Pod-MinimumVersion-50.tar.gz";
- sha256 = "0bd2812d9aacbd99bb71fa103a4bb129e955c138ba7598734207dc9fb67b5a6f";
- };
- propagatedBuildInputs = with final.perlPackages; [ IOString PodParser ];
- meta = {
- homepage = "http://user42.tuxfamily.org/pod-minimumversion/index.html";
- description = "Determine minimum Perl version of POD directives";
- license = final.lib.licenses.free;
- };
- };
-
- StringCompareConstantTime = final.perlPackages.buildPerlPackage {
- pname = "String-Compare-ConstantTime";
- version = "0.321";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/F/FR/FRACTAL/String-Compare-ConstantTime-0.321.tar.gz";
- sha256 = "0b26ba2b121d8004425d4485d1d46f59001c83763aa26624dff6220d7735d7f7";
- };
- meta = {
- description = "Timing side-channel protected string compare";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
- UUID4Tiny = final.perlPackages.buildPerlPackage {
- pname = "UUID4-Tiny";
- version = "0.002";
- src = final.fetchurl {
- url = "mirror://cpan/authors/id/C/CV/CVLIBRARY/UUID4-Tiny-0.002.tar.gz";
- sha256 = "e7535b31e386d432dec7adde214348389e1d5cf753e7ed07f1ae04c4360840cf";
- };
- meta = {
- description = "Cryptographically secure v4 UUIDs for Linux x64";
- license = with final.lib.licenses; [ artistic1 gpl1Plus ];
- };
- };
-
+ nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope
+ (import (nix + "/packaging/components.nix") {
+ officialRelease = true;
+ inherit (final) lib;
+ pkgs = final;
+ src = nix;
+ maintainers = [ ];
+ });
+ nix-eval-jobs = final.callPackage nix-eval-jobs {
+ nixComponents = final.nixComponentsForHydra;
};
-
- hydra = with final; let
- perlDeps = buildEnv {
- name = "hydra-perl-deps";
- paths = with perlPackages; lib.closePropagation
- [
- AuthenSASL
- CatalystActionREST
- CatalystAuthenticationStoreDBIxClass
- CatalystAuthenticationStoreLDAP
- CatalystDevel
- CatalystPluginAccessLog
- CatalystPluginAuthorizationRoles
- CatalystPluginCaptcha
- CatalystPluginPrometheusTiny
- CatalystPluginSessionStateCookie
- CatalystPluginSessionStoreFastMmap
- CatalystPluginStackTrace
- CatalystPluginUnicodeEncoding
- CatalystTraitForRequestProxyBase
- CatalystViewDownload
- CatalystViewJSON
- CatalystViewTT
- CatalystXRoleApplicator
- CatalystXScriptServerStarman
- CryptPassphrase
- CryptPassphraseArgon2
- CryptRandPasswd
- DataDump
- DateTime
- DBDPg
- DBDSQLite
- DigestSHA1
- EmailMIME
- EmailSender
- FileSlurper
- FileWhich
- final.nix.perl-bindings
- git
- IOCompress
- IPCRun
- IPCRun3
- JSON
- JSONMaybeXS
- JSONXS
- ListSomeUtils
- LWP
- LWPProtocolHttps
- ModulePluggable
- NetAmazonS3
- NetPrometheus
- NetStatsd
- PadWalker
- ParallelForkManager
- PerlCriticCommunity
- PrometheusTinyShared
- ReadonlyX
- SetScalar
- SQLSplitStatement
- Starman
- StringCompareConstantTime
- SysHostnameLong
- TermSizeAny
- TermReadKey
- Test2Harness
- TestMore
- TestPostgreSQL
- TextDiff
- TextTable
- UUID4Tiny
- YAML
- XMLSimple
- ];
- };
-
- in
- stdenv.mkDerivation {
-
- name = "hydra-${version}";
-
- src = self;
-
- buildInputs =
- [
- makeWrapper
- autoconf
- automake
- libtool
- unzip
- nukeReferences
- pkgconfig
- libpqxx
- gitAndTools.topGit
- mercurial
- darcs
- subversion
- breezy
- openssl
- bzip2
- libxslt
- final.nix
- perlDeps
- perl
- mdbook
- pixz
- boost
- postgresql_13
- (if lib.versionAtLeast lib.version "20.03pre"
- then nlohmann_json
- else nlohmann_json.override { multipleHeaders = true; })
- prometheus-cpp
- ];
-
- checkInputs = [
- cacert
- # FIXME: foreman is broken on all nix/nixpkgs pin, up to and
- # including 2.7.0
- newNixpkgs.legacyPackages.${final.system}.foreman
- glibcLocales
- libressl.nc
- openldap
- python3
- ];
-
- hydraPath = lib.makeBinPath (
- [
- subversion
- openssh
- final.nix
- coreutils
- findutils
- pixz
- gzip
- bzip2
- lzma
- gnutar
- unzip
- git
- gitAndTools.topGit
- mercurial
- darcs
- gnused
- breezy
- ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
- );
-
- OPENLDAP_ROOT = openldap;
-
- shellHook = ''
- pushd $(git rev-parse --show-toplevel) >/dev/null
-
- PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
- PERL5LIB=$(pwd)/src/lib:$PERL5LIB
- export HYDRA_HOME="$(pwd)/src/"
- mkdir -p .hydra-data
- export HYDRA_DATA="$(pwd)/.hydra-data"
- export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
-
- popd >/dev/null
- '';
-
- preConfigure = "autoreconf -vfi";
-
- NIX_LDFLAGS = [ "-lpthread" ];
-
- enableParallelBuilding = true;
-
- doCheck = true;
-
- preCheck = ''
- patchShebangs .
- export LOGNAME=''${LOGNAME:-foo}
- # set $HOME for bzr so it can create its trace file
- export HOME=$(mktemp -d)
- '';
-
- postInstall = ''
- mkdir -p $out/nix-support
-
- for i in $out/bin/*; do
- read -n 4 chars < $i
- if [[ $chars =~ ELF ]]; then continue; fi
- wrapProgram $i \
- --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
- --prefix PATH ':' $out/bin:$hydraPath \
- --set HYDRA_RELEASE ${version} \
- --set HYDRA_HOME $out/libexec/hydra \
- --set NIX_RELEASE ${final.nix.name or "unknown"}
- done
- '';
-
- dontStrip = true;
-
- meta.description = "Build of Hydra on ${system}";
- passthru = { inherit perlDeps; inherit (final) nix; };
+ hydra = final.callPackage ./package.nix {
+ inherit (final.lib) fileset;
+ rawSrc = self;
+ nixComponents = final.nixComponentsForHydra;
};
};
hydraJobs = {
+ build = forEachSystem (system: packages.${system}.hydra);
- build.x86_64-linux = packages.x86_64-linux.hydra;
+ buildNoTests = forEachSystem (system:
+ packages.${system}.hydra.overrideAttrs (_: {
+ doCheck = false;
+ })
+ );
- manual =
- pkgs.runCommand "hydra-manual-${version}" { }
+ manual = forEachSystem (system: let
+ pkgs = nixpkgs.legacyPackages.${system};
+ hydra = self.packages.${pkgs.hostPlatform.system}.hydra;
+ in
+ pkgs.runCommand "hydra-manual-${hydra.version}" { }
''
mkdir -p $out/share
- cp -prvd ${pkgs.hydra}/share/doc $out/share/
+ cp -prvd ${hydra.doc}/share/doc $out/share/
mkdir $out/nix-support
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
- '';
+ '');
- tests.install.x86_64-linux =
- with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
- simpleTest {
- machine = hydraServer;
- testScript =
- ''
- machine.wait_for_job("hydra-init")
- machine.wait_for_job("hydra-server")
- machine.wait_for_job("hydra-evaluator")
- machine.wait_for_job("hydra-queue-runner")
- machine.wait_for_open_port("3000")
- machine.succeed("curl --fail http://localhost:3000/")
- '';
- };
-
- tests.notifications.x86_64-linux =
- with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
- simpleTest {
- machine = { pkgs, ... }: {
- imports = [ hydraServer ];
- services.hydra-dev.extraConfig = ''
-
- url = http://127.0.0.1:8086
- db = hydra
-
- '';
- services.influxdb.enable = true;
- };
- testScript = ''
- machine.wait_for_job("hydra-init")
-
- # Create an admin account and some other state.
- machine.succeed(
- """
- su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
- mkdir /run/jobset
- chmod 755 /run/jobset
- cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
- chmod 644 /run/jobset/default.nix
- chown -R hydra /run/jobset
- """
- )
-
- # Wait until InfluxDB can receive web requests
- machine.wait_for_job("influxdb")
- machine.wait_for_open_port("8086")
-
- # Create an InfluxDB database where hydra will write to
- machine.succeed(
- "curl -XPOST 'http://127.0.0.1:8086/query' "
- + "--data-urlencode 'q=CREATE DATABASE hydra'"
- )
-
- # Wait until hydra-server can receive HTTP requests
- machine.wait_for_job("hydra-server")
- machine.wait_for_open_port("3000")
-
- # Setup the project and jobset
- machine.succeed(
- "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
- )
-
- # Wait until hydra has build the job and
- # the InfluxDBNotification plugin uploaded its notification to InfluxDB
- machine.wait_until_succeeds(
- "curl -s -H 'Accept: application/csv' "
- + "-G 'http://127.0.0.1:8086/query?db=hydra' "
- + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
- )
- '';
- };
-
- tests.gitea.x86_64-linux =
- with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
- makeTest {
- machine = { pkgs, ... }: {
- imports = [ hydraServer ];
- services.hydra-dev.extraConfig = ''
-
- root=d7f16a3412e01a43a414535b16007c6931d3a9c7
-
- '';
- nix = {
- distributedBuilds = true;
- buildMachines = [{
- hostName = "localhost";
- systems = [ "x86_64-linux" ];
- }];
- binaryCaches = [ ];
- };
- services.gitea = {
- enable = true;
- database.type = "postgres";
- disableRegistration = true;
- httpPort = 3001;
- };
- services.openssh.enable = true;
- environment.systemPackages = with pkgs; [ gitea git jq gawk ];
- networking.firewall.allowedTCPPorts = [ 3000 ];
- };
- skipLint = true;
- testScript =
- let
- scripts.mktoken = pkgs.writeText "token.sql" ''
- INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7');
- '';
-
- scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
- set -x
- mkdir -p /tmp/repo $HOME/.ssh
- cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
- chmod 0400 $HOME/.ssh/privk
- git -C /tmp/repo init
- cp ${smallDrv} /tmp/repo/jobset.nix
- git -C /tmp/repo add .
- git config --global user.email test@localhost
- git config --global user.name test
- git -C /tmp/repo commit -m 'Initial import'
- git -C /tmp/repo remote add origin gitea@machine:root/repo
- GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
- git -C /tmp/repo push origin master
- git -C /tmp/repo log >&2
- '';
-
- scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
- set -x
- su -l hydra -c "hydra-create-user root --email-address \
- 'alice@example.org' --password foobar --role admin"
-
- URL=http://localhost:3000
- USERNAME="root"
- PASSWORD="foobar"
- PROJECT_NAME="trivial"
- JOBSET_NAME="trivial"
- mycurl() {
- curl --referer $URL -H "Accept: application/json" \
- -H "Content-Type: application/json" $@
- }
-
- cat >data.json <data.json <data.json < $out; exit 0"];
- };
- }
- '';
- in
- ''
- import json
-
- machine.start()
- machine.wait_for_unit("multi-user.target")
- machine.wait_for_open_port(3000)
- machine.wait_for_open_port(3001)
-
- machine.succeed(
- "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
- + "--username root --password root --email test@localhost'"
- )
- machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
-
- machine.succeed(
- "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
- + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
- + f"-H 'Authorization: token ${api_token}'"
- + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
- )
-
- machine.succeed(
- "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
- + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
- + f"-H 'Authorization: token ${api_token}'"
- + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
- )
-
- machine.succeed(
- "${scripts.git-setup}"
- )
-
- machine.succeed(
- "${scripts.hydra-setup}"
- )
-
- machine.wait_until_succeeds(
- 'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
- + '| jq .buildstatus | xargs test 0 -eq'
- )
-
- data = machine.succeed(
- 'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
- + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
- + f"-H 'Authorization: token ${api_token}'"
- )
-
- response = json.loads(data)
-
- assert len(response) == 2, "Expected exactly two status updates for latest commit!"
- assert response[0]['status'] == "success", "Expected latest status to be success!"
- assert response[1]['status'] == "pending", "Expected first status to be pending!"
-
- machine.shutdown()
- '';
- };
-
- tests.validate-openapi = pkgs.runCommand "validate-openapi"
- { buildInputs = [ pkgs.openapi-generator-cli ]; }
- ''
- openapi-generator-cli validate -i ${./hydra-api.yaml}
- touch $out
- '';
+ tests = import ./nixos-tests.nix {
+ inherit forEachSystem nixpkgs nixosModules;
+ };
container = nixosConfigurations.container.config.system.build.toplevel;
};
- checks.x86_64-linux.build = hydraJobs.build.x86_64-linux;
- checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux;
- checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi;
+ checks = forEachSystem (system: {
+ build = hydraJobs.build.${system};
+ install = hydraJobs.tests.install.${system};
+ validate-openapi = hydraJobs.tests.validate-openapi.${system};
+ });
- packages.x86_64-linux.hydra = pkgs.hydra;
- defaultPackage.x86_64-linux = pkgs.hydra;
-
- nixosModules.hydra = {
- imports = [ ./hydra-module.nix ];
- nixpkgs.overlays = [ self.overlay nix.overlay ];
- };
-
- nixosModules.hydraTest = {
- imports = [ self.nixosModules.hydra ];
-
- services.hydra-dev.enable = true;
- services.hydra-dev.hydraURL = "http://hydra.example.org";
- services.hydra-dev.notificationSender = "admin@hydra.example.org";
-
- systemd.services.hydra-send-stats.enable = false;
-
- services.postgresql.enable = true;
- services.postgresql.package = pkgs.postgresql_11;
-
- # The following is to work around the following error from hydra-server:
- # [error] Caught exception in engine "Cannot determine local time zone"
- time.timeZone = "UTC";
-
- nix.extraOptions = ''
- allowed-uris = https://github.com/
- '';
- };
-
- nixosModules.hydraProxy = {
- services.httpd = {
- enable = true;
- adminAddr = "hydra-admin@example.org";
- extraConfig = ''
-
- Order deny,allow
- Allow from all
-
-
- ProxyRequests Off
- ProxyPreserveHost On
- ProxyPass /apache-errors !
- ErrorDocument 503 /apache-errors/503.html
- ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
- ProxyPassReverse / http://127.0.0.1:3000/
- '';
+ packages = forEachSystem (system: let
+ inherit (nixpkgs) lib;
+ pkgs = nixpkgs.legacyPackages.${system};
+ nixDependencies = lib.makeScope pkgs.newScope
+ (import (nix + "/packaging/dependencies.nix") {
+ inherit pkgs;
+ inherit (pkgs) stdenv;
+ inputs = {};
+ });
+ nixComponents = lib.makeScope nixDependencies.newScope
+ (import (nix + "/packaging/components.nix") {
+ officialRelease = true;
+ inherit lib pkgs;
+ src = nix;
+ maintainers = [ ];
+ });
+ in {
+ nix-eval-jobs = pkgs.callPackage nix-eval-jobs {
+ inherit nixComponents;
};
+ hydra = pkgs.callPackage ./package.nix {
+ inherit (nixpkgs.lib) fileset;
+ inherit nixComponents;
+ inherit (self.packages.${system}) nix-eval-jobs;
+ rawSrc = self;
+ };
+ default = self.packages.${system}.hydra;
+ });
+
+ nixosModules = import ./nixos-modules {
+ inherit self;
};
nixosConfigurations.container = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules =
[
+ self.nixosModules.hydra
self.nixosModules.hydraTest
self.nixosModules.hydraProxy
{
- system.configurationRevision = self.rev;
+ system.configurationRevision = self.lastModifiedDate;
boot.isContainer = true;
networking.useDHCP = false;
diff --git a/hydra-api.yaml b/hydra-api.yaml
index ce7e0f9a..a2fdea28 100644
--- a/hydra-api.yaml
+++ b/hydra-api.yaml
@@ -70,7 +70,7 @@ paths:
$ref: '#/components/examples/projects-success'
/api/push:
- put:
+ post:
summary: trigger jobsets
parameters:
- in: query
@@ -533,13 +533,13 @@ paths:
schema:
$ref: '#/components/schemas/Error'
- /eval/{build-id}:
+ /eval/{eval-id}:
get:
- summary: Retrieves evaluations identified by build id
+ summary: Retrieves evaluations identified by eval id
parameters:
- - name: build-id
+ - name: eval-id
in: path
- description: build identifier
+ description: eval identifier
required: true
schema:
type: integer
@@ -551,6 +551,24 @@ paths:
schema:
$ref: '#/components/schemas/JobsetEval'
+ /eval/{eval-id}/builds:
+ get:
+ summary: Retrieves all builds belonging to an evaluation identified by eval id
+ parameters:
+ - name: eval-id
+ in: path
+ description: eval identifier
+ required: true
+ schema:
+ type: integer
+ responses:
+ '200':
+ description: builds
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/JobsetEvalBuilds'
+
components:
schemas:
@@ -796,6 +814,13 @@ components:
additionalProperties:
$ref: '#/components/schemas/JobsetEvalInput'
+ JobsetEvalBuilds:
+ type: array
+ items:
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/Build'
+
JobsetOverview:
type: array
items:
@@ -870,7 +895,7 @@ components:
description: Size of the produced file
type: integer
defaultpath:
- description: This is a Git/Mercurial commit hash or a Subversion revision number
+ description: if path is a directory, the default file relative to path to be served
type: string
'type':
description: Types of build product (user defined)
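The hunk above switches /api/push from PUT to POST and adds GET /eval/{eval-id}/builds. Below is a minimal client sketch of how these endpoints might be called; the base URL http://localhost:3000, the jobset name hello-project:trivial, the evaluation id 1, and the assumption that no authentication is needed are placeholders, not part of the API definition.

# Minimal sketch; host, jobset name, eval id, and lack of auth are assumptions.
import json
import urllib.request

BASE = "http://localhost:3000"

def api(method, path, data=None):
    body = json.dumps(data).encode() if data is not None else None
    req = urllib.request.Request(
        BASE + path,
        data=body,
        method=method,
        headers={"Accept": "application/json", "Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

# Trigger evaluation of a jobset; this is now a POST rather than a PUT.
api("POST", "/api/push?jobsets=hello-project:trivial")

# List all builds belonging to evaluation 1 via the new endpoint.
builds = api("GET", "/eval/1/builds")
print(len(builds))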
diff --git a/meson.build b/meson.build
new file mode 100644
index 00000000..c1eb577b
--- /dev/null
+++ b/meson.build
@@ -0,0 +1,26 @@
+project('hydra', 'cpp',
+ version: files('version.txt'),
+ license: 'GPL-3.0',
+ default_options: [
+ 'debug=true',
+ 'optimization=2',
+ 'cpp_std=c++20',
+ ],
+)
+
+nix_util_dep = dependency('nix-util', required: true)
+nix_store_dep = dependency('nix-store', required: true)
+nix_main_dep = dependency('nix-main', required: true)
+
+pqxx_dep = dependency('libpqxx', required: true)
+
+prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
+prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)
+
+mdbook = find_program('mdbook', native: true)
+perl = find_program('perl', native: true)
+
+subdir('doc/manual')
+subdir('nixos-modules')
+subdir('src')
+subdir('t')
diff --git a/nixos-modules/default.nix b/nixos-modules/default.nix
new file mode 100644
index 00000000..d12d8338
--- /dev/null
+++ b/nixos-modules/default.nix
@@ -0,0 +1,47 @@
+{ self }:
+
+{
+ hydra = { pkgs, lib,... }: {
+ _file = ./default.nix;
+ imports = [ ./hydra.nix ];
+ services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
+ };
+
+ hydraTest = { pkgs, ... }: {
+ services.hydra-dev.enable = true;
+ services.hydra-dev.hydraURL = "http://hydra.example.org";
+ services.hydra-dev.notificationSender = "admin@hydra.example.org";
+
+ systemd.services.hydra-send-stats.enable = false;
+
+ services.postgresql.enable = true;
+
+ # The following is to work around the following error from hydra-server:
+ # [error] Caught exception in engine "Cannot determine local time zone"
+ time.timeZone = "UTC";
+
+ nix.extraOptions = ''
+ allowed-uris = https://github.com/
+ '';
+ };
+
+ hydraProxy = {
+ services.httpd = {
+ enable = true;
+ adminAddr = "hydra-admin@example.org";
+ extraConfig = ''
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+
+ ProxyRequests Off
+ ProxyPreserveHost On
+ ProxyPass /apache-errors !
+ ErrorDocument 503 /apache-errors/503.html
+ ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
+ ProxyPassReverse / http://127.0.0.1:3000/
+ '';
+ };
+ };
+}
diff --git a/hydra-module.nix b/nixos-modules/hydra.nix
similarity index 93%
rename from hydra-module.nix
rename to nixos-modules/hydra.nix
index 0df5e690..83ffeec4 100644
--- a/hydra-module.nix
+++ b/nixos-modules/hydra.nix
@@ -68,8 +68,6 @@ in
package = mkOption {
type = types.path;
- default = pkgs.hydra;
- defaultText = literalExpression "pkgs.hydra";
description = "The Hydra package.";
};
@@ -228,7 +226,11 @@ in
useDefaultShell = true;
};
- nix.trustedUsers = [ "hydra-queue-runner" ];
+ nix.settings = {
+ trusted-users = [ "hydra-queue-runner" ];
+ keep-outputs = true;
+ keep-derivations = true;
+ };
services.hydra-dev.extraConfig =
''
@@ -256,11 +258,6 @@ in
environment.variables = hydraEnv;
- nix.extraOptions = ''
- gc-keep-outputs = true
- gc-keep-derivations = true
- '';
-
systemd.services.hydra-init =
{ wantedBy = [ "multi-user.target" ];
requires = optional haveLocalDB "postgresql.service";
@@ -268,17 +265,17 @@ in
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
};
- path = [ pkgs.utillinux ];
+ path = [ pkgs.util-linux ];
preStart = ''
ln -sf ${hydraConf} ${baseDir}/hydra.conf
mkdir -m 0700 -p ${baseDir}/www
- chown hydra-www.hydra ${baseDir}/www
+ chown hydra-www:hydra ${baseDir}/www
mkdir -m 0700 -p ${baseDir}/queue-runner
mkdir -m 0750 -p ${baseDir}/build-logs
mkdir -m 0750 -p ${baseDir}/runcommand-logs
- chown hydra-queue-runner.hydra \
+ chown hydra-queue-runner:hydra \
${baseDir}/queue-runner \
${baseDir}/build-logs \
${baseDir}/runcommand-logs
@@ -309,7 +306,7 @@ in
rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
fi
- chown hydra.hydra ${cfg.gcRootsDir}
+ chown hydra:hydra ${cfg.gcRootsDir}
chmod 2775 ${cfg.gcRootsDir}
'';
serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init";
@@ -341,8 +338,9 @@ in
systemd.services.hydra-queue-runner =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
- after = [ "hydra-init.service" "network.target" ];
- path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
+ wants = [ "network-online.target" ];
+ after = [ "hydra-init.service" "network.target" "network-online.target" ];
+ path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ];
restartTriggers = [ hydraConf ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
@@ -366,7 +364,7 @@ in
requires = [ "hydra-init.service" ];
restartTriggers = [ hydraConf ];
after = [ "hydra-init.service" "network.target" ];
- path = with pkgs; [ nettools cfg.package jq ];
+ path = with pkgs; [ hostname-debian cfg.package jq ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
};
@@ -409,6 +407,7 @@ in
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
restartTriggers = [ hydraConf ];
+ path = [ pkgs.zstd ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
@@ -459,10 +458,17 @@ in
# logs automatically after a step finishes, but this doesn't work
# if the queue runner is stopped prematurely.
systemd.services.hydra-compress-logs =
- { path = [ pkgs.bzip2 ];
+ { path = [ pkgs.bzip2 pkgs.zstd ];
script =
''
- find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
+ set -eou pipefail
+ compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf)
+ if [[ $compression == "" ]]; then
+ compression="bzip2"
+ elif [[ $compression == zstd ]]; then
+ compression="zstd --rm"
+ fi
+ find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
'';
startAt = "Sun 01:45";
};
diff --git a/nixos-modules/meson.build b/nixos-modules/meson.build
new file mode 100644
index 00000000..95c47e9f
--- /dev/null
+++ b/nixos-modules/meson.build
@@ -0,0 +1,4 @@
+install_data('hydra.nix',
+ install_dir: get_option('datadir') / 'nix',
+ rename: ['hydra-module.nix'],
+)
diff --git a/nixos-tests.nix b/nixos-tests.nix
new file mode 100644
index 00000000..c70a3cd1
--- /dev/null
+++ b/nixos-tests.nix
@@ -0,0 +1,306 @@
+{ forEachSystem, nixpkgs, nixosModules }:
+
+let
+ # NixOS configuration used for VM tests.
+ hydraServer =
+ { pkgs, ... }:
+ {
+ imports = [
+ nixosModules.hydra
+ nixosModules.hydraTest
+ ];
+
+ virtualisation.memorySize = 1024;
+ virtualisation.writableStore = true;
+
+ environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
+
+ nix = {
+ # Without this nix tries to fetch packages from the default
+ # cache.nixos.org which is not reachable from this sandboxed NixOS test.
+ settings.substituters = [ ];
+ };
+ };
+
+in
+
+{
+
+ install = forEachSystem (system:
+ (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
+ name = "hydra-install";
+ nodes.machine = hydraServer;
+ testScript =
+ ''
+ machine.wait_for_job("hydra-init")
+ machine.wait_for_job("hydra-server")
+ machine.wait_for_job("hydra-evaluator")
+ machine.wait_for_job("hydra-queue-runner")
+ machine.wait_for_open_port(3000)
+ machine.succeed("curl --fail http://localhost:3000/")
+ '';
+ });
+
+ notifications = forEachSystem (system:
+ (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
+ name = "hydra-notifications";
+ nodes.machine = {
+ imports = [ hydraServer ];
+ services.hydra-dev.extraConfig = ''
+ <influxdb>
+ url = http://127.0.0.1:8086
+ db = hydra
+ </influxdb>
+ '';
+ services.influxdb.enable = true;
+ };
+ testScript = { nodes, ... }: ''
+ machine.wait_for_job("hydra-init")
+
+ # Create an admin account and some other state.
+ machine.succeed(
+ """
+ su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
+ mkdir /run/jobset
+ chmod 755 /run/jobset
+ cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
+ chmod 644 /run/jobset/default.nix
+ chown -R hydra /run/jobset
+ """
+ )
+
+ # Wait until InfluxDB can receive web requests
+ machine.wait_for_job("influxdb")
+ machine.wait_for_open_port(8086)
+
+ # Create an InfluxDB database where hydra will write to
+ machine.succeed(
+ "curl -XPOST 'http://127.0.0.1:8086/query' "
+ + "--data-urlencode 'q=CREATE DATABASE hydra'"
+ )
+
+ # Wait until hydra-server can receive HTTP requests
+ machine.wait_for_job("hydra-server")
+ machine.wait_for_open_port(3000)
+
+ # Setup the project and jobset
+ machine.succeed(
+ "su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
+ )
+
+ # Wait until hydra has built the job and
+ # the InfluxDBNotification plugin has uploaded its notification to InfluxDB
+ machine.wait_until_succeeds(
+ "curl -s -H 'Accept: application/csv' "
+ + "-G 'http://127.0.0.1:8086/query?db=hydra' "
+ + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
+ )
+ '';
+ });
+
+ gitea = forEachSystem (system:
+ let
+ pkgs = nixpkgs.legacyPackages.${system};
+ in
+ (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
+ name = "hydra-gitea";
+ nodes.machine = { pkgs, ... }: {
+ imports = [ hydraServer ];
+ services.hydra-dev.extraConfig = ''
+ <gitea_authorization>
+ root=d7f16a3412e01a43a414535b16007c6931d3a9c7
+ </gitea_authorization>
+ '';
+ nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
+ nix = {
+ settings.substituters = [ ];
+ };
+ services.gitea = {
+ enable = true;
+ database.type = "postgres";
+ settings = {
+ service.DISABLE_REGISTRATION = true;
+ server.HTTP_PORT = 3001;
+ };
+ };
+ services.openssh.enable = true;
+ environment.systemPackages = with pkgs; [ gitea git jq gawk ];
+ networking.firewall.allowedTCPPorts = [ 3000 ];
+ };
+ skipLint = true;
+ testScript =
+ let
+ scripts.mktoken = pkgs.writeText "token.sql" ''
+ INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
+ '';
+
+ scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
+ set -x
+ mkdir -p /tmp/repo $HOME/.ssh
+ cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
+ chmod 0400 $HOME/.ssh/privk
+ git -C /tmp/repo init
+ cp ${smallDrv} /tmp/repo/jobset.nix
+ git -C /tmp/repo add .
+ git config --global user.email test@localhost
+ git config --global user.name test
+ git -C /tmp/repo commit -m 'Initial import'
+ git -C /tmp/repo remote add origin gitea@machine:root/repo
+ GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
+ git -C /tmp/repo push origin master
+ git -C /tmp/repo log >&2
+ '';
+
+ scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
+ set -x
+ su -l hydra -c "hydra-create-user root --email-address \
+ 'alice@example.org' --password foobar --role admin"
+
+ URL=http://localhost:3000
+ USERNAME="root"
+ PASSWORD="foobar"
+ PROJECT_NAME="trivial"
+ JOBSET_NAME="trivial"
+ mycurl() {
+ curl --referer $URL -H "Accept: application/json" \
+ -H "Content-Type: application/json" $@
+ }
+
+ cat >data.json <data.json <data.json < $out; exit 0"];
+ };
+ }
+ '';
+ in
+ ''
+ import json
+
+ machine.start()
+ machine.wait_for_unit("multi-user.target")
+ machine.wait_for_open_port(3000)
+ machine.wait_for_open_port(3001)
+
+ machine.succeed(
+ "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+ + "--username root --password root --email test@localhost'"
+ )
+ machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
+
+ machine.succeed(
+ "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+ + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ + f"-H 'Authorization: token ${api_token}'"
+ + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
+ )
+
+ machine.succeed(
+ "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+ + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ + f"-H 'Authorization: token ${api_token}'"
+ + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
+ )
+
+ machine.succeed(
+ "${scripts.git-setup}"
+ )
+
+ machine.succeed(
+ "${scripts.hydra-setup}"
+ )
+
+ machine.wait_until_succeeds(
+ 'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+ + '| jq .buildstatus | xargs test 0 -eq'
+ )
+
+ data = machine.succeed(
+ 'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+ + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ + f"-H 'Authorization: token ${api_token}'"
+ )
+
+ response = json.loads(data)
+
+ assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
+ assert response[0]['status'] == "success", "Expected finished status to be success!"
+ assert response[1]['status'] == "pending", "Expected queued status to be pending!"
+
+ machine.shutdown()
+ '';
+ });
+
+ validate-openapi = forEachSystem (system:
+ let pkgs = nixpkgs.legacyPackages.${system}; in
+ pkgs.runCommand "validate-openapi"
+ { buildInputs = [ pkgs.openapi-generator-cli ]; }
+ ''
+ openapi-generator-cli validate -i ${./hydra-api.yaml}
+ touch $out
+ '');
+
+}
diff --git a/package.nix b/package.nix
new file mode 100644
index 00000000..5c1a7860
--- /dev/null
+++ b/package.nix
@@ -0,0 +1,284 @@
+{ stdenv
+, lib
+, fileset
+
+, rawSrc
+
+, buildEnv
+
+, perlPackages
+
+, nixComponents
+, git
+
+, makeWrapper
+, meson
+, ninja
+, nukeReferences
+, pkg-config
+, mdbook
+
+, unzip
+, libpqxx
+, top-git
+, mercurial
+, darcs
+, subversion
+, breezy
+, openssl
+, bzip2
+, libxslt
+, perl
+, pixz
+, boost
+, postgresql_13
+, nlohmann_json
+, prometheus-cpp
+
+, cacert
+, foreman
+, glibcLocales
+, libressl
+, openldap
+, python3
+
+, openssh
+, coreutils
+, findutils
+, gzip
+, xz
+, gnutar
+, gnused
+, nix-eval-jobs
+
+, rpm
+, dpkg
+, cdrkit
+}:
+
+let
+ perlDeps = buildEnv {
+ name = "hydra-perl-deps";
+ paths = lib.closePropagation
+ ([
+ nixComponents.nix-perl-bindings
+ git
+ ] ++ (with perlPackages; [
+ AuthenSASL
+ CatalystActionREST
+ CatalystAuthenticationStoreDBIxClass
+ CatalystAuthenticationStoreLDAP
+ CatalystDevel
+ CatalystPluginAccessLog
+ CatalystPluginAuthorizationRoles
+ CatalystPluginCaptcha
+ CatalystPluginPrometheusTiny
+ CatalystPluginSessionStateCookie
+ CatalystPluginSessionStoreFastMmap
+ CatalystPluginStackTrace
+ CatalystTraitForRequestProxyBase
+ CatalystViewDownload
+ CatalystViewJSON
+ CatalystViewTT
+ CatalystXRoleApplicator
+ CatalystXScriptServerStarman
+ CryptPassphrase
+ CryptPassphraseArgon2
+ CryptRandPasswd
+ DataDump
+ DateTime
+ DBDPg
+ DBDSQLite
+ DBIxClassHelpers
+ DigestSHA1
+ EmailMIME
+ EmailSender
+ FileCopyRecursive
+ FileLibMagic
+ FileSlurper
+ FileWhich
+ IOCompress
+ IPCRun
+ IPCRun3
+ JSON
+ JSONMaybeXS
+ JSONXS
+ ListSomeUtils
+ LWP
+ LWPProtocolHttps
+ ModulePluggable
+ NetAmazonS3
+ NetPrometheus
+ NetStatsd
+ PadWalker
+ ParallelForkManager
+ PerlCriticCommunity
+ PrometheusTinyShared
+ ReadonlyX
+ SetScalar
+ SQLSplitStatement
+ Starman
+ StringCompareConstantTime
+ SysHostnameLong
+ TermSizeAny
+ TermReadKey
+ Test2Harness
+ TestPostgreSQL
+ TextDiff
+ TextTable
+ UUID4Tiny
+ YAML
+ XMLSimple
+ ]));
+ };
+
+ version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
+in
+stdenv.mkDerivation (finalAttrs: {
+ pname = "hydra";
+ inherit version;
+
+ src = fileset.toSource {
+ root = ./.;
+ fileset = fileset.unions ([
+ ./doc
+ ./meson.build
+ ./nixos-modules
+ ./src
+ ./t
+ ./version.txt
+ ./.perlcriticrc
+ ]);
+ };
+
+ outputs = [ "out" "doc" ];
+
+ strictDeps = true;
+
+ nativeBuildInputs = [
+ makeWrapper
+ meson
+ ninja
+ nukeReferences
+ pkg-config
+ mdbook
+ nixComponents.nix-cli
+ perlDeps
+ perl
+ unzip
+ ];
+
+ buildInputs = [
+ libpqxx
+ openssl
+ libxslt
+ nixComponents.nix-util
+ nixComponents.nix-store
+ nixComponents.nix-main
+ perlDeps
+ perl
+ boost
+ nlohmann_json
+ prometheus-cpp
+ ];
+
+ nativeCheckInputs = [
+ bzip2
+ darcs
+ foreman
+ top-git
+ mercurial
+ subversion
+ breezy
+ openldap
+ postgresql_13
+ pixz
+ nix-eval-jobs
+ ];
+
+ checkInputs = [
+ cacert
+ glibcLocales
+ libressl.nc
+ python3
+ nixComponents.nix-cli
+ ];
+
+ hydraPath = lib.makeBinPath (
+ [
+ subversion
+ openssh
+ nixComponents.nix-cli
+ coreutils
+ findutils
+ pixz
+ gzip
+ bzip2
+ xz
+ gnutar
+ unzip
+ git
+ top-git
+ mercurial
+ darcs
+ gnused
+ breezy
+ nix-eval-jobs
+ ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
+ );
+
+ OPENLDAP_ROOT = openldap;
+
+ mesonBuildType = "release";
+
+ postPatch = ''
+ patchShebangs .
+ '';
+
+ shellHook = ''
+ pushd $(git rev-parse --show-toplevel) >/dev/null
+
+ PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
+ PERL5LIB=$(pwd)/src/lib:$PERL5LIB
+ export HYDRA_HOME="$(pwd)/src/"
+ mkdir -p .hydra-data
+ export HYDRA_DATA="$(pwd)/.hydra-data"
+ export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
+
+ popd >/dev/null
+ '';
+
+ doCheck = true;
+
+ mesonCheckFlags = [ "--verbose" ];
+
+ preCheck = ''
+ export LOGNAME=''${LOGNAME:-foo}
+ # set $HOME for bzr so it can create its trace file
+ export HOME=$(mktemp -d)
+ '';
+
+ postInstall = ''
+ mkdir -p $out/nix-support
+
+ for i in $out/bin/*; do
+ read -n 4 chars < $i
+ if [[ $chars =~ ELF ]]; then continue; fi
+ wrapProgram $i \
+ --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
+ --prefix PATH ':' $out/bin:$hydraPath \
+ --set HYDRA_RELEASE ${version} \
+ --set HYDRA_HOME $out/libexec/hydra \
+ --set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
+ --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
+ done
+ '';
+
+ dontStrip = true;
+
+ meta.description = "Build of Hydra on ${stdenv.system}";
+ passthru = {
+ inherit perlDeps;
+ nix = nixComponents.nix-cli;
+ };
+})
diff --git a/shell.nix b/shell.nix
index 9e967032..1ad58f49 100644
--- a/shell.nix
+++ b/shell.nix
@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`
-(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
+(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
src = ./.;
}).shellNix
diff --git a/src/Makefile.am b/src/Makefile.am
deleted file mode 100644
index a28780b6..00000000
--- a/src/Makefile.am
+++ /dev/null
@@ -1,3 +0,0 @@
-SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
diff --git a/src/hydra-eval-jobs/Makefile.am b/src/hydra-eval-jobs/Makefile.am
deleted file mode 100644
index 7a4e9c91..00000000
--- a/src/hydra-eval-jobs/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-eval-jobs
-
-hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
-hydra_eval_jobs_LDADD = $(NIX_LIBS)
-hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra
diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc
deleted file mode 100644
index 918bd451..00000000
--- a/src/hydra-eval-jobs/hydra-eval-jobs.cc
+++ /dev/null
@@ -1,558 +0,0 @@
-#include
-#include
-#include
-#include
-
-#include "shared.hh"
-#include "store-api.hh"
-#include "eval.hh"
-#include "eval-inline.hh"
-#include "util.hh"
-#include "get-drvs.hh"
-#include "globals.hh"
-#include "common-eval-args.hh"
-#include "flake/flakeref.hh"
-#include "flake/flake.hh"
-#include "attr-path.hh"
-#include "derivations.hh"
-#include "local-fs-store.hh"
-
-#include "hydra-config.hh"
-
-#include
-#include
-#include
-
-#include
-
-void check_pid_status_nonblocking(pid_t check_pid) {
- // Only check 'initialized' and known PID's
- if (check_pid <= 0) { return; }
-
- int wstatus = 0;
- pid_t pid = waitpid(check_pid, &wstatus, WNOHANG);
- // -1 = failure, WNOHANG: 0 = no change
- if (pid <= 0) { return; }
-
- std::cerr << "child process (" << pid << ") ";
-
- if (WIFEXITED(wstatus)) {
- std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl;
- } else if (WIFSIGNALED(wstatus)) {
- std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl;
- } else if (WIFSTOPPED(wstatus)) {
- std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl;
- } else if (WIFCONTINUED(wstatus)) {
- std::cerr << "continued" << std::endl;
- }
-}
-
-using namespace nix;
-
-static Path gcRootsDir;
-static size_t maxMemorySize;
-
-struct MyArgs : MixEvalArgs, MixCommonArgs
-{
- Path releaseExpr;
- bool flake = false;
- bool dryRun = false;
-
- MyArgs() : MixCommonArgs("hydra-eval-jobs")
- {
- addFlag({
- .longName = "gc-roots-dir",
- .description = "garbage collector roots directory",
- .labels = {"path"},
- .handler = {&gcRootsDir}
- });
-
- addFlag({
- .longName = "dry-run",
- .description = "don't create store derivations",
- .handler = {&dryRun, true}
- });
-
- addFlag({
- .longName = "flake",
- .description = "build a flake",
- .handler = {&flake, true}
- });
-
- expectArg("expr", &releaseExpr);
- }
-};
-
-static MyArgs myArgs;
-
-static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
-{
- Strings res;
- std::function<void(Value & v)> rec;
-
- rec = [&](Value & v) {
- state.forceValue(v, noPos);
- if (v.type() == nString)
- res.push_back(v.string.s);
- else if (v.isList())
- for (unsigned int n = 0; n < v.listSize(); ++n)
- rec(*v.listElems()[n]);
- else if (v.type() == nAttrs) {
- auto a = v.attrs->find(state.symbols.create(subAttribute));
- if (a != v.attrs->end())
- res.push_back(std::string(state.forceString(*a->value)));
- }
- };
-
- Value * v = drv.queryMeta(name);
- if (v) rec(*v);
-
- return concatStringsSep(", ", res);
-}
-
-static void worker(
- EvalState & state,
- Bindings & autoArgs,
- AutoCloseFD & to,
- AutoCloseFD & from)
-{
- Value vTop;
-
- if (myArgs.flake) {
- using namespace flake;
-
- auto flakeRef = parseFlakeRef(myArgs.releaseExpr);
-
- auto vFlake = state.allocValue();
-
- auto lockedFlake = lockFlake(state, flakeRef,
- LockFlags {
- .updateLockFile = false,
- .useRegistries = false,
- .allowMutable = false,
- });
-
- callFlake(state, lockedFlake, *vFlake);
-
- auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
- state.forceValue(*vOutputs, noPos);
-
- auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
- if (!aHydraJobs)
- aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
- if (!aHydraJobs)
- throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
-
- vTop = *aHydraJobs->value;
-
- } else {
- state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
- }
-
- auto vRoot = state.allocValue();
- state.autoCallFunction(autoArgs, vTop, *vRoot);
-
- while (true) {
- /* Wait for the master to send us a job name. */
- writeLine(to.get(), "next");
-
- auto s = readLine(from.get());
- if (s == "exit") break;
- if (!hasPrefix(s, "do ")) abort();
- std::string attrPath(s, 3);
-
- debug("worker process %d at '%s'", getpid(), attrPath);
-
- /* Evaluate it and send info back to the master. */
- nlohmann::json reply;
-
- try {
- auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;
-
- auto v = state.allocValue();
- state.autoCallFunction(autoArgs, *vTmp, *v);
-
- if (auto drv = getDerivation(state, *v, false)) {
-
- DrvInfo::Outputs outputs = drv->queryOutputs();
-
- if (drv->querySystem() == "unknown")
- throw EvalError("derivation must have a 'system' attribute");
-
- auto drvPath = state.store->printStorePath(drv->requireDrvPath());
-
- nlohmann::json job;
-
- job["nixName"] = drv->queryName();
- job["system"] =drv->querySystem();
- job["drvPath"] = drvPath;
- job["description"] = drv->queryMetaString("description");
- job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
- job["homepage"] = drv->queryMetaString("homepage");
- job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
- job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
- job["timeout"] = drv->queryMetaInt("timeout", 36000);
- job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
- job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
-
- /* If this is an aggregate, then get its constituents. */
- auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
- if (a && state.forceBool(*a->value, *a->pos)) {
- auto a = v->attrs->get(state.symbols.create("constituents"));
- if (!a)
- throw EvalError("derivation must have a ‘constituents’ attribute");
-
-
- PathSet context;
- state.coerceToString(*a->pos, *a->value, context, true, false);
- for (auto & i : context)
- if (i.at(0) == '!') {
- size_t index = i.find("!", 1);
- job["constituents"].push_back(std::string(i, index + 1));
- }
-
- state.forceList(*a->value, *a->pos);
- for (unsigned int n = 0; n < a->value->listSize(); ++n) {
- auto v = a->value->listElems()[n];
- state.forceValue(*v, noPos);
- if (v->type() == nString)
- job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
- }
- }
-
- /* Register the derivation as a GC root. !!! This
- registers roots for jobs that we may have already
- done. */
- auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
- if (gcRootsDir != "" && localStore) {
- Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
- if (!pathExists(root))
- localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
- }
-
- nlohmann::json out;
- for (auto & j : outputs)
- // FIXME: handle CA/impure builds.
- if (j.second)
- out[j.first] = state.store->printStorePath(*j.second);
- job["outputs"] = std::move(out);
-
- reply["job"] = std::move(job);
- }
-
- else if (v->type() == nAttrs) {
- auto attrs = nlohmann::json::array();
- StringSet ss;
- for (auto & i : v->attrs->lexicographicOrder()) {
- std::string name(i->name);
- if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
- printError("skipping job with illegal name '%s'", name);
- continue;
- }
- attrs.push_back(name);
- }
- reply["attrs"] = std::move(attrs);
- }
-
- else if (v->type() == nNull)
- ;
-
- else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
-
- } catch (EvalError & e) {
- auto msg = e.msg();
- // Transmits the error we got from the previous evaluation
- // in the JSON output.
- reply["error"] = filterANSIEscapes(msg, true);
- // Don't forget to print it into the STDERR log, this is
- // what's shown in the Hydra UI.
- printError(msg);
- }
-
- writeLine(to.get(), reply.dump());
-
- /* If our RSS exceeds the maximum, exit. The master will
- start a new process. */
- struct rusage r;
- getrusage(RUSAGE_SELF, &r);
- if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
- }
-
- writeLine(to.get(), "restart");
-}
-
-int main(int argc, char * * argv)
-{
- /* Prevent undeclared dependencies in the evaluation via
- $NIX_PATH. */
- unsetenv("NIX_PATH");
-
- return handleExceptions(argv[0], [&]() {
-
- auto config = std::make_unique<HydraConfig>();
-
- auto nrWorkers = config->getIntOption("evaluator_workers", 1);
- maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
-
- initNix();
- initGC();
-
- myArgs.parseCmdline(argvToStrings(argc, argv));
-
- auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake);
-
- /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
- settings.builders = "";
-
- /* Prevent access to paths outside of the Nix search path and
- to the environment. */
- evalSettings.restrictEval = true;
-
- /* When building a flake, use pure evaluation (no access to
- 'getEnv', 'currentSystem' etc. */
- evalSettings.pureEval = pureEval;
-
- if (myArgs.dryRun) settings.readOnlyMode = true;
-
- if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
-
- if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
-
- struct State
- {
- std::set<std::string> todo{""};
- std::set<std::string> active;
- nlohmann::json jobs;
- std::exception_ptr exc;
- };
-
- std::condition_variable wakeup;
-
- Sync<State> state_;
-
- /* Start a handler thread per worker process. */
- auto handler = [&]()
- {
- pid_t pid = -1;
- try {
- AutoCloseFD from, to;
-
- while (true) {
-
- /* Start a new worker process if necessary. */
- if (pid == -1) {
- Pipe toPipe, fromPipe;
- toPipe.create();
- fromPipe.create();
- pid = startProcess(
- [&,
- to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
- from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
- ]()
- {
- try {
- EvalState state(myArgs.searchPath, openStore());
- Bindings & autoArgs = *myArgs.getAutoArgs(state);
- worker(state, autoArgs, *to, *from);
- } catch (Error & e) {
- nlohmann::json err;
- auto msg = e.msg();
- err["error"] = filterANSIEscapes(msg, true);
- printError(msg);
- writeLine(to->get(), err.dump());
- // Don't forget to print it into the STDERR log, this is
- // what's shown in the Hydra UI.
- writeLine(to->get(), "restart");
- }
- },
- ProcessOptions { .allowVfork = false });
- from = std::move(fromPipe.readSide);
- to = std::move(toPipe.writeSide);
- debug("created worker process %d", pid);
- }
-
- /* Check whether the existing worker process is still there. */
- auto s = readLine(from.get());
- if (s == "restart") {
- pid = -1;
- continue;
- } else if (s != "next") {
- auto json = nlohmann::json::parse(s);
- throw Error("worker error: %s", (std::string) json["error"]);
- }
-
- /* Wait for a job name to become available. */
- std::string attrPath;
-
- while (true) {
- checkInterrupt();
- auto state(state_.lock());
- if ((state->todo.empty() && state->active.empty()) || state->exc) {
- writeLine(to.get(), "exit");
- return;
- }
- if (!state->todo.empty()) {
- attrPath = *state->todo.begin();
- state->todo.erase(state->todo.begin());
- state->active.insert(attrPath);
- break;
- } else
- state.wait(wakeup);
- }
-
- /* Tell the worker to evaluate it. */
- writeLine(to.get(), "do " + attrPath);
-
- /* Wait for the response. */
- auto response = nlohmann::json::parse(readLine(from.get()));
-
- /* Handle the response. */
- StringSet newAttrs;
-
- if (response.find("job") != response.end()) {
- auto state(state_.lock());
- state->jobs[attrPath] = response["job"];
- }
-
- if (response.find("attrs") != response.end()) {
- for (auto & i : response["attrs"]) {
- auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
- newAttrs.insert(s);
- }
- }
-
- if (response.find("error") != response.end()) {
- auto state(state_.lock());
- state->jobs[attrPath]["error"] = response["error"];
- }
-
- /* Add newly discovered job names to the queue. */
- {
- auto state(state_.lock());
- state->active.erase(attrPath);
- for (auto & s : newAttrs)
- state->todo.insert(s);
- wakeup.notify_all();
- }
- }
- } catch (...) {
- check_pid_status_nonblocking(pid);
- auto state(state_.lock());
- state->exc = std::current_exception();
- wakeup.notify_all();
- }
- };
-
- std::vector<std::thread> threads;
- for (size_t i = 0; i < nrWorkers; i++)
- threads.emplace_back(std::thread(handler));
-
- for (auto & thread : threads)
- thread.join();
-
- auto state(state_.lock());
-
- if (state->exc)
- std::rethrow_exception(state->exc);
-
- /* For aggregate jobs that have named constituents
- (i.e. constituents that are a job name rather than a
- derivation), look up the referenced job and add it to the
- dependencies of the aggregate derivation. */
- auto store = openStore();
-
- for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
- auto jobName = i.key();
- auto & job = i.value();
-
- auto named = job.find("namedConstituents");
- if (named == job.end()) continue;
-
- std::unordered_map<std::string, std::string> brokenJobs;
- auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state](
- const std::string & childJobName) -> std::optional<nlohmann::json> {
- auto childJob = state->jobs.find(childJobName);
- if (childJob == state->jobs.end()) {
- printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName);
- brokenJobs[childJobName] = "does not exist";
- return std::nullopt;
- }
- if (childJob->find("error") != childJob->end()) {
- std::string error = (*childJob)["error"];
- printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error);
- brokenJobs[childJobName] = error;
- return std::nullopt;
- }
- return *childJob;
- };
-
- if (myArgs.dryRun) {
- for (std::string jobName2 : *named) {
- auto job2 = getNonBrokenJobOrRecordError(jobName2);
- if (!job2) {
- continue;
- }
- std::string drvPath2 = (*job2)["drvPath"];
- job["constituents"].push_back(drvPath2);
- }
- } else {
- auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
- auto drv = store->readDerivation(drvPath);
-
- for (std::string jobName2 : *named) {
- auto job2 = getNonBrokenJobOrRecordError(jobName2);
- if (!job2) {
- continue;
- }
- auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
- auto drv2 = store->readDerivation(drvPath2);
- job["constituents"].push_back(store->printStorePath(drvPath2));
- drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
- }
-
- if (brokenJobs.empty()) {
- std::string drvName(drvPath.name());
- assert(hasSuffix(drvName, drvExtension));
- drvName.resize(drvName.size() - drvExtension.size());
-
- auto hashModulo = hashDerivationModulo(*store, drv, true);
- if (hashModulo.kind != DrvHash::Kind::Regular) continue;
- auto h = hashModulo.hashes.find("out");
- if (h == hashModulo.hashes.end()) continue;
- auto outPath = store->makeOutputPath("out", h->second, drvName);
- drv.env["out"] = store->printStorePath(outPath);
- drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath });
- auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));
-
- debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);
-
- job["drvPath"] = newDrvPath;
- job["outputs"]["out"] = store->printStorePath(outPath);
- }
- }
-
- job.erase("namedConstituents");
-
- /* Register the derivation as a GC root. !!! This
- registers roots for jobs that we may have already
- done. */
- auto localStore = store.dynamic_pointer_cast<LocalFSStore>();
- if (gcRootsDir != "" && localStore) {
- auto drvPath = job["drvPath"].get();
- Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
- if (!pathExists(root))
- localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
- }
-
- if (!brokenJobs.empty()) {
- std::stringstream ss;
- for (const auto& [jobName, error] : brokenJobs) {
- ss << jobName << ": " << error << "\n";
- }
- job["error"] = ss.str();
- }
- }
-
- std::cout << state->jobs.dump(2) << "\n";
- });
-}
diff --git a/src/hydra-evaluator/Makefile.am b/src/hydra-evaluator/Makefile.am
deleted file mode 100644
index 73638cfe..00000000
--- a/src/hydra-evaluator/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-evaluator
-
-hydra_evaluator_SOURCES = hydra-evaluator.cc
-hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
-hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
diff --git a/src/hydra-evaluator/hydra-evaluator.cc b/src/hydra-evaluator/hydra-evaluator.cc
index 2d7e68d9..52664188 100644
--- a/src/hydra-evaluator/hydra-evaluator.cc
+++ b/src/hydra-evaluator/hydra-evaluator.cc
@@ -1,7 +1,8 @@
#include "db.hh"
#include "hydra-config.hh"
-#include "pool.hh"
-#include "shared.hh"
+#include
+#include
+#include
#include
#include
@@ -37,7 +38,7 @@ class JobsetId {
friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
std::string display() const {
- return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
+ return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
}
};
bool operator==(const JobsetId & lhs, const JobsetId & rhs)
@@ -366,6 +367,9 @@ struct Evaluator
printInfo("received jobset event");
}
+ } catch (pqxx::broken_connection & e) {
+ printError("Database connection broken: %s", e.what());
+ std::_Exit(1);
} catch (std::exception & e) {
printError("exception in database monitor thread: %s", e.what());
sleep(30);
@@ -473,6 +477,9 @@ struct Evaluator
while (true) {
try {
loop();
+ } catch (pqxx::broken_connection & e) {
+ printError("Database connection broken: %s", e.what());
+ std::_Exit(1);
} catch (std::exception & e) {
printError("exception in main loop: %s", e.what());
sleep(30);
diff --git a/src/hydra-evaluator/meson.build b/src/hydra-evaluator/meson.build
new file mode 100644
index 00000000..53ddc354
--- /dev/null
+++ b/src/hydra-evaluator/meson.build
@@ -0,0 +1,10 @@
+hydra_evaluator = executable('hydra-evaluator',
+ 'hydra-evaluator.cc',
+ dependencies: [
+ libhydra_dep,
+ nix_util_dep,
+ nix_main_dep,
+ pqxx_dep,
+ ],
+ install: true,
+)
diff --git a/src/hydra-queue-runner/Makefile.am b/src/hydra-queue-runner/Makefile.am
deleted file mode 100644
index 117112f6..00000000
--- a/src/hydra-queue-runner/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-bin_PROGRAMS = hydra-queue-runner
-
-hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
- builder.cc build-result.cc build-remote.cc \
- hydra-build-result.hh counter.hh state.hh db.hh \
- nar-extractor.cc nar-extractor.hh
-hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core
-hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 57a5f0df..b372e7dd 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -5,107 +5,77 @@
#include
#include
-#include "build-result.hh"
-#include "serve-protocol.hh"
+#include
+#include
+#include
+#include
+#include
#include "state.hh"
-#include "util.hh"
-#include "worker-protocol.hh"
-#include "finally.hh"
-#include "url.hh"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
using namespace nix;
-
-struct Child
+bool ::Machine::isLocalhost() const
{
- Pid pid;
- AutoCloseFD to, from;
-};
-
-
-static void append(Strings & dst, const Strings & src)
-{
- dst.insert(dst.end(), src.begin(), src.end());
+ return storeUri.params.empty() && std::visit(overloaded {
+ [](const StoreReference::Auto &) {
+ return true;
+ },
+ [](const StoreReference::Specified & s) {
+ return
+ (s.scheme == "local" || s.scheme == "unix") ||
+ ((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
+ s.authority == "localhost");
+ },
+ }, storeUri.variant);
}
-static Strings extraStoreArgs(std::string & machine)
+namespace nix::build_remote {
+
+static std::unique_ptr<SSHMaster::Connection> openConnection(
+ ::Machine::ptr machine, SSHMaster & master)
{
- Strings result;
- try {
- auto parsed = parseURL(machine);
- if (parsed.scheme != "ssh") {
- throw SysError("Currently, only (legacy-)ssh stores are supported!");
- }
- machine = parsed.authority.value_or("");
- auto remoteStore = parsed.query.find("remote-store");
- if (remoteStore != parsed.query.end()) {
- result = {"--store", shellEscape(remoteStore->second)};
- }
- } catch (BadURL &) {
- // We just try to continue with `machine->sshName` here for backwards compat.
- }
-
- return result;
-}
-
-static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
-{
- std::string pgmName;
- Pipe to, from;
- to.create();
- from.create();
-
- Strings argv;
+ Strings command = {"nix-store", "--serve", "--write"};
if (machine->isLocalhost()) {
- pgmName = "nix-store";
- argv = {"nix-store", "--builders", "", "--serve", "--write"};
+ command.push_back("--builders");
+ command.push_back("");
} else {
- pgmName = "ssh";
- auto sshName = machine->sshName;
- Strings extraArgs = extraStoreArgs(sshName);
- argv = {"ssh", sshName};
- if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
- if (machine->sshPublicHostKey != "") {
- Path fileName = tmpDir + "/host-key";
- auto p = machine->sshName.find("@");
- std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
- writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
- append(argv, {"-oUserKnownHostsFile=" + fileName});
+ auto remoteStore = machine->storeUri.params.find("remote-store");
+ if (remoteStore != machine->storeUri.params.end()) {
+ command.push_back("--store");
+ command.push_back(escapeShellArgAlways(remoteStore->second));
}
- append(argv,
- { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
- , "--", "nix-store", "--serve", "--write" });
- append(argv, extraArgs);
}
- child.pid = startProcess([&]() {
- restoreProcessContext();
-
- if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
- throw SysError("cannot dup input pipe to stdin");
-
- if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
- throw SysError("cannot dup output pipe to stdout");
-
- if (dup2(stderrFD, STDERR_FILENO) == -1)
- throw SysError("cannot dup stderr");
-
- execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
-
- throw SysError("cannot start %s", pgmName);
+ auto ret = master.startCommand(std::move(command), {
+ "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
});
- to.readSide = -1;
- from.writeSide = -1;
+ // XXX: determine the actual max value we can use from /proc.
- child.to = to.writeSide.release();
- child.from = from.readSide.release();
+ // FIXME: Should this be upstreamed into `startCommand` in Nix?
+
+ int pipesize = 1024 * 1024;
+
+ fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+ fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+ return ret;
}
-static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
- FdSource & from, FdSink & to, const StorePathSet & paths,
- bool useSubstitutes = false)
+static void copyClosureTo(
+ ::Machine::Connection & conn,
+ Store & destStore,
+ const StorePathSet & paths,
+ SubstituteFlag useSubstitutes = NoSubstitute)
{
StorePathSet closure;
destStore.computeFSClosure(paths, closure);
@@ -115,13 +85,10 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
garbage-collect paths that are already there. Optionally, ask
the remote host to substitute missing paths. */
// FIXME: substitute output pollutes our build log
- to << cmdQueryValidPaths << 1 << useSubstitutes;
- worker_proto::write(destStore, to, closure);
- to.flush();
-
/* Get back the set of paths that are already valid on the remote
host. */
- auto present = worker_proto::read(destStore, from, Phantom {});
+ auto present = conn.queryValidPaths(
+ destStore, true, closure, useSubstitutes);
if (present.size() == closure.size()) return;
@@ -133,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
printMsg(lvlDebug, "sending %d missing paths", missing.size());
- std::unique_lock<std::timed_mutex> sendLock(sendMutex,
+ std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
std::chrono::seconds(600));
- to << cmdImportPaths;
- destStore.exportPaths(missing, to);
- to.flush();
+ conn.to << ServeProto::Command::ImportPaths;
+ destStore.exportPaths(missing, conn.to);
+ conn.to.flush();
- if (readInt(from) != 1)
+ if (readInt(conn.from) != 1)
throw Error("remote machine failed to import closure");
}
// FIXME: use Store::topoSortPaths().
-StorePaths reverseTopoSortPaths(const std::map & paths)
+static StorePaths reverseTopoSortPaths(const std::map & paths)
{
StorePaths sorted;
StorePathSet visited;
@@ -174,40 +141,304 @@ StorePaths reverseTopoSortPaths(const std::map & paths
return sorted;
}
+static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
+{
+ std::string base(drvPath.to_string());
+ auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
+
+ createDirs(dirOf(logFile));
+
+ AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
+ if (!logFD) throw SysError("creating log file ‘%s’", logFile);
+
+ return {std::move(logFile), std::move(logFD)};
+}
+
+static BasicDerivation sendInputs(
+ State & state,
+ Step & step,
+ Store & localStore,
+ Store & destStore,
+ ::Machine::Connection & conn,
+ unsigned int & overhead,
+ counter & nrStepsWaiting,
+ counter & nrStepsCopyingTo
+)
+{
+ /* Replace the input derivations by their output paths to send a
+ minimal closure to the builder.
+
+ `tryResolve` currently does *not* rewrite input addresses, so it
+ is safe to do this in all cases. (It should probably have a mode
+ to do that, but we would not use it here.)
+ */
+ BasicDerivation basicDrv = ({
+ auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
+ if (!maybeBasicDrv)
+ throw Error(
+ "the derivation '%s' can’t be resolved. It’s probably "
+ "missing some outputs",
+ localStore.printStorePath(step.drvPath));
+ *maybeBasicDrv;
+ });
+
+ /* Ensure that the inputs exist in the destination store. This is
+ a no-op for regular stores, but for the binary cache store,
+ this will copy the inputs to the binary cache from the local
+ store. */
+ if (&localStore != &destStore) {
+ copyClosure(localStore, destStore,
+ step.drv->inputSrcs,
+ NoRepair, NoCheckSigs, NoSubstitute);
+ }
+
+ {
+ auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
+ mc1.reset();
+ MaintainCount<counter> mc2(nrStepsCopyingTo);
+
+ printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
+ localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
+
+ auto now1 = std::chrono::steady_clock::now();
+
+ /* Copy the input closure. */
+ if (conn.machine->isLocalhost()) {
+ StorePathSet closure;
+ destStore.computeFSClosure(basicDrv.inputSrcs, closure);
+ copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
+ } else {
+ copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
+ }
+
+ auto now2 = std::chrono::steady_clock::now();
+
+ overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+ }
+
+ return basicDrv;
+}
+
+static BuildResult performBuild(
+ ::Machine::Connection & conn,
+ Store & localStore,
+ StorePath drvPath,
+ const BasicDerivation & drv,
+ const ServeProto::BuildOptions & options,
+ counter & nrStepsBuilding
+)
+{
+ conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
+
+ BuildResult result;
+
+ time_t startTime, stopTime;
+
+ startTime = time(0);
+ {
+ MaintainCount<counter> mc(nrStepsBuilding);
+ result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
+ }
+ stopTime = time(0);
+
+ if (!result.startTime) {
+ // If the builder gave `startTime = 0`, use our measurements
+ // instead of the builder's.
+ //
+ // Note: this represents the duration of a single round, rather
+ // than all rounds.
+ result.startTime = startTime;
+ result.stopTime = stopTime;
+ }
+
+ // If the protocol was too old to give us `builtOutputs`, initialize
+ // it manually by introspecting the derivation.
+ if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
+ {
+ // If the remote is too old to handle CA derivations, we can’t get this
+ // far anyways
+ assert(drv.type().hasKnownOutputPaths());
+ DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
+ // Since this is a `BasicDerivation`, `staticOutputHashes` will not
+ // do any real work.
+ auto outputHashes = staticOutputHashes(localStore, drv);
+ for (auto & [outputName, output] : drvOutputs) {
+ auto outputPath = output.second;
+ // We’ve just asserted that the output paths of the derivation
+ // were known
+ assert(outputPath);
+ auto outputHash = outputHashes.at(outputName);
+ auto drvOutput = DrvOutput { outputHash, outputName };
+ result.builtOutputs.insert_or_assign(
+ std::move(outputName),
+ Realisation { drvOutput, *outputPath });
+ }
+ }
+
+ return result;
+}
+
+static void copyPathFromRemote(
+ ::Machine::Connection & conn,
+ NarMemberDatas & narMembers,
+ Store & localStore,
+ Store & destStore,
+ const ValidPathInfo & info
+)
+{
+ /* Receive the NAR from the remote and add it to the
+ destination store. Meanwhile, extract all the info from the
+ NAR that getBuildOutput() needs. */
+ auto source2 = sinkToSource([&](Sink & sink)
+ {
+ /* Note: we should only send the command to dump the store
+ path to the remote if the NAR is actually going to get read
+ by the destination store, which won't happen if this path
+ is already valid on the destination store. Since this
+ lambda function only gets executed if someone tries to read
+ from source2, we will send the command from here rather
+ than outside the lambda. */
+ conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+ conn.to.flush();
+
+ TeeSource tee(conn.from, sink);
+ extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+ });
+
+ destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+}
+
+static void copyPathsFromRemote(
+ ::Machine::Connection & conn,
+ NarMemberDatas & narMembers,
+ Store & localStore,
+ Store & destStore,
+ const std::map & infos
+)
+{
+ auto pathsSorted = reverseTopoSortPaths(infos);
+
+ for (auto & path : pathsSorted) {
+ auto & info = infos.find(path)->second;
+ copyPathFromRemote(
+ conn, narMembers, localStore, destStore,
+ ValidPathInfo { path, info });
+ }
+
+}
+
+}
+
+/* using namespace nix::build_remote; */
+
+void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
+{
+ startTime = buildResult.startTime;
+ stopTime = buildResult.stopTime;
+ timesBuilt = buildResult.timesBuilt;
+ errorMsg = buildResult.errorMsg;
+ isNonDeterministic = buildResult.isNonDeterministic;
+
+ switch ((BuildResult::Status) buildResult.status) {
+ case BuildResult::Built:
+ stepStatus = bsSuccess;
+ break;
+ case BuildResult::Substituted:
+ case BuildResult::AlreadyValid:
+ stepStatus = bsSuccess;
+ isCached = true;
+ break;
+ case BuildResult::PermanentFailure:
+ stepStatus = bsFailed;
+ canCache = true;
+ errorMsg = "";
+ break;
+ case BuildResult::InputRejected:
+ case BuildResult::OutputRejected:
+ stepStatus = bsFailed;
+ canCache = true;
+ break;
+ case BuildResult::TransientFailure:
+ stepStatus = bsFailed;
+ canRetry = true;
+ errorMsg = "";
+ break;
+ case BuildResult::TimedOut:
+ stepStatus = bsTimedOut;
+ errorMsg = "";
+ break;
+ case BuildResult::MiscFailure:
+ stepStatus = bsAborted;
+ canRetry = true;
+ break;
+ case BuildResult::LogLimitExceeded:
+ stepStatus = bsLogLimitExceeded;
+ break;
+ case BuildResult::NotDeterministic:
+ stepStatus = bsNotDeterministic;
+ canRetry = false;
+ canCache = true;
+ break;
+ default:
+ stepStatus = bsAborted;
+ break;
+ }
+
+}
+
+/* Utility guard object to auto-release a semaphore on destruction. */
+template<typename T>
+class SemaphoreReleaser {
+public:
+ SemaphoreReleaser(T* s) : sem(s) {}
+ ~SemaphoreReleaser() { sem->release(); }
+
+private:
+ T* sem;
+};
void State::buildRemote(ref<Store> destStore,
- Machine::ptr machine, Step::ptr step,
- unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
+ std::unique_ptr<MachineReservation> reservation,
+ ::Machine::ptr machine, Step::ptr step,
+ const ServeProto::BuildOptions & buildOptions,
RemoteResult & result, std::shared_ptr activeStep,
std::function updateStep,
NarMemberDatas & narMembers)
{
assert(BuildResult::TimedOut == 8);
- std::string base(step->drvPath.to_string());
- result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
- AutoDelete autoDelete(result.logFile, false);
-
- createDirs(dirOf(result.logFile));
-
- AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
- if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
-
- nix::Path tmpDir = createTempDir();
- AutoDelete tmpDirDel(tmpDir, true);
+ auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
+ AutoDelete logFileDel(logFile, false);
+ result.logFile = logFile;
try {
updateStep(ssConnecting);
+ auto storeRef = machine->completeStoreReference();
+
+ auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
+ if (!pSpecified || pSpecified->scheme != "ssh") {
+ throw Error("Currently, only (legacy-)ssh stores are supported!");
+ }
+
+ LegacySSHStoreConfig storeConfig {
+ pSpecified->scheme,
+ pSpecified->authority,
+ storeRef.params
+ };
+
+ auto master = storeConfig.createSSHMaster(
+ false, // no SSH master yet
+ logFD.get());
+
// FIXME: rewrite to use Store.
- Child child;
- openConnection(machine, tmpDir, logFD.get(), child);
+ auto child = build_remote::openConnection(machine, master);
{
auto activeStepState(activeStep->state_.lock());
if (activeStepState->cancelled) throw Error("step cancelled");
- activeStepState->pid = child.pid;
+ activeStepState->pid = child->sshPid;
}
Finally clearPid([&]() {
@@ -222,34 +453,33 @@ void State::buildRemote(ref destStore,
process. Meh. */
});
- FdSource from(child.from.get());
- FdSink to(child.to.get());
+ ::Machine::Connection conn {
+ {
+ .to = child->in.get(),
+ .from = child->out.get(),
+ /* Handshake. */
+ .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+ },
+ /*.machine =*/ machine,
+ };
Finally updateStats([&]() {
- bytesReceived += from.read;
- bytesSent += to.written;
+ bytesReceived += conn.from.read;
+ bytesSent += conn.to.written;
});
- /* Handshake. */
- unsigned int remoteVersion;
+ constexpr ServeProto::Version our_version = 0x206;
try {
- to << SERVE_MAGIC_1 << 0x204;
- to.flush();
-
- unsigned int magic = readInt(from);
- if (magic != SERVE_MAGIC_2)
- throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
- remoteVersion = readInt(from);
- if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
- throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
- if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
- throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
-
+ conn.remoteVersion = decltype(conn)::handshake(
+ conn.to,
+ conn.from,
+ our_version,
+ machine->storeUri.render());
} catch (EndOfFile & e) {
- child.pid.wait();
+ child->sshPid.wait();
std::string s = chomp(readFile(result.logFile));
- throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
+ throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
}
{
@@ -263,62 +493,12 @@ void State::buildRemote(ref destStore,
copy the immediate sources of the derivation and the required
outputs of the input derivations. */
updateStep(ssSendingInputs);
+ BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
- StorePathSet inputs;
- BasicDerivation basicDrv(*step->drv);
-
- for (auto & p : step->drv->inputSrcs)
- inputs.insert(p);
-
- for (auto & input : step->drv->inputDrvs) {
- auto drv2 = localStore->readDerivation(input.first);
- for (auto & name : input.second) {
- if (auto i = get(drv2.outputs, name)) {
- auto outPath = i->path(*localStore, drv2.name, name);
- inputs.insert(*outPath);
- basicDrv.inputSrcs.insert(*outPath);
- }
- }
- }
-
- /* Ensure that the inputs exist in the destination store. This is
- a no-op for regular stores, but for the binary cache store,
- this will copy the inputs to the binary cache from the local
- store. */
- if (localStore != std::shared_ptr<Store>(destStore)) {
- copyClosure(*localStore, *destStore,
- step->drv->inputSrcs,
- NoRepair, NoCheckSigs, NoSubstitute);
- }
-
- {
- auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
- mc1.reset();
- MaintainCount<counter> mc2(nrStepsCopyingTo);
-
- printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
- localStore->printStorePath(step->drvPath), machine->sshName);
-
- auto now1 = std::chrono::steady_clock::now();
-
- /* Copy the input closure. */
- if (machine->isLocalhost()) {
- StorePathSet closure;
- destStore->computeFSClosure(inputs, closure);
- copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
- } else {
- copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true);
- }
-
- auto now2 = std::chrono::steady_clock::now();
-
- result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
- }
-
- autoDelete.cancel();
+ logFileDel.cancel();
/* Truncate the log to get rid of messages about substitutions
- etc. on the remote system. */
+ etc. on the remote system. */
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
@@ -330,89 +510,21 @@ void State::buildRemote(ref<Store> destStore,
/* Do the build. */
printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
localStore->printStorePath(step->drvPath),
- machine->sshName);
+ machine->storeUri.render());
updateStep(ssBuilding);
- to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
- writeDerivation(to, *localStore, basicDrv);
- to << maxSilentTime << buildTimeout;
- if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
- to << maxLogSize;
- if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
- to << repeats // == build-repeat
- << step->isDeterministic; // == enforce-determinism
- }
- to.flush();
+ BuildResult buildResult = build_remote::performBuild(
+ conn,
+ *localStore,
+ step->drvPath,
+ resolvedDrv,
+ buildOptions,
+ nrStepsBuilding
+ );
- result.startTime = time(0);
- int res;
- {
- MaintainCount<counter> mc(nrStepsBuilding);
- res = readInt(from);
- }
- result.stopTime = time(0);
+ result.updateWithBuildResult(buildResult);
- result.errorMsg = readString(from);
- if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
- result.timesBuilt = readInt(from);
- result.isNonDeterministic = readInt(from);
- auto start = readInt(from);
- auto stop = readInt(from);
- if (start && start) {
- /* Note: this represents the duration of a single
- round, rather than all rounds. */
- result.startTime = start;
- result.stopTime = stop;
- }
- }
- if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
- worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
- }
- switch ((BuildResult::Status) res) {
- case BuildResult::Built:
- result.stepStatus = bsSuccess;
- break;
- case BuildResult::Substituted:
- case BuildResult::AlreadyValid:
- result.stepStatus = bsSuccess;
- result.isCached = true;
- break;
- case BuildResult::PermanentFailure:
- result.stepStatus = bsFailed;
- result.canCache = true;
- result.errorMsg = "";
- break;
- case BuildResult::InputRejected:
- case BuildResult::OutputRejected:
- result.stepStatus = bsFailed;
- result.canCache = true;
- break;
- case BuildResult::TransientFailure:
- result.stepStatus = bsFailed;
- result.canRetry = true;
- result.errorMsg = "";
- break;
- case BuildResult::TimedOut:
- result.stepStatus = bsTimedOut;
- result.errorMsg = "";
- break;
- case BuildResult::MiscFailure:
- result.stepStatus = bsAborted;
- result.canRetry = true;
- break;
- case BuildResult::LogLimitExceeded:
- result.stepStatus = bsLogLimitExceeded;
- break;
- case BuildResult::NotDeterministic:
- result.stepStatus = bsNotDeterministic;
- result.canRetry = false;
- result.canCache = true;
- break;
- default:
- result.stepStatus = bsAborted;
- break;
- }
if (result.stepStatus != bsSuccess) return;
result.errorMsg = "";
@@ -421,11 +533,32 @@ void State::buildRemote(ref<Store> destStore,
get a build log. */
if (result.isCached) {
printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
- localStore->printStorePath(step->drvPath), machine->sshName);
+ localStore->printStorePath(step->drvPath), machine->storeUri.render());
unlink(result.logFile.c_str());
result.logFile = "";
}
+ /* Throttle CPU-bound work. Opportunistically skip updating the current
+ * step, since this requires a DB roundtrip. */
+ if (!localWorkThrottler.try_acquire()) {
+ MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
+ updateStep(ssWaitingForLocalSlot);
+ localWorkThrottler.acquire();
+ }
+ SemaphoreReleaser releaser(&localWorkThrottler);
+
+ /* Once we've started copying outputs, release the machine reservation
+ * so further builds can happen. We do not release the machine earlier
+ * to avoid situations where the queue runner is bottlenecked on
+ * copying outputs and we end up building too many things that we
+ * haven't been able to allow copy slots for. */
+ reservation.reset();
+ wakeDispatcher();
+
+ StorePathSet outputs;
+ for (auto & [_, realisation] : buildResult.builtOutputs)
+ outputs.insert(realisation.outPath);
+
/* Copy the output paths. */
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
updateStep(ssReceivingOutputs);
@@ -434,39 +567,10 @@ void State::buildRemote(ref<Store> destStore,
auto now1 = std::chrono::steady_clock::now();
- StorePathSet outputs;
- for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
- if (i.second.second)
- outputs.insert(*i.second.second);
- }
+ auto infos = conn.queryPathInfos(*localStore, outputs);
- /* Get info about each output path. */
- std::map<StorePath, ValidPathInfo> infos;
size_t totalNarSize = 0;
- to << cmdQueryPathInfos;
- worker_proto::write(*localStore, to, outputs);
- to.flush();
- while (true) {
- auto storePathS = readString(from);
- if (storePathS == "") break;
- auto deriver = readString(from); // deriver
- auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
- readLongLong(from); // download size
- auto narSize = readLongLong(from);
- auto narHash = Hash::parseAny(readString(from), htSHA256);
- auto ca = parseContentAddressOpt(readString(from));
- readStrings<StringSet>(from); // sigs
- ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
- assert(outputs.count(info.path));
- info.references = references;
- info.narSize = narSize;
- totalNarSize += info.narSize;
- info.narHash = narHash;
- info.ca = ca;
- if (deriver != "")
- info.deriver = localStore->parseStorePath(deriver);
- infos.insert_or_assign(info.path, info);
- }
+ for (auto & [_, info] : infos) totalNarSize += info.narSize;
if (totalNarSize > maxOutputSize) {
result.stepStatus = bsNarSizeLimitExceeded;
@@ -475,43 +579,32 @@ void State::buildRemote(ref<Store> destStore,
/* Copy each path. */
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
- localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
-
- auto pathsSorted = reverseTopoSortPaths(infos);
-
- for (auto & path : pathsSorted) {
- auto & info = infos.find(path)->second;
-
- /* Receive the NAR from the remote and add it to the
- destination store. Meanwhile, extract all the info from the
- NAR that getBuildOutput() needs. */
- auto source2 = sinkToSource([&](Sink & sink)
- {
- /* Note: we should only send the command to dump the store
- path to the remote if the NAR is actually going to get read
- by the destination store, which won't happen if this path
- is already valid on the destination store. Since this
- lambda function only gets executed if someone tries to read
- from source2, we will send the command from here rather
- than outside the lambda. */
- to << cmdDumpStorePath << localStore->printStorePath(path);
- to.flush();
-
- TeeSource tee(from, sink);
- extractNarData(tee, localStore->printStorePath(path), narMembers);
- });
-
- destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
- }
+ localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);
+ build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
auto now2 = std::chrono::steady_clock::now();
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
+ /* Register the outputs of the newly built drv */
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
+ auto outputHashes = staticOutputHashes(*localStore, *step->drv);
+ for (auto & [outputName, realisation] : buildResult.builtOutputs) {
+ // Register the resolved drv output
+ destStore->registerDrvOutput(realisation);
+
+ // Also register the unresolved one
+ auto unresolvedRealisation = realisation;
+ unresolvedRealisation.signatures.clear();
+ unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
+ destStore->registerDrvOutput(unresolvedRealisation);
+ }
+ }
+
/* Shut down the connection. */
- child.to = -1;
- child.pid.wait();
+ child->in = -1;
+ child->sshPid.wait();
} catch (Error & e) {
/* Disable this machine until a certain period of time has
@@ -525,7 +618,7 @@ void State::buildRemote(ref<Store> destStore,
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
info->lastFailure = now;
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
- printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
+ printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
info->disabledUntil = now + std::chrono::seconds(delta);
}
throw;
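For context (not part of the diff): when a remote build throws, the machine is disabled for a window that grows exponentially with its consecutive failures (capped at four). A minimal standalone sketch of that calculation, assuming illustrative values for retryInterval and retryBackoff; the real values come from the queue runner's configuration:

#include <cmath>
#include <cstdlib>
#include <iostream>

int main()
{
    // Assumed illustrative defaults; Hydra reads these from its config.
    const unsigned int retryInterval = 60;   // seconds
    const double retryBackoff = 3.0;

    for (unsigned int consecutiveFailures = 1; consecutiveFailures <= 4; ++consecutiveFailures) {
        // Same shape as the diff: base * backoff^(failures-1) plus up to 30s of jitter.
        int delta = retryInterval * std::pow(retryBackoff, consecutiveFailures - 1) + (std::rand() % 30);
        std::cout << "failure #" << consecutiveFailures
                  << ": machine disabled for ~" << delta << "s\n";
    }
}

With these numbers the window grows roughly 60s, 180s, 540s, 1620s, which is why a flaky builder quickly stops being retried on every step.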
diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc
index ea8b4a6a..b0695e8b 100644
--- a/src/hydra-queue-runner/build-result.cc
+++ b/src/hydra-queue-runner/build-result.cc
@@ -1,7 +1,7 @@
#include "hydra-build-result.hh"
-#include "store-api.hh"
-#include "util.hh"
-#include "fs-accessor.hh"
+#include
+#include
+#include
#include
@@ -11,18 +11,18 @@ using namespace nix;
BuildOutput getBuildOutput(
nix::ref<Store> store,
NarMemberDatas & narMembers,
- const Derivation & drv)
+ const OutputPathMap derivationOutputs)
{
BuildOutput res;
/* Compute the closure size. */
StorePathSet outputs;
StorePathSet closure;
- for (auto & i : drv.outputsAndOptPaths(*store))
- if (i.second.second) {
- store->computeFSClosure(*i.second.second, closure);
- outputs.insert(*i.second.second);
- }
+ for (auto& [outputName, outputPath] : derivationOutputs) {
+ store->computeFSClosure(outputPath, closure);
+ outputs.insert(outputPath);
+ res.outputs.insert({outputName, outputPath});
+ }
for (auto & path : closure) {
auto info = store->queryPathInfo(path);
res.closureSize += info->narSize;
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
if (productsFile == narMembers.end() ||
- productsFile->second.type != FSAccessor::Type::tRegular)
+ productsFile->second.type != SourceAccessor::Type::tRegular)
continue;
assert(productsFile->second.contents);
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
- if (file->second.type == FSAccessor::Type::tRegular) {
+ if (file->second.type == SourceAccessor::Type::tRegular) {
product.isRegular = true;
product.fileSize = file->second.fileSize.value();
product.sha256hash = file->second.sha256.value();
@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(
/* If no build products were explicitly declared, then add all
outputs as a product of type "nix-build". */
if (!explicitProducts) {
- for (auto & [name, output] : drv.outputs) {
+ for (auto & [name, output] : derivationOutputs) {
BuildProduct product;
- auto outPath = output.path(*store, drv.name, name);
- product.path = store->printStorePath(*outPath);
+ product.path = store->printStorePath(output);
product.type = "nix-build";
product.subtype = name == "out" ? "" : name;
- product.name = outPath->name();
+ product.name = output.name();
auto file = narMembers.find(product.path);
assert(file != narMembers.end());
- if (file->second.type == FSAccessor::Type::tDirectory)
+ if (file->second.type == SourceAccessor::Type::tDirectory)
res.products.push_back(product);
}
}
@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
if (file == narMembers.end() ||
- file->second.type != FSAccessor::Type::tRegular)
+ file->second.type != SourceAccessor::Type::tRegular)
continue;
res.releaseName = trim(file->second.contents.value());
// FIXME: validate release name
@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
if (file == narMembers.end() ||
- file->second.type != FSAccessor::Type::tRegular)
+ file->second.type != SourceAccessor::Type::tRegular)
continue;
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
auto fields = tokenizeString<std::vector<std::string>>(line);
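An aside (not part of the diff): getBuildOutput() is now driven by an output-name to store-path map rather than the Derivation itself, and when a build declares no hydra-build-products every output falls back to a "nix-build" product. A simplified sketch of that fallback, using a plain std::map with hypothetical store paths as a stand-in for nix::OutputPathMap:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Product { std::string type, subtype, path, name; };

int main()
{
    // Hypothetical output map (name -> store path).
    std::map<std::string, std::string> outputs = {
        {"out", "/nix/store/aaaa-hello-2.12"},
        {"doc", "/nix/store/bbbb-hello-2.12-doc"},
    };

    // Mirrors the fallback: every output becomes a "nix-build" product,
    // with an empty subtype for the default "out" output.
    std::vector<Product> products;
    for (auto & [name, path] : outputs)
        products.push_back({"nix-build", name == "out" ? "" : name, path, name});

    for (auto & p : products)
        std::cout << p.type << " " << (p.subtype.empty() ? "-" : p.subtype) << " " << p.path << "\n";
}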
diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc
index 37022522..ff0634b1 100644
--- a/src/hydra-queue-runner/builder.cc
+++ b/src/hydra-queue-runner/builder.cc
@@ -2,8 +2,8 @@
#include "state.hh"
#include "hydra-build-result.hh"
-#include "finally.hh"
-#include "binary-cache-store.hh"
+#include
+#include
using namespace nix;
@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
}
-void State::builder(MachineReservation::ptr reservation)
+void State::builder(std::unique_ptr<MachineReservation> reservation)
{
setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)
activeSteps_.lock()->erase(activeStep);
});
+ std::string machine = reservation->machine->storeUri.render();
+
try {
auto destStore = getDestStore();
- res = doBuildStep(destStore, reservation, activeStep);
+ // Might release the reservation.
+ res = doBuildStep(destStore, std::move(reservation), activeStep);
} catch (std::exception & e) {
printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
- localStore->printStorePath(reservation->step->drvPath),
- reservation->machine->sshName,
+ localStore->printStorePath(activeStep->step->drvPath),
+ machine,
e.what());
}
}
- /* Release the machine and wake up the dispatcher. */
- assert(reservation.unique());
- reservation = 0;
- wakeDispatcher();
-
/* If there was a temporary failure, retry the step after an
exponentially increasing interval. */
Step::ptr step = wstep.lock();
@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)
State::StepResult State::doBuildStep(nix::ref<Store> destStore,
- MachineReservation::ptr reservation,
+ std::unique_ptr<MachineReservation> reservation,
std::shared_ptr activeStep)
{
- auto & step(reservation->step);
- auto & machine(reservation->machine);
+ auto step(reservation->step);
+ auto machine(reservation->machine);
{
auto step_(step->state.lock());
@@ -98,8 +96,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
it). */
BuildID buildId;
std::optional<StorePath> buildDrvPath;
- unsigned int maxSilentTime, buildTimeout;
- unsigned int repeats = step->isDeterministic ? 1 : 0;
+ // Other fields set below
+ nix::ServeProto::BuildOptions buildOptions {
+ .maxLogSize = maxLogSize,
+ .nrRepeats = step->isDeterministic ? 1u : 0u,
+ .enforceDeterminism = step->isDeterministic,
+ .keepFailed = false,
+ };
auto conn(dbPool.get());
@@ -134,18 +137,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
if (i != jobsetRepeats.end())
- repeats = std::max(repeats, i->second);
+ buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
}
}
if (!build) build = *dependents.begin();
buildId = build->id;
buildDrvPath = build->drvPath;
- maxSilentTime = build->maxSilentTime;
- buildTimeout = build->buildTimeout;
+ buildOptions.maxSilentTime = build->maxSilentTime;
+ buildOptions.buildTimeout = build->buildTimeout;
printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
- localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
+ localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
}
if (!buildOneDone)
@@ -173,7 +176,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
unlink(result.logFile.c_str());
}
} catch (...) {
- ignoreException();
+ ignoreExceptionInDestructor();
}
}
});
@@ -191,7 +194,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(*conn);
- stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
+ stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
txn.commit();
}
@@ -206,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
try {
/* FIXME: referring builds may have conflicting timeouts. */
- buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
+ buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
} catch (Error & e) {
if (activeStep->state_.lock()->cancelled) {
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -221,7 +224,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
if (result.stepStatus == bsSuccess) {
updateStep(ssPostProcessing);
- res = getBuildOutput(destStore, narMembers, *step->drv);
+ res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
}
}
@@ -248,7 +251,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
/* Finish the step in the database. */
if (stepNr) {
pqxx::work txn(*conn);
- finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
+ finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
txn.commit();
}
@@ -256,7 +259,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
issue). Retry a number of times. */
if (result.canRetry) {
printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
- localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
+ localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
assert(stepNr);
bool retry;
{
@@ -275,9 +278,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
assert(stepNr);
- for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
- if (i.second.second)
- addRoot(*i.second.second);
+ for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
+ if (!optOutputPath)
+ throw Error(
+ "Missing output %s for derivation %d which was supposed to have succeeded",
+ outputName, localStore->printStorePath(step->drvPath));
+ addRoot(*optOutputPath);
}
/* Register success in the database for all Build objects that
@@ -323,7 +329,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
pqxx::work txn(*conn);
for (auto & b : direct) {
- printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
+ printInfo("marking build %1% as succeeded", b->id);
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
result.startTime, result.stopTime);
}
@@ -398,7 +404,7 @@ void State::failStep(
Step::ptr step,
BuildID buildId,
const RemoteResult & result,
- Machine::ptr machine,
+ ::Machine::ptr machine,
bool & stepFinished)
{
/* Register failure in the database for all Build objects that
@@ -444,14 +450,14 @@ void State::failStep(
build->finishedInDB)
continue;
createBuildStep(txn,
- 0, build->id, step, machine ? machine->sshName : "",
+ 0, build->id, step, machine ? machine->storeUri.render() : "",
result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
}
/* Mark all builds that depend on this derivation as failed. */
for (auto & build : indirect) {
if (build->finishedInDB) continue;
- printMsg(lvlError, format("marking build %1% as failed") % build->id);
+ printError("marking build %1% as failed", build->id);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
build->id,
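An aside (not part of the diff): the loose maxSilentTime/buildTimeout/repeats variables are folded into a single options struct. A minimal sketch of that pattern, using a local stand-in declared here for illustration rather than the real nix::ServeProto::BuildOptions, and limiting it to the fields that appear in the hunks above:

#include <cstdint>
#include <iostream>

// Local stand-in with only the fields used in the diff.
struct BuildOptions {
    uint64_t maxLogSize = 0;
    unsigned int nrRepeats = 0;
    bool enforceDeterminism = false;
    bool keepFailed = false;
    unsigned int maxSilentTime = 0;
    unsigned int buildTimeout = 0;
};

int main()
{
    bool isDeterministic = true;

    // Fields known up front are set via designated initializers...
    BuildOptions buildOptions {
        .maxLogSize = 64u << 20,
        .nrRepeats = isDeterministic ? 1u : 0u,
        .enforceDeterminism = isDeterministic,
        .keepFailed = false,
    };

    // ...and per-build limits are filled in once the build row is known.
    buildOptions.maxSilentTime = 3600;
    buildOptions.buildTimeout = 36000;

    std::cout << "repeats=" << buildOptions.nrRepeats
              << " timeout=" << buildOptions.buildTimeout << "\n";
}

Passing one aggregate through doBuildStep() and buildRemote() keeps the option set in a single place instead of threading three separate parameters through every call.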
diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc
index d2bb3c90..ada25dc6 100644
--- a/src/hydra-queue-runner/dispatcher.cc
+++ b/src/hydra-queue-runner/dispatcher.cc
@@ -2,6 +2,7 @@
#include
#include
#include
+#include
#include "state.hh"
@@ -39,28 +40,34 @@ void State::dispatcher()
printMsg(lvlDebug, "dispatcher woken up");
nrDispatcherWakeups++;
- auto now1 = std::chrono::steady_clock::now();
+ auto t_before_work = std::chrono::steady_clock::now();
auto sleepUntil = doDispatch();
- auto now2 = std::chrono::steady_clock::now();
+ auto t_after_work = std::chrono::steady_clock::now();
- dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+ prom.dispatcher_time_spent_running.Increment(
+ std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+ dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
/* Sleep until we're woken up (either because a runnable build
is added, or because a build finishes). */
{
auto dispatcherWakeup_(dispatcherWakeup.lock());
if (!*dispatcherWakeup_) {
- printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
+ debug("dispatcher sleeping for %1%s",
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
}
*dispatcherWakeup_ = false;
}
+ auto t_after_sleep = std::chrono::steady_clock::now();
+ prom.dispatcher_time_spent_waiting.Increment(
+ std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+
} catch (std::exception & e) {
- printMsg(lvlError, format("dispatcher: %1%") % e.what());
+ printError("dispatcher: %s", e.what());
sleep(1);
}
@@ -80,17 +87,124 @@ system_time State::doDispatch()
jobset.second->pruneSteps();
auto s2 = jobset.second->shareUsed();
if (s1 != s2)
- printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
- % jobset.first.first % jobset.first.second % s1 % s2);
+ debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
+ jobset.first.first, jobset.first.second, s1, s2);
}
}
+ system_time now = std::chrono::system_clock::now();
+
/* Start steps until we're out of steps or slots. */
auto sleepUntil = system_time::max();
bool keepGoing;
+ /* Sort the runnable steps by priority. Priority is established
+ as follows (in order of precedence):
+
+ - The global priority of the builds that depend on the
+ step. This allows admins to bump a build to the front of
+ the queue.
+
+ - The lowest used scheduling share of the jobsets depending
+ on the step.
+
+ - The local priority of the build, as set via the build's
+ meta.schedulingPriority field. Note that this is not
+ quite correct: the local priority should only be used to
+ establish priority between builds in the same jobset, but
+ here it's used between steps in different jobsets if they
+ happen to have the same lowest used scheduling share. But
+ that's not very likely.
+
+ - The lowest ID of the builds depending on the step;
+ i.e. older builds take priority over new ones.
+
+ FIXME: O(n lg n); obviously, it would be better to keep a
+ runnable queue sorted by priority. */
+ struct StepInfo
+ {
+ Step::ptr step;
+ bool alreadyScheduled = false;
+
+ /* The lowest share used of any jobset depending on this
+ step. */
+ double lowestShareUsed = 1e9;
+
+ /* Info copied from step->state to ensure that the
+ comparator is a partial ordering (see MachineInfo). */
+ int highestGlobalPriority;
+ int highestLocalPriority;
+ size_t numRequiredSystemFeatures;
+ size_t numRevDeps;
+ BuildID lowestBuildID;
+
+ StepInfo(Step::ptr step, Step::State & step_) : step(step)
+ {
+ for (auto & jobset : step_.jobsets)
+ lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
+ highestGlobalPriority = step_.highestGlobalPriority;
+ highestLocalPriority = step_.highestLocalPriority;
+ numRequiredSystemFeatures = step->requiredSystemFeatures.size();
+ numRevDeps = step_.rdeps.size();
+ lowestBuildID = step_.lowestBuildID;
+ }
+ };
+
+ std::vector<StepInfo> runnableSorted;
+
+ struct RunnablePerType
+ {
+ unsigned int count{0};
+ std::chrono::seconds waitTime{0};
+ };
+
+ std::unordered_map<std::string, RunnablePerType> runnablePerType;
+
+ {
+ auto runnable_(runnable.lock());
+ runnableSorted.reserve(runnable_->size());
+ for (auto i = runnable_->begin(); i != runnable_->end(); ) {
+ auto step = i->lock();
+
+ /* Remove dead steps. */
+ if (!step) {
+ i = runnable_->erase(i);
+ continue;
+ }
+
+ ++i;
+
+ auto & r = runnablePerType[step->systemType];
+ r.count++;
+
+ /* Skip previously failed steps that aren't ready
+ to be retried. */
+ auto step_(step->state.lock());
+ r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
+ if (step_->tries > 0 && step_->after > now) {
+ if (step_->after < sleepUntil)
+ sleepUntil = step_->after;
+ continue;
+ }
+
+ runnableSorted.emplace_back(step, *step_);
+ }
+ }
+
+ sort(runnableSorted.begin(), runnableSorted.end(),
+ [](const StepInfo & a, const StepInfo & b)
+ {
+ return
+ a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
+ a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
+ a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
+ a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
+ a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
+ a.lowestBuildID < b.lowestBuildID;
+ });
+
do {
- system_time now = std::chrono::system_clock::now();
+ now = std::chrono::system_clock::now();
/* Copy the currentJobs field of each machine. This is
necessary to ensure that the sort comparator below is
@@ -98,7 +212,7 @@ system_time State::doDispatch()
filter out temporarily disabled machines. */
struct MachineInfo
{
- Machine::ptr machine;
+ ::Machine::ptr machine;
unsigned long currentJobs;
};
std::vector<MachineInfo> machinesSorted;
@@ -138,104 +252,6 @@ system_time State::doDispatch()
a.currentJobs > b.currentJobs;
});
- /* Sort the runnable steps by priority. Priority is established
- as follows (in order of precedence):
-
- - The global priority of the builds that depend on the
- step. This allows admins to bump a build to the front of
- the queue.
-
- - The lowest used scheduling share of the jobsets depending
- on the step.
-
- - The local priority of the build, as set via the build's
- meta.schedulingPriority field. Note that this is not
- quite correct: the local priority should only be used to
- establish priority between builds in the same jobset, but
- here it's used between steps in different jobsets if they
- happen to have the same lowest used scheduling share. But
- that's not very likely.
-
- - The lowest ID of the builds depending on the step;
- i.e. older builds take priority over new ones.
-
- FIXME: O(n lg n); obviously, it would be better to keep a
- runnable queue sorted by priority. */
- struct StepInfo
- {
- Step::ptr step;
-
- /* The lowest share used of any jobset depending on this
- step. */
- double lowestShareUsed = 1e9;
-
- /* Info copied from step->state to ensure that the
- comparator is a partial ordering (see MachineInfo). */
- int highestGlobalPriority;
- int highestLocalPriority;
- BuildID lowestBuildID;
-
- StepInfo(Step::ptr step, Step::State & step_) : step(step)
- {
- for (auto & jobset : step_.jobsets)
- lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
- highestGlobalPriority = step_.highestGlobalPriority;
- highestLocalPriority = step_.highestLocalPriority;
- lowestBuildID = step_.lowestBuildID;
- }
- };
-
- std::vector<StepInfo> runnableSorted;
-
- struct RunnablePerType
- {
- unsigned int count{0};
- std::chrono::seconds waitTime{0};
- };
-
- std::unordered_map<std::string, RunnablePerType> runnablePerType;
-
- {
- auto runnable_(runnable.lock());
- runnableSorted.reserve(runnable_->size());
- for (auto i = runnable_->begin(); i != runnable_->end(); ) {
- auto step = i->lock();
-
- /* Remove dead steps. */
- if (!step) {
- i = runnable_->erase(i);
- continue;
- }
-
- ++i;
-
- auto & r = runnablePerType[step->systemType];
- r.count++;
-
- /* Skip previously failed steps that aren't ready
- to be retried. */
- auto step_(step->state.lock());
- r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
- if (step_->tries > 0 && step_->after > now) {
- if (step_->after < sleepUntil)
- sleepUntil = step_->after;
- continue;
- }
-
- runnableSorted.emplace_back(step, *step_);
- }
- }
-
- sort(runnableSorted.begin(), runnableSorted.end(),
- [](const StepInfo & a, const StepInfo & b)
- {
- return
- a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
- a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
- a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
- a.lowestBuildID < b.lowestBuildID;
- });
-
/* Find a machine with a free slot and find a step to run
on it. Once we find such a pair, we restart the outer
loop because the machine sorting will have changed. */
@@ -245,12 +261,14 @@ system_time State::doDispatch()
if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
for (auto & stepInfo : runnableSorted) {
+ if (stepInfo.alreadyScheduled) continue;
+
auto & step(stepInfo.step);
/* Can this machine do this step? */
if (!mi.machine->supportsStep(step)) {
debug("machine '%s' does not support step '%s' (system type '%s')",
- mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
+ mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
continue;
}
@@ -271,10 +289,12 @@ system_time State::doDispatch()
r.count--;
}
+ stepInfo.alreadyScheduled = true;
+
/* Make a slot reservation and start a thread to
do the build. */
auto builderThread = std::thread(&State::builder, this,
- std::make_shared<MachineReservation>(*this, step, mi.machine));
+ std::make_unique<MachineReservation>(*this, step, mi.machine));
builderThread.detach(); // FIXME?
keepGoing = true;
@@ -428,7 +448,7 @@ void Jobset::pruneSteps()
}
-State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
+State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
: state(state), step(step), machine(machine)
{
machine->state->currentJobs++;
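For readers skimming the moved comparator (not part of the diff): a self-contained sketch of the same tie-breaking chain on a simplified StepInfo with made-up numbers, showing that a higher global priority always wins and that, with all other keys equal, the oldest build goes first:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Simplified stand-in for StepInfo with just the sort keys from the diff.
struct StepInfo {
    int highestGlobalPriority;
    double lowestShareUsed;
    int highestLocalPriority;
    size_t numRequiredSystemFeatures;
    size_t numRevDeps;
    unsigned int lowestBuildID;
};

int main()
{
    std::vector<StepInfo> steps = {
        {0, 0.5, 0, 1, 3, 1200},
        {1, 0.9, 0, 0, 0, 1500},  // bumped globally, so it sorts first
        {0, 0.5, 0, 1, 3,  900},  // same keys as the first entry, older build wins
    };

    // Same lexicographic tie-breaking chain as doDispatch().
    std::sort(steps.begin(), steps.end(), [](const StepInfo & a, const StepInfo & b) {
        return
            a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
            a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
            a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
            a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
            a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
            a.lowestBuildID < b.lowestBuildID;
    });

    for (auto & s : steps)
        std::cout << "build " << s.lowestBuildID << "\n";  // prints 1500, 900, 1200
}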
diff --git a/src/hydra-queue-runner/hydra-build-result.hh b/src/hydra-queue-runner/hydra-build-result.hh
index a3f71ae9..654bf1be 100644
--- a/src/hydra-queue-runner/hydra-build-result.hh
+++ b/src/hydra-queue-runner/hydra-build-result.hh
@@ -2,9 +2,9 @@
#include
-#include "hash.hh"
-#include "derivations.hh"
-#include "store-api.hh"
+#include
+#include
+#include
#include "nar-extractor.hh"
struct BuildProduct
@@ -36,10 +36,12 @@ struct BuildOutput
std::list<BuildProduct> products;
+ std::map<std::string, nix::StorePath> outputs;
+
std::map<std::string, BuildMetric> metrics;
};
BuildOutput getBuildOutput(
nix::ref<nix::Store> store,
NarMemberDatas & narMembers,
- const nix::Derivation & drv);
+ const nix::OutputPathMap derivationOutputs);
diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index 723bf223..a4a7f0a7 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -1,6 +1,7 @@
#include
#include
#include
+#include
#include
#include
@@ -8,27 +9,21 @@
#include
+#include
+
+#include
#include "state.hh"
#include "hydra-build-result.hh"
-#include "store-api.hh"
-#include "remote-store.hh"
+#include
+#include
-#include "globals.hh"
+#include
#include "hydra-config.hh"
-#include "json.hh"
-#include "s3-binary-cache-store.hh"
-#include "shared.hh"
+#include
+#include
using namespace nix;
-
-
-namespace nix {
-
-template<> void toJSON>(std::ostream & str, const std::atomic & n) { str << n; }
-template<> void toJSON>(std::ostream & str, const std::atomic & n) { str << n; }
-template<> void toJSON(std::ostream & str, const double & n) { str << n; }
-
-}
+using nlohmann::json;
std::string getEnvOrDie(const std::string & key)
@@ -75,10 +70,31 @@ State::PromMetrics::PromMetrics()
.Register(*registry)
.Add({})
)
- , queue_max_id(
- prometheus::BuildGauge()
- .Name("hydraqueuerunner_queue_max_build_id_info")
- .Help("Maximum build record ID in the queue")
+ , dispatcher_time_spent_running(
+ prometheus::BuildCounter()
+ .Name("hydraqueuerunner_dispatcher_time_spent_running")
+ .Help("Time (in micros) spent running the dispatcher")
+ .Register(*registry)
+ .Add({})
+ )
+ , dispatcher_time_spent_waiting(
+ prometheus::BuildCounter()
+ .Name("hydraqueuerunner_dispatcher_time_spent_waiting")
+ .Help("Time (in micros) spent waiting for the dispatcher to obtain work")
+ .Register(*registry)
+ .Add({})
+ )
+ , queue_monitor_time_spent_running(
+ prometheus::BuildCounter()
+ .Name("hydraqueuerunner_queue_monitor_time_spent_running")
+ .Help("Time (in micros) spent running the queue monitor")
+ .Register(*registry)
+ .Add({})
+ )
+ , queue_monitor_time_spent_waiting(
+ prometheus::BuildCounter()
+ .Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
+ .Help("Time (in micros) spent waiting for the queue monitor to obtain work")
.Register(*registry)
.Add({})
)
@@ -90,6 +106,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
: config(std::make_unique())
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
, dbPool(config->getIntOption("max_db_connections", 128))
+ , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
@@ -140,50 +157,29 @@ void State::parseMachines(const std::string & contents)
oldMachines = *machines_;
}
- for (auto line : tokenizeString<Strings>(contents, "\n")) {
- line = trim(std::string(line, 0, line.find('#')));
- auto tokens = tokenizeString<std::vector<std::string>>(line);
- if (tokens.size() < 3) continue;
- tokens.resize(8);
-
- auto machine = std::make_shared<Machine>();
- machine->sshName = tokens[0];
- machine->systemTypes = tokenizeString(tokens[1], ",");
- machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2];
- if (tokens[3] != "")
- machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
- else
- machine->maxJobs = 1;
- machine->speedFactor = atof(tokens[4].c_str());
- if (tokens[5] == "-") tokens[5] = "";
- machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
- if (tokens[6] == "-") tokens[6] = "";
- machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
- for (auto & f : machine->mandatoryFeatures)
- machine->supportedFeatures.insert(f);
- if (tokens[7] != "" && tokens[7] != "-")
- machine->sshPublicHostKey = base64Decode(tokens[7]);
+ for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
+ auto machine = std::make_shared<::Machine>(std::move(machine_));
/* Re-use the State object of the previous machine with the
same name. */
- auto i = oldMachines.find(machine->sshName);
+ auto i = oldMachines.find(machine->storeUri.variant);
if (i == oldMachines.end())
- printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
+ printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
else
- printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
+ printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
machine->state = i == oldMachines.end()
- ? std::make_shared<Machine::State>()
+ ? std::make_shared<::Machine::State>()
: i->second->state;
- newMachines[machine->sshName] = machine;
+ newMachines[machine->storeUri.variant] = machine;
}
for (auto & m : oldMachines)
if (newMachines.find(m.first) == newMachines.end()) {
if (m.second->enabled)
- printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
- /* Add a disabled Machine object to make sure stats are
+ printInfo("removing machine ‘%1%’", m.second->storeUri.render());
+ /* Add a disabled ::Machine object to make sure stats are
maintained. */
- auto machine = std::make_shared<Machine>(*(m.second));
+ auto machine = std::make_shared<::Machine>(*(m.second));
machine->enabled = false;
newMachines[m.first] = machine;
}
@@ -211,7 +207,7 @@ void State::monitorMachinesFile()
parseMachines("localhost " +
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
- + concatStringsSep(",", settings.systemFeatures.get()));
+ + concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
machinesReadyLock.unlock();
return;
}
@@ -318,10 +314,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
if (r.affected_rows() == 0) goto restart;
- for (auto & [name, output] : step->drv->outputs)
+ for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
txn.exec_params0
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
- buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
+ buildId, stepNr, name,
+ output
+ ? std::optional { localStore->printStorePath(*output)}
+ : std::nullopt);
if (status == bsBusy)
txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
@@ -358,11 +357,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
assert(result.logFile.find('\t') == std::string::npos);
txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
buildId, stepNr, result.logFile));
+
+ if (result.stepStatus == bsSuccess) {
+ // Update the corresponding `BuildStepOutputs` row to add the output path
+ auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
+ assert(res.size());
+ StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
+ // If we've finished building, all the paths should be known
+ for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
+ txn.exec_params0
+ ("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
+ buildId, stepNr, name, localStore->printStorePath(output));
+ }
}
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
- Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath)
+ Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
{
restart:
auto stepNr = allocBuildStep(txn, build->id);
@@ -463,6 +474,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
isCachedBuild ? 1 : 0);
+ for (auto & [outputName, outputPath] : res.outputs) {
+ txn.exec_params0
+ ("update BuildOutputs set path = $3 where build = $1 and name = $2",
+ build->id,
+ outputName,
+ localStore->printStorePath(outputPath)
+ );
+ }
+
txn.exec_params0("delete from BuildProducts where build = $1", build->id);
unsigned int productNr = 1;
@@ -474,7 +494,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
product.type,
product.subtype,
product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
- product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
+ product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
product.path,
product.name,
product.defaultPath);
@@ -542,182 +562,174 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
void State::dumpStatus(Connection & conn)
{
- std::ostringstream out;
+ time_t now = time(0);
+ json statusJson = {
+ {"status", "up"},
+ {"time", time(0)},
+ {"uptime", now - startedAt},
+ {"pid", getpid()},
+ {"nrQueuedBuilds", builds.lock()->size()},
+ {"nrActiveSteps", activeSteps_.lock()->size()},
+ {"nrStepsBuilding", nrStepsBuilding.load()},
+ {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
+ {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
+ {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
+ {"nrStepsWaiting", nrStepsWaiting.load()},
+ {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
+ {"bytesSent", bytesSent.load()},
+ {"bytesReceived", bytesReceived.load()},
+ {"nrBuildsRead", nrBuildsRead.load()},
+ {"buildReadTimeMs", buildReadTimeMs.load()},
+ {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
+ {"nrBuildsDone", nrBuildsDone.load()},
+ {"nrStepsStarted", nrStepsStarted.load()},
+ {"nrStepsDone", nrStepsDone.load()},
+ {"nrRetries", nrRetries.load()},
+ {"maxNrRetries", maxNrRetries.load()},
+ {"nrQueueWakeups", nrQueueWakeups.load()},
+ {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
+ {"dispatchTimeMs", dispatchTimeMs.load()},
+ {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
+ {"nrDbConnections", dbPool.count()},
+ {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
+ };
{
- JSONObject root(out);
- time_t now = time(0);
- root.attr("status", "up");
- root.attr("time", time(0));
- root.attr("uptime", now - startedAt);
- root.attr("pid", getpid());
- {
- auto builds_(builds.lock());
- root.attr("nrQueuedBuilds", builds_->size());
- }
{
auto steps_(steps.lock());
for (auto i = steps_->begin(); i != steps_->end(); )
if (i->second.lock()) ++i; else i = steps_->erase(i);
- root.attr("nrUnfinishedSteps", steps_->size());
+ statusJson["nrUnfinishedSteps"] = steps_->size();
}
{
auto runnable_(runnable.lock());
for (auto i = runnable_->begin(); i != runnable_->end(); )
if (i->lock()) ++i; else i = runnable_->erase(i);
- root.attr("nrRunnableSteps", runnable_->size());
+ statusJson["nrRunnableSteps"] = runnable_->size();
}
- root.attr("nrActiveSteps", activeSteps_.lock()->size());
- root.attr("nrStepsBuilding", nrStepsBuilding);
- root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
- root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
- root.attr("nrStepsWaiting", nrStepsWaiting);
- root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
- root.attr("bytesSent", bytesSent);
- root.attr("bytesReceived", bytesReceived);
- root.attr("nrBuildsRead", nrBuildsRead);
- root.attr("buildReadTimeMs", buildReadTimeMs);
- root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
- root.attr("nrBuildsDone", nrBuildsDone);
- root.attr("nrStepsStarted", nrStepsStarted);
- root.attr("nrStepsDone", nrStepsDone);
- root.attr("nrRetries", nrRetries);
- root.attr("maxNrRetries", maxNrRetries);
if (nrStepsDone) {
- root.attr("totalStepTime", totalStepTime);
- root.attr("totalStepBuildTime", totalStepBuildTime);
- root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
- root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
+ statusJson["totalStepTime"] = totalStepTime.load();
+ statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
+ statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
+ statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
}
- root.attr("nrQueueWakeups", nrQueueWakeups);
- root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
- root.attr("dispatchTimeMs", dispatchTimeMs);
- root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
- root.attr("nrDbConnections", dbPool.count());
- root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
{
- auto nested = root.object("machines");
+ auto machines_json = json::object();
auto machines_(machines.lock());
for (auto & i : *machines_) {
auto & m(i.second);
auto & s(m->state);
- auto nested2 = nested.object(m->sshName);
- nested2.attr("enabled", m->enabled);
-
- {
- auto list = nested2.list("systemTypes");
- for (auto & s : m->systemTypes)
- list.elem(s);
- }
-
- {
- auto list = nested2.list("supportedFeatures");
- for (auto & s : m->supportedFeatures)
- list.elem(s);
- }
-
- {
- auto list = nested2.list("mandatoryFeatures");
- for (auto & s : m->mandatoryFeatures)
- list.elem(s);
- }
-
- nested2.attr("currentJobs", s->currentJobs);
- if (s->currentJobs == 0)
- nested2.attr("idleSince", s->idleSince);
- nested2.attr("nrStepsDone", s->nrStepsDone);
- if (m->state->nrStepsDone) {
- nested2.attr("totalStepTime", s->totalStepTime);
- nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
- nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
- nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
- }
-
auto info(m->state->connectInfo.lock());
- nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
- nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
- nested2.attr("consecutiveFailures", info->consecutiveFailures);
+ json machine = {
+ {"enabled", m->enabled},
+ {"systemTypes", m->systemTypes},
+ {"supportedFeatures", m->supportedFeatures},
+ {"mandatoryFeatures", m->mandatoryFeatures},
+ {"nrStepsDone", s->nrStepsDone.load()},
+ {"currentJobs", s->currentJobs.load()},
+ {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
+ {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
+ {"consecutiveFailures", info->consecutiveFailures},
+ };
+
+ if (s->currentJobs == 0)
+ machine["idleSince"] = s->idleSince.load();
+ if (m->state->nrStepsDone) {
+ machine["totalStepTime"] = s->totalStepTime.load();
+ machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
+ machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
+ machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
+ }
+ machines_json[m->storeUri.render()] = machine;
}
+ statusJson["machines"] = machines_json;
}
{
- auto nested = root.object("jobsets");
+ auto jobsets_json = json::object();
auto jobsets_(jobsets.lock());
for (auto & jobset : *jobsets_) {
- auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
- nested2.attr("shareUsed", jobset.second->shareUsed());
- nested2.attr("seconds", jobset.second->getSeconds());
+ jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
+ {"shareUsed", jobset.second->shareUsed()},
+ {"seconds", jobset.second->getSeconds()},
+ };
}
+ statusJson["jobsets"] = jobsets_json;
}
{
- auto nested = root.object("machineTypes");
+ auto machineTypesJson = json::object();
auto machineTypes_(machineTypes.lock());
for (auto & i : *machineTypes_) {
- auto nested2 = nested.object(i.first);
- nested2.attr("runnable", i.second.runnable);
- nested2.attr("running", i.second.running);
+ auto machineTypeJson = machineTypesJson[i.first] = {
+ {"runnable", i.second.runnable},
+ {"running", i.second.running},
+ };
if (i.second.runnable > 0)
- nested2.attr("waitTime", i.second.waitTime.count() +
- i.second.runnable * (time(0) - lastDispatcherCheck));
+ machineTypeJson["waitTime"] = i.second.waitTime.count() +
+ i.second.runnable * (time(0) - lastDispatcherCheck);
if (i.second.running == 0)
- nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
+ machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
}
+ statusJson["machineTypes"] = machineTypesJson;
}
auto store = getDestStore();
- auto nested = root.object("store");
-
auto & stats = store->getStats();
- nested.attr("narInfoRead", stats.narInfoRead);
- nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
- nested.attr("narInfoMissing", stats.narInfoMissing);
- nested.attr("narInfoWrite", stats.narInfoWrite);
- nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
- nested.attr("narRead", stats.narRead);
- nested.attr("narReadBytes", stats.narReadBytes);
- nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
- nested.attr("narWrite", stats.narWrite);
- nested.attr("narWriteAverted", stats.narWriteAverted);
- nested.attr("narWriteBytes", stats.narWriteBytes);
- nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
- nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
- nested.attr("narCompressionSavings",
- stats.narWriteBytes
- ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
- : 0.0);
- nested.attr("narCompressionSpeed", // MiB/s
+ statusJson["store"] = {
+ {"narInfoRead", stats.narInfoRead.load()},
+ {"narInfoReadAverted", stats.narInfoReadAverted.load()},
+ {"narInfoMissing", stats.narInfoMissing.load()},
+ {"narInfoWrite", stats.narInfoWrite.load()},
+ {"narInfoCacheSize", stats.pathInfoCacheSize.load()},
+ {"narRead", stats.narRead.load()},
+ {"narReadBytes", stats.narReadBytes.load()},
+ {"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
+ {"narWrite", stats.narWrite.load()},
+ {"narWriteAverted", stats.narWriteAverted.load()},
+ {"narWriteBytes", stats.narWriteBytes.load()},
+ {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
+ {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
+ {"narCompressionSavings",
+ stats.narWriteBytes
+ ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+ : 0.0},
+ {"narCompressionSpeed", // MiB/s
stats.narWriteCompressionTimeMs
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
- : 0.0);
+ : 0.0},
+ };
+#if NIX_WITH_S3_SUPPORT
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) {
- auto nested2 = nested.object("s3");
auto & s3Stats = s3Store->getS3Stats();
- nested2.attr("put", s3Stats.put);
- nested2.attr("putBytes", s3Stats.putBytes);
- nested2.attr("putTimeMs", s3Stats.putTimeMs);
- nested2.attr("putSpeed",
- s3Stats.putTimeMs
- ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
- : 0.0);
- nested2.attr("get", s3Stats.get);
- nested2.attr("getBytes", s3Stats.getBytes);
- nested2.attr("getTimeMs", s3Stats.getTimeMs);
- nested2.attr("getSpeed",
- s3Stats.getTimeMs
- ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
- : 0.0);
- nested2.attr("head", s3Stats.head);
- nested2.attr("costDollarApprox",
- (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
- + s3Stats.put / 1000.0 * 0.005 +
- + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
+ auto jsonS3 = statusJson["s3"] = {
+ {"put", s3Stats.put.load()},
+ {"putBytes", s3Stats.putBytes.load()},
+ {"putTimeMs", s3Stats.putTimeMs.load()},
+ {"putSpeed",
+ s3Stats.putTimeMs
+ ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
+ : 0.0},
+ {"get", s3Stats.get.load()},
+ {"getBytes", s3Stats.getBytes.load()},
+ {"getTimeMs", s3Stats.getTimeMs.load()},
+ {"getSpeed",
+ s3Stats.getTimeMs
+ ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
+ : 0.0},
+ {"head", s3Stats.head.load()},
+ {"costDollarApprox",
+ (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ + s3Stats.put / 1000.0 * 0.005 +
+ + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
+ };
}
+#endif
}
{
@@ -725,7 +737,7 @@ void State::dumpStatus(Connection & conn)
pqxx::work txn(conn);
// FIXME: use PostgreSQL 9.5 upsert.
txn.exec("delete from SystemStatus where what = 'queue-runner'");
- txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
+ txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
txn.exec("notify status_dumped");
txn.commit();
}
@@ -820,7 +832,7 @@ void State::run(BuildID buildOne)
<< metricsAddr << "/metrics (port " << exposerPort << ")"
<< std::endl;
- Store::Params localParams;
+ Store::Config::Params localParams;
localParams["max-connections"] = "16";
localParams["max-connection-age"] = "600";
localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
@@ -902,10 +914,17 @@ void State::run(BuildID buildOne)
while (true) {
try {
auto conn(dbPool.get());
- receiver dumpStatus_(*conn, "dump_status");
- while (true) {
- conn->await_notification();
- dumpStatus(*conn);
+ try {
+ receiver dumpStatus_(*conn, "dump_status");
+ while (true) {
+ conn->await_notification();
+ dumpStatus(*conn);
+ }
+ } catch (pqxx::broken_connection & connEx) {
+ printMsg(lvlError, "main thread: %s", connEx.what());
+ printMsg(lvlError, "main thread: Reconnecting in 10s");
+ conn.markBad();
+ sleep(10);
}
} catch (std::exception & e) {
printMsg(lvlError, "main thread: %s", e.what());
@@ -950,7 +969,6 @@ int main(int argc, char * * argv)
});
settings.verboseBuild = true;
- settings.lockCPU = false;
State state{metricsAddrOpt};
if (status)
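A side note (not part of the diff): the dumpStatus() rewrite swaps the streaming JSONObject helper for nlohmann::json, which lets nested objects be built with initializer lists and serialized in one go before being written to the SystemStatus table. A minimal sketch with made-up counters and a hypothetical machine URI:

#include <iostream>
#include <nlohmann/json.hpp>

using nlohmann::json;

int main()
{
    // Illustrative numbers; dumpStatus() fills these from live counters.
    json statusJson = {
        {"status", "up"},
        {"uptime", 86400},
        {"nrStepsBuilding", 12},
    };

    // Nested objects are built separately and attached, as with machines_json above.
    auto machines_json = json::object();
    machines_json["ssh://root@builder1"] = {
        {"enabled", true},
        {"currentJobs", 3},
    };
    statusJson["machines"] = machines_json;

    // The serialized string is what would end up in the database row.
    std::cout << statusJson.dump(2) << std::endl;
}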
diff --git a/src/hydra-queue-runner/meson.build b/src/hydra-queue-runner/meson.build
new file mode 100644
index 00000000..27dad2c0
--- /dev/null
+++ b/src/hydra-queue-runner/meson.build
@@ -0,0 +1,24 @@
+srcs = files(
+ 'builder.cc',
+ 'build-remote.cc',
+ 'build-result.cc',
+ 'dispatcher.cc',
+ 'hydra-queue-runner.cc',
+ 'nar-extractor.cc',
+ 'queue-monitor.cc',
+)
+
+hydra_queue_runner = executable('hydra-queue-runner',
+ 'hydra-queue-runner.cc',
+ srcs,
+ dependencies: [
+ libhydra_dep,
+ nix_util_dep,
+ nix_store_dep,
+ nix_main_dep,
+ pqxx_dep,
+ prom_cpp_core_dep,
+ prom_cpp_pull_dep,
+ ],
+ install: true,
+)
diff --git a/src/hydra-queue-runner/nar-extractor.cc b/src/hydra-queue-runner/nar-extractor.cc
index 9f0eb431..3bf06ef3 100644
--- a/src/hydra-queue-runner/nar-extractor.cc
+++ b/src/hydra-queue-runner/nar-extractor.cc
@@ -1,12 +1,51 @@
#include "nar-extractor.hh"
-#include "archive.hh"
+#include
#include
using namespace nix;
-struct Extractor : ParseSink
+
+struct NarMemberConstructor : CreateRegularFileSink
+{
+ NarMemberData & curMember;
+
+ HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
+
+ std::optional<uint64_t> expectedSize;
+
+ NarMemberConstructor(NarMemberData & curMember)
+ : curMember(curMember)
+ { }
+
+ void isExecutable() override
+ {
+ }
+
+ void preallocateContents(uint64_t size) override
+ {
+ expectedSize = size;
+ }
+
+ void operator () (std::string_view data) override
+ {
+ assert(expectedSize);
+ *curMember.fileSize += data.size();
+ hashSink(data);
+ if (curMember.contents) {
+ curMember.contents->append(data);
+ }
+ assert(curMember.fileSize <= expectedSize);
+ if (curMember.fileSize == expectedSize) {
+ auto [hash, len] = hashSink.finish();
+ assert(curMember.fileSize == len);
+ curMember.sha256 = hash;
+ }
+ }
+};
+
+struct Extractor : FileSystemObjectSink
{
std::unordered_set<Path> filesToKeep {
"/nix-support/hydra-build-products",
@@ -15,58 +54,40 @@ struct Extractor : ParseSink
};
NarMemberDatas & members;
- NarMemberData * curMember = nullptr;
- Path prefix;
+ std::filesystem::path prefix;
+
+ Path toKey(const CanonPath & path)
+ {
+ std::filesystem::path p = prefix;
+ // Conditional to avoid trailing slash
+ if (!path.isRoot()) p /= path.rel();
+ return p;
+ }
Extractor(NarMemberDatas & members, const Path & prefix)
: members(members), prefix(prefix)
{ }
- void createDirectory(const Path & path) override
+ void createDirectory(const CanonPath & path) override
{
- members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
+ members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
}
- void createRegularFile(const Path & path) override
+ void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
{
- curMember = &members.insert_or_assign(prefix + path, NarMemberData {
- .type = FSAccessor::Type::tRegular,
- .fileSize = 0,
- .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
- }).first->second;
+ NarMemberConstructor nmc {
+ members.insert_or_assign(toKey(path), NarMemberData {
+ .type = SourceAccessor::Type::tRegular,
+ .fileSize = 0,
+ .contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
+ }).first->second,
+ };
+ func(nmc);
}
- std::optional<uint64_t> expectedSize;
- std::unique_ptr<HashSink> hashSink;
-
- void preallocateContents(uint64_t size) override
+ void createSymlink(const CanonPath & path, const std::string & target) override
{
- expectedSize = size;
- hashSink = std::make_unique<HashSink>(htSHA256);
- }
-
- void receiveContents(std::string_view data) override
- {
- assert(expectedSize);
- assert(curMember);
- assert(hashSink);
- *curMember->fileSize += data.size();
- (*hashSink)(data);
- if (curMember->contents) {
- curMember->contents->append(data);
- }
- assert(curMember->fileSize <= expectedSize);
- if (curMember->fileSize == expectedSize) {
- auto [hash, len] = hashSink->finish();
- assert(curMember->fileSize == len);
- curMember->sha256 = hash;
- hashSink.reset();
- }
- }
-
- void createSymlink(const Path & path, const std::string & target) override
- {
- members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
+ members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
}
};
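An aside (not part of the diff): the new NarMemberConstructor receives each regular file's contents as a series of chunks and only finalizes the SHA-256 hash once the size announced via preallocateContents() has arrived. A simplified, Nix-free sketch of that chunked-sink pattern; ChunkedFileSink is a made-up name, and it concatenates data where the real sink feeds a HashSink:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <string_view>

// Consumes a file's contents in chunks, tracks the running size, and
// "finalizes" once the preallocated size announced by the parser is reached.
struct ChunkedFileSink {
    uint64_t fileSize = 0;
    std::optional<uint64_t> expectedSize;
    std::string contents;
    bool finished = false;

    void preallocateContents(uint64_t size) { expectedSize = size; }

    void operator()(std::string_view data)
    {
        assert(expectedSize);
        fileSize += data.size();
        contents.append(data);
        assert(fileSize <= *expectedSize);
        if (fileSize == *expectedSize)
            finished = true;   // here the real sink calls hashSink.finish()
    }
};

int main()
{
    ChunkedFileSink sink;
    sink.preallocateContents(11);
    sink("hello ");
    sink("world");
    std::cout << sink.contents << " (" << sink.fileSize << " bytes, finished="
              << std::boolalpha << sink.finished << ")\n";
}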
diff --git a/src/hydra-queue-runner/nar-extractor.hh b/src/hydra-queue-runner/nar-extractor.hh
index 45b2706c..0060efe2 100644
--- a/src/hydra-queue-runner/nar-extractor.hh
+++ b/src/hydra-queue-runner/nar-extractor.hh
@@ -1,13 +1,13 @@
#pragma once
-#include "fs-accessor.hh"
-#include "types.hh"
-#include "serialise.hh"
-#include "hash.hh"
+#include <nix/util/source-accessor.hh>
+#include <nix/util/types.hh>
+#include <nix/util/serialise.hh>
+#include <nix/util/hash.hh>
struct NarMemberData
{
- nix::FSAccessor::Type type;
+ nix::SourceAccessor::Type type;
std::optional<uint64_t> fileSize;
std::optional<std::string> contents;
std::optional<nix::Hash> sha256;
diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 3bde0d99..0785be6f 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -1,6 +1,8 @@
#include "state.hh"
#include "hydra-build-result.hh"
-#include "globals.hh"
+#include <nix/store/globals.hh>
+#include <nix/store/parsed-derivations.hh>
+#include <nix/util/thread-pool.hh>
#include
@@ -10,63 +12,74 @@ using namespace nix;
void State::queueMonitor()
{
while (true) {
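+ /* Use a fresh connection from the pool for each iteration so that a
+ broken connection can be marked bad, discarded, and replaced before
+ retrying. */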
+ auto conn(dbPool.get());
try {
- queueMonitorLoop();
+ queueMonitorLoop(*conn);
+ } catch (pqxx::broken_connection & e) {
+ printMsg(lvlError, "queue monitor: %s", e.what());
+ printMsg(lvlError, "queue monitor: Reconnecting in 10s");
+ conn.markBad();
+ sleep(10);
} catch (std::exception & e) {
- printMsg(lvlError, format("queue monitor: %1%") % e.what());
+ printError("queue monitor: %s", e.what());
sleep(10); // probably a DB problem, so don't retry right away
}
}
}
-void State::queueMonitorLoop()
+void State::queueMonitorLoop(Connection & conn)
{
- auto conn(dbPool.get());
-
- receiver buildsAdded(*conn, "builds_added");
- receiver buildsRestarted(*conn, "builds_restarted");
- receiver buildsCancelled(*conn, "builds_cancelled");
- receiver buildsDeleted(*conn, "builds_deleted");
- receiver buildsBumped(*conn, "builds_bumped");
- receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
+ receiver buildsAdded(conn, "builds_added");
+ receiver buildsRestarted(conn, "builds_restarted");
+ receiver buildsCancelled(conn, "builds_cancelled");
+ receiver buildsDeleted(conn, "builds_deleted");
+ receiver buildsBumped(conn, "builds_bumped");
+ receiver jobsetSharesChanged(conn, "jobset_shares_changed");
auto destStore = getDestStore();
- unsigned int lastBuildId = 0;
-
bool quit = false;
while (!quit) {
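+ /* Measure the time spent doing work versus waiting for database
+ notifications, and export both as Prometheus counters. */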
+ auto t_before_work = std::chrono::steady_clock::now();
+
localStore->clearPathInfoCache();
- bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
+ bool done = getQueuedBuilds(conn, destStore);
if (buildOne && buildOneDone) quit = true;
+ auto t_after_work = std::chrono::steady_clock::now();
+
+ prom.queue_monitor_time_spent_running.Increment(
+ std::chrono::duration_cast(t_after_work - t_before_work).count());
+
/* Sleep until we get notification from the database about an
event. */
if (done && !quit) {
- conn->await_notification();
+ conn.await_notification();
nrQueueWakeups++;
} else
- conn->get_notifs();
+ conn.get_notifs();
if (auto lowestId = buildsAdded.get()) {
- lastBuildId = std::min(lastBuildId, static_cast<unsigned int>(std::stoul(*lowestId) - 1));
printMsg(lvlTalkative, "got notification: new builds added to the queue");
}
if (buildsRestarted.get()) {
printMsg(lvlTalkative, "got notification: builds restarted");
- lastBuildId = 0; // check all builds
}
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
- processQueueChange(*conn);
+ processQueueChange(conn);
}
if (jobsetSharesChanged.get()) {
printMsg(lvlTalkative, "got notification: jobset shares changed");
- processJobsetSharesChange(*conn);
+ processJobsetSharesChange(conn);
}
+
+ auto t_after_sleep = std::chrono::steady_clock::now();
+ prom.queue_monitor_time_spent_waiting.Increment(
+ std::chrono::duration_cast(t_after_sleep - t_after_work).count());
}
exit(0);
@@ -80,20 +93,18 @@ struct PreviousFailure : public std::exception {
bool State::getQueuedBuilds(Connection & conn,
- ref<Store> destStore, unsigned int & lastBuildId)
+ ref<Store> destStore)
{
prom.queue_checks_started.Increment();
- printInfo("checking the queue for builds > %d...", lastBuildId);
+ printInfo("checking the queue for builds...");
/* Grab the queued builds from the database, but don't process
them yet (since we don't want a long-running transaction). */
std::vector<BuildID> newIDs;
- std::map<BuildID, Build::ptr> newBuildsByID;
+ std::unordered_map<BuildID, Build::ptr> newBuildsByID;
std::multimap<StorePath, BuildID> newBuildsByPath;
- unsigned int newLastBuildId = lastBuildId;
-
{
pqxx::work txn(conn);
@@ -102,17 +113,12 @@ bool State::getQueuedBuilds(Connection & conn,
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
"globalPriority, priority from Builds "
"inner join jobsets on builds.jobset_id = jobsets.id "
- "where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
- lastBuildId);
+ "where finished = 0 order by globalPriority desc, random()");
for (auto const & row : res) {
auto builds_(builds.lock());
BuildID id = row["id"].as<BuildID>();
if (buildOne && id != buildOne) continue;
- if (id > newLastBuildId) {
- newLastBuildId = id;
- prom.queue_max_id.Set(id);
- }
if (builds_->count(id)) continue;
auto build = std::make_shared<Build>(
@@ -142,13 +148,13 @@ bool State::getQueuedBuilds(Connection & conn,
createBuild = [&](Build::ptr build) {
prom.queue_build_loads.Increment();
- printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
+ printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
nrAdded++;
newBuildsByID.erase(build->id);
if (!localStore->isValidPath(build->drvPath)) {
/* Derivation has been GC'ed prematurely. */
- printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
+ printError("aborting GC'ed build %1%", build->id);
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
@@ -192,15 +198,19 @@ bool State::getQueuedBuilds(Connection & conn,
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
if (!propagatedFrom) {
- for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
- if (i.second.second) {
- auto res = txn.exec_params
- ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
- localStore->printStorePath(*i.second.second));
- if (!res[0][0].is_null()) {
- propagatedFrom = res[0][0].as<BuildID>();
- break;
- }
+ for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
+ constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
+ auto res = optOutputPath
+ ? txn.exec_params(
+ std::string { common } + " and path = $1",
+ localStore->printStorePath(*optOutputPath))
+ : txn.exec_params(
+ std::string { common } + " and drvPath = $1 and name = $2",
+ localStore->printStorePath(ex.step->drvPath),
+ outputName);
+ if (!res[0][0].is_null()) {
+ propagatedFrom = res[0][0].as<BuildID>();
+ break;
}
}
}
@@ -236,12 +246,10 @@ bool State::getQueuedBuilds(Connection & conn,
/* If we didn't get a step, it means the step's outputs are
all valid. So we mark this as a finished, cached build. */
if (!step) {
- auto drv = localStore->readDerivation(build->drvPath);
- BuildOutput res = getBuildOutputCached(conn, destStore, drv);
+ BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
- for (auto & i : drv.outputsAndOptPaths(*localStore))
- if (i.second.second)
- addRoot(*i.second.second);
+ for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
+ addRoot(i.second);
{
auto mc = startDbUpdate();
@@ -292,7 +300,7 @@ bool State::getQueuedBuilds(Connection & conn,
try {
createBuild(build);
} catch (Error & e) {
- e.addTrace({}, hintfmt("while loading build %d: ", build->id));
+ e.addTrace({}, HintFmt("while loading build %d: ", build->id));
throw;
}
@@ -302,7 +310,7 @@ bool State::getQueuedBuilds(Connection & conn,
/* Add the new runnable build steps to ‘runnable’ and wake up
the builder threads. */
- printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
+ printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
for (auto & r : newRunnable)
makeRunnable(r);
@@ -312,15 +320,13 @@ bool State::getQueuedBuilds(Connection & conn,
/* Stop after a certain time to allow priority bumps to be
processed. */
- if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
+ if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
prom.queue_checks_early_exits.Increment();
break;
- }
+ }
}
prom.queue_checks_finished.Increment();
-
- lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
return newBuildsByID.empty();
}
@@ -358,13 +364,13 @@ void State::processQueueChange(Connection & conn)
for (auto i = builds_->begin(); i != builds_->end(); ) {
auto b = currentIds.find(i->first);
if (b == currentIds.end()) {
- printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
+ printInfo("discarding cancelled build %1%", i->first);
i = builds_->erase(i);
// FIXME: ideally we would interrupt active build steps here.
continue;
}
if (i->second->globalPriority < b->second) {
- printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
+ printInfo("priority of build %1% increased", i->first);
i->second->globalPriority = b->second;
i->second->propagatePriorities();
}
@@ -399,6 +405,34 @@ void State::processQueueChange(Connection & conn)
}
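+/* Of the given derivation outputs, return those that are not already
+ valid in the destination store. Validity checks run in parallel via a
+ thread pool; outputs whose store path is not yet known are always
+ reported as missing. */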
+std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
+ ref<Store> destStore,
+ const std::map<DrvOutput, std::optional<StorePath>> & paths)
+{
+ Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
+ ThreadPool tp;
+
+ for (auto & [output, maybeOutputPath] : paths) {
+ if (!maybeOutputPath) {
+ auto missing(missing_.lock());
+ missing->insert({output, maybeOutputPath});
+ } else {
+ tp.enqueue([&] {
+ if (!destStore->isValidPath(*maybeOutputPath)) {
+ auto missing(missing_.lock());
+ missing->insert({output, maybeOutputPath});
+ }
+ });
+ }
+ }
+
+ tp.process();
+
+ auto missing(missing_.lock());
+ return *missing;
+}
+
+
Step::ptr State::createStep(ref<Store> destStore,
Connection & conn, Build::ptr build, const StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
@@ -457,17 +491,23 @@ Step::ptr State::createStep(ref<Store> destStore,
it's not runnable yet, and other threads won't make it
runnable while step->created == false. */
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
- step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
+ {
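+ // Parse the derivation's structured attributes (if present) and
+ // derive its build options (e.g. required system features) from them.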
+ auto parsedOpt = StructuredAttrs::tryParse(step->drv->env);
+ try {
+ step->drvOptions = std::make_unique<DerivationOptions>(
+ DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr));
+ } catch (Error & e) {
+ e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
+ throw;
+ }
+ }
- step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
- step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1";
+ step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
+ step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";
step->systemType = step->drv->platform;
{
- auto i = step->drv->env.find("requiredSystemFeatures");
- StringSet features;
- if (i != step->drv->env.end())
- features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
+ StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
if (step->preferLocalBuild)
features.insert("local");
if (!features.empty()) {
@@ -481,26 +521,40 @@ Step::ptr State::createStep(ref<Store> destStore,
throw PreviousFailure{step};
/* Are all outputs valid? */
- bool valid = true;
- DerivationOutputs missing;
- for (auto & i : step->drv->outputs)
- if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
- valid = false;
- missing.insert_or_assign(i.first, i.second);
- }
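+ // Map each output (keyed by DrvOutput) to its store path, if known,
+ // and ask the destination store which of them are missing.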
+ auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
+ std::map<DrvOutput, std::optional<StorePath>> paths;
+ for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
+ auto outputHash = outputHashes.at(outputName);
+ paths.insert({{outputHash, outputName}, maybeOutputPath});
+ }
+
+ auto missing = getMissingRemotePaths(destStore, paths);
+ bool valid = missing.empty();
/* Try to copy the missing paths from the local store or from
substitutes. */
if (!missing.empty()) {
size_t avail = 0;
- for (auto & i : missing) {
- auto path = i.second.path(*localStore, step->drv->name, i.first);
- if (/* localStore != destStore && */ localStore->isValidPath(*path))
+ for (auto & [i, pathOpt] : missing) {
+ // If we don't know the output path from the destination
+ // store, see if the local store can tell us.
+ if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
+ if (auto maybeRealisation = localStore->queryRealisation(i))
+ pathOpt = maybeRealisation->outPath;
+
+ if (!pathOpt) {
+ // No hope of getting the store object if we don't know
+ // the path.
+ continue;
+ }
+ auto & path = *pathOpt;
+
+ if (/* localStore != destStore && */ localStore->isValidPath(path))
avail++;
else if (useSubstitutes) {
SubstitutablePathInfos infos;
- localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
+ localStore->querySubstitutablePathInfos({{path, {}}}, infos);
if (infos.size() == 1)
avail++;
}
@@ -508,26 +562,29 @@ Step::ptr State::createStep(ref<Store> destStore,
if (missing.size() == avail) {
valid = true;
- for (auto & i : missing) {
- auto path = i.second.path(*localStore, step->drv->name, i.first);
+ for (auto & [i, pathOpt] : missing) {
+ // If we found everything, then we should know the path
+ // to every missing store object now.
+ assert(pathOpt);
+ auto & path = *pathOpt;
try {
time_t startTime = time(0);
- if (localStore->isValidPath(*path))
+ if (localStore->isValidPath(path))
printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
- localStore->printStorePath(*path),
+ localStore->printStorePath(path),
localStore->printStorePath(drvPath));
else {
printInfo("substituting output ‘%1%’ of ‘%2%’",
- localStore->printStorePath(*path),
+ localStore->printStorePath(path),
localStore->printStorePath(drvPath));
- localStore->ensurePath(*path);
+ localStore->ensurePath(path);
// FIXME: should copy directly from substituter to destStore.
}
copyClosure(*localStore, *destStore,
- StorePathSet { *path },
+ StorePathSet { path },
NoRepair, CheckSigs, NoSubstitute);
time_t stopTime = time(0);
@@ -535,13 +592,13 @@ Step::ptr State::createStep(ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
- createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
+ createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
txn.commit();
}
} catch (Error & e) {
printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
- localStore->printStorePath(*path),
+ localStore->printStorePath(path),
localStore->printStorePath(drvPath),
e.what());
valid = false;
@@ -561,7 +618,7 @@ Step::ptr State::createStep(ref<Store> destStore,
printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));
/* Create steps for the dependencies. */
- for (auto & i : step->drv->inputDrvs) {
+ for (auto & i : step->drv->inputDrvs.map) {
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
@@ -640,21 +697,23 @@ void State::processJobsetSharesChange(Connection & conn)
}
-BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<Store> destStore, const nix::Derivation & drv)
+BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<Store> destStore, const nix::StorePath & drvPath)
{
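+ // Determine the derivation's output paths once; they are needed both
+ // for the cached-build lookup below and for getBuildOutput().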
+ auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
+
{
pqxx::work txn(conn);
- for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
+ for (auto & [name, output] : derivationOutputs) {
auto r = txn.exec_params
("select id, buildStatus, releaseName, closureSize, size from Builds b "
"join BuildOutputs o on b.id = o.build "
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
- localStore->printStorePath(*output.second));
+ localStore->printStorePath(output));
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();
- printMsg(lvlInfo, format("reusing build %d") % id);
+ printInfo("reusing build %d", id);
BuildOutput res;
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
@@ -677,7 +736,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref
product.fileSize = row[2].as<off_t>();
}
if (!row[3].is_null())
- product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
+ product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
if (!row[4].is_null())
product.path = row[4].as<std::string>();
product.name = row[5].as<std::string>();
@@ -704,5 +763,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref
}
NarMemberDatas narMembers;
- return getBuildOutput(destStore, narMembers, drv);
+ return getBuildOutput(destStore, narMembers, derivationOutputs);
}
diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh
index 47e74f55..f7ab7de3 100644
--- a/src/hydra-queue-runner/state.hh
+++ b/src/hydra-queue-runner/state.hh
@@ -6,6 +6,8 @@
#include