Merge branch 'master' into CORE-21733-add-link-to-raw-log
7
.github/workflows/test.yml
vendored
@@ -1,14 +1,17 @@
name: "Test"
on:
pull_request:
merge_group:
push:
branches:
- master
jobs:
tests:
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v16
- uses: cachix/install-nix-action@v31
#- run: nix flake check
- run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
28
.github/workflows/update-flakes.yml
vendored
Normal file
@@ -0,0 +1,28 @@
name: "Update Flakes"
on:
schedule:
# Run weekly on Monday at 00:00 UTC
- cron: '0 0 * * 1'
workflow_dispatch:
jobs:
update-flakes:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v31
- name: Update flake inputs
run: nix flake update
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
commit-message: "flake.lock: Update"
title: "Update flake inputs"
body: |
Automated flake input updates.

This PR was automatically created by the update-flakes workflow.
branch: update-flakes
delete-branch: true
44
.gitignore
vendored
@@ -1,47 +1,9 @@
|
||||
/.pls_cache
|
||||
*.o
|
||||
*~
|
||||
Makefile
|
||||
Makefile.in
|
||||
.deps
|
||||
.hydra-data
|
||||
/config.guess
|
||||
/config.log
|
||||
/config.status
|
||||
/config.sub
|
||||
/configure
|
||||
/depcomp
|
||||
/libtool
|
||||
/ltmain.sh
|
||||
/autom4te.cache
|
||||
/aclocal.m4
|
||||
/missing
|
||||
/install-sh
|
||||
.test_info.*
|
||||
/src/sql/hydra-postgresql.sql
|
||||
/src/sql/hydra-sqlite.sql
|
||||
/src/sql/tmp.sqlite
|
||||
/src/hydra-eval-jobs/hydra-eval-jobs
|
||||
/src/root/static/bootstrap
|
||||
/src/root/static/js/flot
|
||||
/tests
|
||||
/doc/manual/images
|
||||
/doc/manual/manual.html
|
||||
/doc/manual/manual.pdf
|
||||
/t/.bzr*
|
||||
/t/.git*
|
||||
/t/.hg*
|
||||
/t/nix
|
||||
/t/data
|
||||
/t/jobs/config.nix
|
||||
t/jobs/declarative/project.json
|
||||
/inst
|
||||
hydra-config.h
|
||||
hydra-config.h.in
|
||||
.hydra-data
|
||||
result
|
||||
result-*
|
||||
outputs
|
||||
config
|
||||
stamp-h1
|
||||
src/hydra-evaluator/hydra-evaluator
|
||||
src/hydra-queue-runner/hydra-queue-runner
|
||||
src/root/static/fontawesome/
|
||||
src/root/static/bootstrap*/
|
||||
|
@@ -1,8 +0,0 @@
SUBDIRS = src t doc
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
EXTRA_DIST = hydra-module.nix

install-data-local: hydra-module.nix
$(INSTALL) -d $(DESTDIR)$(datadir)/nix
$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
37
README.md
@@ -39,16 +39,16 @@ In order to evaluate and build anything you need to create _projects_ that conta
#### Creating A Project
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:

- **Identifier**: `hello`
- **Identifier**: `hello-project`
- **Display name**: `hello`
- **Description**: `hello project`

Click "_Create project_".

#### Creating A Jobset
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values:
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values:

- **Identifier**: `hello`
- **Identifier**: `hello-project`
- **Nix expression**: `examples/hello.nix` in `hydra`
- **Check interval**: 60
- **Scheduling shares**: 1
@@ -57,7 +57,7 @@ We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_

- **Input name**: `nixpkgs`
- **Type**: `Git checkout`
- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03`
- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`

- **Input name**: `hydra`
- **Type**: `Git checkout`
@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):

```
$ nix-build
$ nix build
```

### Development Environment

You can use the provided shell.nix to get a working development environment:
```
$ nix-shell
$ ./bootstrap
$ configurePhase # NOTE: not ./configure
$ make
$ nix develop
$ mesonConfigurePhase
$ ninja
```

### Executing Hydra During Development
@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
can be done using [foreman](https://github.com/ddollar/foreman):

```
$ nix-shell
$ nix develop
$ # hack hack
$ make
$ ninja -C build
$ foreman start
```

@@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environme
Then, you can run the tests and the perlcritic linter together with:

```console
$ nix-shell
$ make check
$ nix develop
$ ninja -C build test
```

You can run a single test with:

```
$ nix-shell
$ yath test ./t/foo/bar.t
$ nix develop
$ cd build
$ meson test --test-args=../t/Hydra/Event.t testsuite
```

And you can run just perlcritic with:

```
$ nix-shell
$ make perlcritic
$ nix develop
$ cd build
$ meson test perlcritic
```

### JSON API
@@ -140,7 +141,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
## Additional Resources

- [Hydra User's Guide](https://nixos.org/hydra/manual/)
- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)

85
configure.ac
@@ -1,85 +0,0 @@
|
||||
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])])
|
||||
AC_CONFIG_AUX_DIR(config)
|
||||
AM_INIT_AUTOMAKE([foreign serial-tests])
|
||||
|
||||
AC_LANG([C++])
|
||||
|
||||
AC_PROG_CC
|
||||
AC_PROG_INSTALL
|
||||
AC_PROG_LN_S
|
||||
AC_PROG_LIBTOOL
|
||||
AC_PROG_CXX
|
||||
|
||||
CXXFLAGS+=" -std=c++17"
|
||||
|
||||
AC_PATH_PROG([XSLTPROC], [xsltproc])
|
||||
|
||||
AC_ARG_WITH([docbook-xsl],
|
||||
[AS_HELP_STRING([--with-docbook-xsl=PATH],
|
||||
[path of the DocBook XSL stylesheets])],
|
||||
[docbookxsl="$withval"],
|
||||
[docbookxsl="/docbook-xsl-missing"])
|
||||
AC_SUBST([docbookxsl])
|
||||
|
||||
|
||||
AC_DEFUN([NEED_PROG],
|
||||
[
|
||||
AC_PATH_PROG($1, $2)
|
||||
if test -z "$$1"; then
|
||||
AC_MSG_ERROR([$2 is required])
|
||||
fi
|
||||
])
|
||||
|
||||
NEED_PROG(perl, perl)
|
||||
|
||||
NEED_PROG([NIX_STORE_PROGRAM], [nix-store])
|
||||
|
||||
AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
|
||||
if test -n "$NIX_STORE" -a -n "$TMPDIR"
|
||||
then
|
||||
# This may be executed from within a build chroot, so pacify
|
||||
# `nix-store' instead of letting it choke while trying to mkdir
|
||||
# /nix/var.
|
||||
NIX_STATE_DIR="$TMPDIR"
|
||||
export NIX_STATE_DIR
|
||||
fi
|
||||
if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
|
||||
AC_MSG_RESULT([yes])
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
|
||||
fi
|
||||
|
||||
PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
|
||||
|
||||
testPath="$(dirname $(type -p expr))"
|
||||
AC_SUBST(testPath)
|
||||
|
||||
jobsPath="$(realpath ./t/jobs)"
|
||||
AC_SUBST(jobsPath)
|
||||
|
||||
CXXFLAGS+=" -include nix/config.h"
|
||||
|
||||
AC_CONFIG_FILES([
|
||||
Makefile
|
||||
doc/Makefile
|
||||
doc/manual/Makefile
|
||||
src/Makefile
|
||||
src/hydra-evaluator/Makefile
|
||||
src/hydra-eval-jobs/Makefile
|
||||
src/hydra-queue-runner/Makefile
|
||||
src/sql/Makefile
|
||||
src/ttf/Makefile
|
||||
src/lib/Makefile
|
||||
src/root/Makefile
|
||||
src/script/Makefile
|
||||
t/Makefile
|
||||
t/jobs/config.nix
|
||||
t/jobs/declarative/project.json
|
||||
])
|
||||
|
||||
AC_CONFIG_COMMANDS([executable-scripts], [])
|
||||
|
||||
AC_CONFIG_HEADER([hydra-config.h])
|
||||
|
||||
AC_OUTPUT
|
@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`

(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
src = ./.;
}).defaultNix
@@ -1,4 +0,0 @@
SUBDIRS = manual
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)

@@ -1,6 +0,0 @@
MD_FILES = src/*.md

EXTRA_DIST = $(MD_FILES)

install: $(MD_FILES)
mdbook build . -d $(docdir)
36
doc/manual/meson.build
Normal file
@@ -0,0 +1,36 @@
|
||||
srcs = files(
|
||||
'src/SUMMARY.md',
|
||||
'src/about.md',
|
||||
'src/api.md',
|
||||
'src/configuration.md',
|
||||
'src/hacking.md',
|
||||
'src/installation.md',
|
||||
'src/introduction.md',
|
||||
'src/jobs.md',
|
||||
'src/monitoring/README.md',
|
||||
'src/notifications.md',
|
||||
'src/plugins/README.md',
|
||||
'src/plugins/RunCommand.md',
|
||||
'src/plugins/declarative-projects.md',
|
||||
'src/projects.md',
|
||||
'src/webhooks.md',
|
||||
)
|
||||
|
||||
manual = custom_target(
|
||||
'manual',
|
||||
command: [
|
||||
mdbook,
|
||||
'build',
|
||||
'@SOURCE_ROOT@/doc/manual',
|
||||
'-d', meson.current_build_dir() / 'html'
|
||||
],
|
||||
depend_files: srcs,
|
||||
output: ['html'],
|
||||
build_by_default: true,
|
||||
)
|
||||
|
||||
install_subdir(
|
||||
manual.full_path(),
|
||||
install_dir: get_option('datadir') / 'doc/hydra',
|
||||
strip_directory: true,
|
||||
)
|
@@ -51,10 +51,12 @@ base_uri example.com
`base_uri` should be your hydra servers proxied URL. If you are using
Hydra nixos module then setting `hydraURL` option should be enough.

If you want to serve Hydra with a prefix path, for example
[http://example.com/hydra]() then you need to configure your reverse
proxy to pass `X-Request-Base` to hydra, with prefix path as value. For
example if you are using nginx, then use configuration similar to
You also need to configure your reverse proxy to pass `X-Request-Base`
to hydra, with the same value as `base_uri`.
This also covers the case of serving Hydra with a prefix path,
as in [http://example.com/hydra]().

For example if you are using nginx, then use configuration similar to
following:

server {
@@ -63,8 +65,7 @@ following:
.. other configuration ..
location /hydra/ {

proxy_pass http://127.0.0.1:3000;
proxy_redirect http://127.0.0.1:3000 https://example.com/hydra;
proxy_pass http://127.0.0.1:3000/;

proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@@ -74,6 +75,33 @@ following:
}
}

Note the trailing slash on the `proxy_pass` directive, which causes nginx to
strip off the `/hydra/` part of the URL before passing it to hydra.

Populating a Cache
------------------

A common use for Hydra is to pre-build and cache derivations which
take a long time to build. While it is possible to direcly access the
Hydra server's store over SSH, a more scalable option is to upload
built derivations to a remote store like an [S3-compatible object
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
the `store_uri` parameter will cause Hydra to sign and upload
derivations as they are built:

```
store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
```

This example uses [Zstandard](https://github.com/facebook/zstd)
compression on derivations to reduce CPU usage on the server, but
[Brotli](https://brotli.org/) compression for derivation listings and
build logs because it has better browser support.

See [`nix help
stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
for a description of the store URI format.

Statsd Configuration
--------------------

@@ -131,8 +159,8 @@ use LDAP to manage roles and users.
This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found [here]
(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).

Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
@@ -179,13 +207,15 @@ Example configuration:
<role_search_options>
deref = always
</role_search_options>
</store>
</config>
<role_mapping>
# Make all users in the hydra_admin group Hydra admins
hydra_admin = admin
# Allow all users in the dev group to restart jobs and cancel builds
# Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
dev = eval-jobset
dev = restart-jobs
dev = cancel-builds
dev = cancel-build
</role_mapping>
</ldap>
```
@@ -12,24 +12,26 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
and dependencies can be found:

```console
$ nix-shell
$ nix develop
```

To build Hydra, you should then do:

```console
[nix-shell]$ ./bootstrap
[nix-shell]$ configurePhase
[nix-shell]$ make
$ mesonConfigurePhase
$ ninja
```

You start a local database, the webserver, and other components with
foreman:

```console
$ ninja -C build
$ foreman start
```

The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"

You can run just the Hydra web server in your source tree as follows:

```console
@@ -39,18 +41,11 @@ $ ./src/script/hydra-server
You can run Hydra's test suite with the following:

```console
[nix-shell]$ make check
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
[nix-shell]$ # or run yath directly:
[nix-shell]$ yath test
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ yath test -j $NIX_BUILD_CORES
$ meson test
# to run as many tests as you have cores:
$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
```

When using `yath` instead of `make check`, ensure you have run `make`
in the root of the repository at least once.

**Warning**: Currently, the tests can fail
if run with high parallelism [due to an issue in
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
@@ -67,7 +62,7 @@ will reload the page every time you save.
To build Hydra and its dependencies:

```console
$ nix-build release.nix -A build.x86_64-linux
$ nix build .#packages.x86_64-linux.default
```

## Development Tasks
@@ -92,7 +87,7 @@ On NixOS:

```nix
{
nix.trustedUsers = [ "YOURUSER" ];
nix.settings.trusted-users = [ "YOURUSER" ];
}
```

@@ -48,7 +48,7 @@ Getting Nix
If your server runs NixOS you are all set to continue with installation
of Hydra. Otherwise you first need to install Nix. The latest stable
version can be found one [the Nix web
site](http://nixos.org/nix/download.html), along with a manual, which
site](https://nixos.org/download/), along with a manual, which
includes installation instructions.

Installation
@@ -42,7 +42,7 @@ Sets CircleCI status.
|
||||
|
||||
## Compress build logs
|
||||
|
||||
Compresses build logs after a build with bzip2.
|
||||
Compresses build logs after a build with bzip2 or zstd.
|
||||
|
||||
### Configuration options
|
||||
|
||||
@@ -50,6 +50,14 @@ Compresses build logs after a build with bzip2.
|
||||
|
||||
Enable log compression
|
||||
|
||||
- `compress_build_logs_compression`
|
||||
|
||||
Which compression format to use. Valid values are bzip2 (default) and zstd.
|
||||
|
||||
- `compress_build_logs_silent`
|
||||
|
||||
Whether to compress logs silently.
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
@@ -172,17 +180,6 @@ Sets Gitlab CI status.
|
||||
|
||||
- `gitlab_authorization.<projectId>`
|
||||
|
||||
## HipChat notification
|
||||
|
||||
Sends hipchat chat notifications when a build finish.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `hipchat.[].jobs`
|
||||
- `hipchat.[].builds`
|
||||
- `hipchat.[].token`
|
||||
- `hipchat.[].notify`
|
||||
|
||||
## InfluxDB notification
|
||||
|
||||
Writes InfluxDB events when a builds finished.
|
||||
|
@@ -378,13 +378,18 @@ This section describes how it can be implemented for `gitea`, but the approach f
|
||||
analogous:
|
||||
|
||||
* [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication)
|
||||
* Add it to your `hydra.conf` like this:
|
||||
* Add it to a file which only users in the hydra group can read like this: see [including files](configuration.md#including-files) for more information
|
||||
```
|
||||
<gitea_authorization>
|
||||
your_username=your_token
|
||||
</gitea_authorization>
|
||||
```
|
||||
|
||||
* Include the file in your `hydra.conf` like this:
|
||||
``` nix
|
||||
{
|
||||
services.hydra-dev.extraConfig = ''
|
||||
<gitea_authorization>
|
||||
your_username=your_token
|
||||
</gitea_authorization>
|
||||
Include /path/to/secret/file
|
||||
'';
|
||||
}
|
||||
```
|
||||
@@ -399,3 +404,10 @@ analogous:
|
||||
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
|
||||
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
|
||||
|
||||
Content-addressed derivations
|
||||
-----------------------------
|
||||
|
||||
Hydra can to a certain extent use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
|
||||
To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.
|
||||
|
||||
Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken at that point.
|
||||
|
@@ -1,9 +1,12 @@
# Webhooks

Hydra can be notified by github's webhook to trigger a new evaluation when a
Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
jobset has a github repo in its input.
To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
click on `Add webhook`.

## GitHub

To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
and in the `Webhooks` tab click on `Add webhook`.

- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
- In `Content type` switch to `application/json`.
@@ -11,3 +14,14 @@ click on `Add webhook`.
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.

Then add the hook with `Add webhook`.

## Gitea

To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.

- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
- Change the branch filter to match the git branch hydra builds.

Then add the hook with `Add webhook`.
@@ -1,5 +1,5 @@
#
# jobset example file. This file canbe referenced as Nix expression
# jobset example file. This file can be referenced as Nix expression
# in a jobset configuration along with inputs for nixpkgs and the
# repository containing this file.
#
95
flake.lock
generated
@@ -1,94 +1,59 @@
|
||||
{
|
||||
"nodes": {
|
||||
"lowdown-src": {
|
||||
"nix": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1633514407,
|
||||
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"newNixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1647380550,
|
||||
"narHash": "sha256-909TI9poX7CIUiFx203WL29YON6m/I6k0ExbZvR7bLM=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "6e3ee8957637a60f5072e33d78e05c0f65c54366",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"lowdown-src": "lowdown-src",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixpkgs-regression": "nixpkgs-regression"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1649172203,
|
||||
"narHash": "sha256-Q3nYaXqbseDOvZrlePKeIrx0/KzqyrtNpxHIUbtFHuI=",
|
||||
"lastModified": 1750777360,
|
||||
"narHash": "sha256-nDWFxwhT+fQNgi4rrr55EKjpxDyVKSl1KaNmSXtYj40=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nix",
|
||||
"rev": "5fe4fe823c193cbb7bfa05a468de91eeab09058d",
|
||||
"rev": "7bb200199705eddd53cb34660a76567c6f1295d9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nix",
|
||||
"type": "indirect"
|
||||
"owner": "NixOS",
|
||||
"ref": "2.29-maintenance",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-eval-jobs": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1748680938,
|
||||
"narHash": "sha256-TQk6pEMD0mFw7jZXpg7+2qNKGbAluMQgc55OMgEO8bM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-eval-jobs",
|
||||
"rev": "974a4af3d4a8fd242d8d0e2608da4be87a62b83f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-eval-jobs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1645296114,
|
||||
"narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=",
|
||||
"lastModified": 1750736827,
|
||||
"narHash": "sha256-UcNP7BR41xMTe0sfHBH8R79+HdCw0OwkC/ZKrQEuMeo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1",
|
||||
"rev": "b4a30b08433ad7b6e1dfba0833fb0fe69d43dfec",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-21.05-small",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs-regression": {
|
||||
"locked": {
|
||||
"lastModified": 1643052045,
|
||||
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05-small",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"newNixpkgs": "newNixpkgs",
|
||||
"nix": "nix",
|
||||
"nixpkgs": [
|
||||
"nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
"nix-eval-jobs": "nix-eval-jobs",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@@ -70,7 +70,7 @@ paths:
|
||||
$ref: '#/components/examples/projects-success'
|
||||
|
||||
/api/push:
|
||||
put:
|
||||
post:
|
||||
summary: trigger jobsets
|
||||
parameters:
|
||||
- in: query
|
||||
@@ -533,13 +533,13 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/eval/{build-id}:
|
||||
/eval/{eval-id}:
|
||||
get:
|
||||
summary: Retrieves evaluations identified by build id
|
||||
summary: Retrieves evaluations identified by eval id
|
||||
parameters:
|
||||
- name: build-id
|
||||
- name: eval-id
|
||||
in: path
|
||||
description: build identifier
|
||||
description: eval identifier
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
@@ -551,6 +551,24 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/JobsetEval'
|
||||
|
||||
/eval/{eval-id}/builds:
|
||||
get:
|
||||
summary: Retrieves all builds belonging to an evaluation identified by eval id
|
||||
parameters:
|
||||
- name: eval-id
|
||||
in: path
|
||||
description: eval identifier
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
responses:
|
||||
'200':
|
||||
description: builds
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/JobsetEvalBuilds'
|
||||
|
||||
components:
|
||||
schemas:
|
||||
|
||||
@@ -796,6 +814,13 @@ components:
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/JobsetEvalInput'
|
||||
|
||||
JobsetEvalBuilds:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/Build'
|
||||
|
||||
JobsetOverview:
|
||||
type: array
|
||||
items:
|
||||
@@ -870,7 +895,7 @@ components:
|
||||
description: Size of the produced file
|
||||
type: integer
|
||||
defaultpath:
|
||||
description: This is a Git/Mercurial commit hash or a Subversion revision number
|
||||
description: if path is a directory, the default file relative to path to be served
|
||||
type: string
|
||||
'type':
|
||||
description: Types of build product (user defined)
|
||||
|
26
meson.build
Normal file
@@ -0,0 +1,26 @@
project('hydra', 'cpp',
version: files('version.txt'),
license: 'GPL-3.0',
default_options: [
'debug=true',
'optimization=2',
'cpp_std=c++20',
],
)

nix_util_dep = dependency('nix-util', required: true)
nix_store_dep = dependency('nix-store', required: true)
nix_main_dep = dependency('nix-main', required: true)

pqxx_dep = dependency('libpqxx', required: true)

prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)

mdbook = find_program('mdbook', native: true)
perl = find_program('perl', native: true)

subdir('doc/manual')
subdir('nixos-modules')
subdir('src')
subdir('t')
47
nixos-modules/default.nix
Normal file
@@ -0,0 +1,47 @@
|
||||
{ self }:
|
||||
|
||||
{
|
||||
hydra = { pkgs, lib,... }: {
|
||||
_file = ./default.nix;
|
||||
imports = [ ./hydra.nix ];
|
||||
services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
|
||||
};
|
||||
|
||||
hydraTest = { pkgs, ... }: {
|
||||
services.hydra-dev.enable = true;
|
||||
services.hydra-dev.hydraURL = "http://hydra.example.org";
|
||||
services.hydra-dev.notificationSender = "admin@hydra.example.org";
|
||||
|
||||
systemd.services.hydra-send-stats.enable = false;
|
||||
|
||||
services.postgresql.enable = true;
|
||||
|
||||
# The following is to work around the following error from hydra-server:
|
||||
# [error] Caught exception in engine "Cannot determine local time zone"
|
||||
time.timeZone = "UTC";
|
||||
|
||||
nix.extraOptions = ''
|
||||
allowed-uris = https://github.com/
|
||||
'';
|
||||
};
|
||||
|
||||
hydraProxy = {
|
||||
services.httpd = {
|
||||
enable = true;
|
||||
adminAddr = "hydra-admin@example.org";
|
||||
extraConfig = ''
|
||||
<Proxy *>
|
||||
Order deny,allow
|
||||
Allow from all
|
||||
</Proxy>
|
||||
|
||||
ProxyRequests Off
|
||||
ProxyPreserveHost On
|
||||
ProxyPass /apache-errors !
|
||||
ErrorDocument 503 /apache-errors/503.html
|
||||
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
|
||||
ProxyPassReverse / http://127.0.0.1:3000/
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
@@ -68,8 +68,6 @@ in
|
||||
|
||||
package = mkOption {
|
||||
type = types.path;
|
||||
default = pkgs.hydra;
|
||||
defaultText = literalExpression "pkgs.hydra";
|
||||
description = "The Hydra package.";
|
||||
};
|
||||
|
||||
@@ -228,7 +226,11 @@ in
|
||||
useDefaultShell = true;
|
||||
};
|
||||
|
||||
nix.trustedUsers = [ "hydra-queue-runner" ];
|
||||
nix.settings = {
|
||||
trusted-users = [ "hydra-queue-runner" ];
|
||||
keep-outputs = true;
|
||||
keep-derivations = true;
|
||||
};
|
||||
|
||||
services.hydra-dev.extraConfig =
|
||||
''
|
||||
@@ -256,11 +258,6 @@ in
|
||||
|
||||
environment.variables = hydraEnv;
|
||||
|
||||
nix.extraOptions = ''
|
||||
gc-keep-outputs = true
|
||||
gc-keep-derivations = true
|
||||
'';
|
||||
|
||||
systemd.services.hydra-init =
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
requires = optional haveLocalDB "postgresql.service";
|
||||
@@ -268,17 +265,17 @@ in
|
||||
environment = env // {
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
|
||||
};
|
||||
path = [ pkgs.utillinux ];
|
||||
path = [ pkgs.util-linux ];
|
||||
preStart = ''
|
||||
ln -sf ${hydraConf} ${baseDir}/hydra.conf
|
||||
|
||||
mkdir -m 0700 -p ${baseDir}/www
|
||||
chown hydra-www.hydra ${baseDir}/www
|
||||
chown hydra-www:hydra ${baseDir}/www
|
||||
|
||||
mkdir -m 0700 -p ${baseDir}/queue-runner
|
||||
mkdir -m 0750 -p ${baseDir}/build-logs
|
||||
mkdir -m 0750 -p ${baseDir}/runcommand-logs
|
||||
chown hydra-queue-runner.hydra \
|
||||
chown hydra-queue-runner:hydra \
|
||||
${baseDir}/queue-runner \
|
||||
${baseDir}/build-logs \
|
||||
${baseDir}/runcommand-logs
|
||||
@@ -309,7 +306,7 @@ in
|
||||
rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
|
||||
fi
|
||||
|
||||
chown hydra.hydra ${cfg.gcRootsDir}
|
||||
chown hydra:hydra ${cfg.gcRootsDir}
|
||||
chmod 2775 ${cfg.gcRootsDir}
|
||||
'';
|
||||
serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init";
|
||||
@@ -341,8 +338,9 @@ in
|
||||
systemd.services.hydra-queue-runner =
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" "network.target" ];
|
||||
path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "hydra-init.service" "network.target" "network-online.target" ];
|
||||
path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ];
|
||||
restartTriggers = [ hydraConf ];
|
||||
environment = env // {
|
||||
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
|
||||
@@ -366,7 +364,7 @@ in
|
||||
requires = [ "hydra-init.service" ];
|
||||
restartTriggers = [ hydraConf ];
|
||||
after = [ "hydra-init.service" "network.target" ];
|
||||
path = with pkgs; [ nettools cfg.package jq ];
|
||||
path = with pkgs; [ hostname-debian cfg.package jq ];
|
||||
environment = env // {
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
|
||||
};
|
||||
@@ -409,6 +407,7 @@ in
|
||||
requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" ];
|
||||
restartTriggers = [ hydraConf ];
|
||||
path = [ pkgs.zstd ];
|
||||
environment = env // {
|
||||
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
|
||||
@@ -459,10 +458,17 @@ in
|
||||
# logs automatically after a step finishes, but this doesn't work
|
||||
# if the queue runner is stopped prematurely.
|
||||
systemd.services.hydra-compress-logs =
|
||||
{ path = [ pkgs.bzip2 ];
|
||||
{ path = [ pkgs.bzip2 pkgs.zstd ];
|
||||
script =
|
||||
''
|
||||
find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
|
||||
set -eou pipefail
|
||||
compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf)
|
||||
if [[ $compression == "" ]]; then
|
||||
compression="bzip2"
|
||||
elif [[ $compression == zstd ]]; then
|
||||
compression="zstd --rm"
|
||||
fi
|
||||
find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
|
||||
'';
|
||||
startAt = "Sun 01:45";
|
||||
};
|
4
nixos-modules/meson.build
Normal file
@@ -0,0 +1,4 @@
install_data('hydra.nix',
install_dir: get_option('datadir') / 'nix',
rename: ['hydra-module.nix'],
)
306
nixos-tests.nix
Normal file
@@ -0,0 +1,306 @@
|
||||
{ forEachSystem, nixpkgs, nixosModules }:
|
||||
|
||||
let
|
||||
# NixOS configuration used for VM tests.
|
||||
hydraServer =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
nixosModules.hydra
|
||||
nixosModules.hydraTest
|
||||
];
|
||||
|
||||
virtualisation.memorySize = 1024;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
|
||||
|
||||
nix = {
|
||||
# Without this nix tries to fetch packages from the default
|
||||
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
|
||||
settings.substituters = [ ];
|
||||
};
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
install = forEachSystem (system:
|
||||
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
|
||||
name = "hydra-install";
|
||||
nodes.machine = hydraServer;
|
||||
testScript =
|
||||
''
|
||||
machine.wait_for_job("hydra-init")
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_job("hydra-evaluator")
|
||||
machine.wait_for_job("hydra-queue-runner")
|
||||
machine.wait_for_open_port(3000)
|
||||
machine.succeed("curl --fail http://localhost:3000/")
|
||||
'';
|
||||
});
|
||||
|
||||
notifications = forEachSystem (system:
|
||||
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
|
||||
name = "hydra-notifications";
|
||||
nodes.machine = {
|
||||
imports = [ hydraServer ];
|
||||
services.hydra-dev.extraConfig = ''
|
||||
<influxdb>
|
||||
url = http://127.0.0.1:8086
|
||||
db = hydra
|
||||
</influxdb>
|
||||
'';
|
||||
services.influxdb.enable = true;
|
||||
};
|
||||
testScript = { nodes, ... }: ''
|
||||
machine.wait_for_job("hydra-init")
|
||||
|
||||
# Create an admin account and some other state.
|
||||
machine.succeed(
|
||||
"""
|
||||
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
|
||||
mkdir /run/jobset
|
||||
chmod 755 /run/jobset
|
||||
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
|
||||
chmod 644 /run/jobset/default.nix
|
||||
chown -R hydra /run/jobset
|
||||
"""
|
||||
)
|
||||
|
||||
# Wait until InfluxDB can receive web requests
|
||||
machine.wait_for_job("influxdb")
|
||||
machine.wait_for_open_port(8086)
|
||||
|
||||
# Create an InfluxDB database where hydra will write to
|
||||
machine.succeed(
|
||||
"curl -XPOST 'http://127.0.0.1:8086/query' "
|
||||
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
|
||||
)
|
||||
|
||||
# Wait until hydra-server can receive HTTP requests
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_open_port(3000)
|
||||
|
||||
# Setup the project and jobset
|
||||
machine.succeed(
|
||||
"su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
|
||||
)
|
||||
|
||||
# Wait until hydra has build the job and
|
||||
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
|
||||
machine.wait_until_succeeds(
|
||||
"curl -s -H 'Accept: application/csv' "
|
||||
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
|
||||
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
|
||||
)
|
||||
'';
|
||||
});
|
||||
|
||||
gitea = forEachSystem (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
in
|
||||
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
|
||||
name = "hydra-gitea";
|
||||
nodes.machine = { pkgs, ... }: {
|
||||
imports = [ hydraServer ];
|
||||
services.hydra-dev.extraConfig = ''
|
||||
<gitea_authorization>
|
||||
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
|
||||
</gitea_authorization>
|
||||
'';
|
||||
nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
|
||||
nix = {
|
||||
settings.substituters = [ ];
|
||||
};
|
||||
services.gitea = {
|
||||
enable = true;
|
||||
database.type = "postgres";
|
||||
settings = {
|
||||
service.DISABLE_REGISTRATION = true;
|
||||
server.HTTP_PORT = 3001;
|
||||
};
|
||||
};
|
||||
services.openssh.enable = true;
|
||||
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
|
||||
networking.firewall.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
skipLint = true;
|
||||
testScript =
|
||||
let
|
||||
scripts.mktoken = pkgs.writeText "token.sql" ''
|
||||
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
|
||||
'';
|
||||
|
||||
scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
|
||||
set -x
|
||||
mkdir -p /tmp/repo $HOME/.ssh
|
||||
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
|
||||
chmod 0400 $HOME/.ssh/privk
|
||||
git -C /tmp/repo init
|
||||
cp ${smallDrv} /tmp/repo/jobset.nix
|
||||
git -C /tmp/repo add .
|
||||
git config --global user.email test@localhost
|
||||
git config --global user.name test
|
||||
git -C /tmp/repo commit -m 'Initial import'
|
||||
git -C /tmp/repo remote add origin gitea@machine:root/repo
|
||||
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
|
||||
git -C /tmp/repo push origin master
|
||||
git -C /tmp/repo log >&2
|
||||
'';
|
||||
|
||||
scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
|
||||
set -x
|
||||
su -l hydra -c "hydra-create-user root --email-address \
|
||||
'alice@example.org' --password foobar --role admin"
|
||||
|
||||
URL=http://localhost:3000
|
||||
USERNAME="root"
|
||||
PASSWORD="foobar"
|
||||
PROJECT_NAME="trivial"
|
||||
JOBSET_NAME="trivial"
|
||||
mycurl() {
|
||||
curl --referer $URL -H "Accept: application/json" \
|
||||
-H "Content-Type: application/json" $@
|
||||
}
|
||||
|
||||
cat >data.json <<EOF
|
||||
{ "username": "$USERNAME", "password": "$PASSWORD" }
|
||||
EOF
|
||||
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
|
||||
|
||||
cat >data.json <<EOF
|
||||
{
|
||||
"displayname":"Trivial",
|
||||
"enabled":"1",
|
||||
"visible":"1"
|
||||
}
|
||||
EOF
|
||||
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
|
||||
-d @data.json -b hydra-cookie.txt
|
||||
|
||||
cat >data.json <<EOF
|
||||
{
|
||||
"description": "Trivial",
|
||||
"checkinterval": "60",
|
||||
"enabled": "1",
|
||||
"visible": "1",
|
||||
"keepnr": "1",
|
||||
"enableemail": true,
|
||||
"emailoverride": "hydra@localhost",
|
||||
"type": 0,
|
||||
"nixexprinput": "git",
|
||||
"nixexprpath": "jobset.nix",
|
||||
"inputs": {
|
||||
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
|
||||
"gitea_repo_name": {"value": "repo", "type": "string"},
|
||||
"gitea_repo_owner": {"value": "root", "type": "string"},
|
||||
"gitea_status_repo": {"value": "git", "type": "string"},
|
||||
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
|
||||
-d @data.json -b hydra-cookie.txt
|
||||
'';
|
||||
|
||||
api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
|
||||
|
||||
snakeoilKeypair = {
|
||||
privkey = pkgs.writeText "privkey.snakeoil" ''
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
|
||||
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
|
||||
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
|
||||
-----END EC PRIVATE KEY-----
|
||||
'';
|
||||
|
||||
pubkey = pkgs.lib.concatStrings [
|
||||
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
|
||||
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
|
||||
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
|
||||
];
|
||||
};
|
||||
|
||||
smallDrv = pkgs.writeText "jobset.nix" ''
|
||||
{ trivial = builtins.derivation {
|
||||
name = "trivial";
|
||||
system = "${system}";
|
||||
builder = "/bin/sh";
|
||||
allowSubstitutes = false;
|
||||
preferLocalBuild = true;
|
||||
args = ["-c" "echo success > $out; exit 0"];
|
||||
};
|
||||
}
|
||||
'';
|
||||
in
|
||||
''
|
||||
import json
|
||||
|
||||
machine.start()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
machine.wait_for_open_port(3000)
|
||||
machine.wait_for_open_port(3001)
|
||||
|
||||
machine.succeed(
|
||||
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
|
||||
+ "--username root --password root --email test@localhost'"
|
||||
)
|
||||
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
|
||||
|
||||
machine.succeed(
|
||||
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
|
||||
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
|
||||
+ f"-H 'Authorization: token ${api_token}'"
|
||||
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
|
||||
)
|
||||
|
||||
machine.succeed(
|
||||
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
|
||||
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
|
||||
+ f"-H 'Authorization: token ${api_token}'"
|
||||
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
|
||||
)
|
||||
|
||||
machine.succeed(
|
||||
"${scripts.git-setup}"
|
||||
)
|
||||
|
||||
machine.succeed(
|
||||
"${scripts.hydra-setup}"
|
||||
)
|
||||
|
||||
machine.wait_until_succeeds(
|
||||
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
|
||||
+ '| jq .buildstatus | xargs test 0 -eq'
|
||||
)
|
||||
|
||||
data = machine.succeed(
|
||||
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
|
||||
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
|
||||
+ f"-H 'Authorization: token ${api_token}'"
|
||||
)
|
||||
|
||||
response = json.loads(data)
|
||||
|
||||
assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!"
|
||||
assert response[0]['status'] == "success", "Expected finished status to be success!"
|
||||
assert response[1]['status'] == "pending", "Expected queued status to be pending!"
|
||||
|
||||
machine.shutdown()
|
||||
'';
|
||||
});
|
||||
|
||||
validate-openapi = forEachSystem (system:
|
||||
let pkgs = nixpkgs.legacyPackages.${system}; in
|
||||
pkgs.runCommand "validate-openapi"
|
||||
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
|
||||
''
|
||||
openapi-generator-cli validate -i ${./hydra-api.yaml}
|
||||
touch $out
|
||||
'');
|
||||
|
||||
}
|
284
package.nix
Normal file
@@ -0,0 +1,284 @@
|
||||
{ stdenv
|
||||
, lib
|
||||
, fileset
|
||||
|
||||
, rawSrc
|
||||
|
||||
, buildEnv
|
||||
|
||||
, perlPackages
|
||||
|
||||
, nixComponents
|
||||
, git
|
||||
|
||||
, makeWrapper
|
||||
, meson
|
||||
, ninja
|
||||
, nukeReferences
|
||||
, pkg-config
|
||||
, mdbook
|
||||
|
||||
, unzip
|
||||
, libpqxx
|
||||
, top-git
|
||||
, mercurial
|
||||
, darcs
|
||||
, subversion
|
||||
, breezy
|
||||
, openssl
|
||||
, bzip2
|
||||
, libxslt
|
||||
, perl
|
||||
, pixz
|
||||
, boost
|
||||
, postgresql_13
|
||||
, nlohmann_json
|
||||
, prometheus-cpp
|
||||
|
||||
, cacert
|
||||
, foreman
|
||||
, glibcLocales
|
||||
, libressl
|
||||
, openldap
|
||||
, python3
|
||||
|
||||
, openssh
|
||||
, coreutils
|
||||
, findutils
|
||||
, gzip
|
||||
, xz
|
||||
, gnutar
|
||||
, gnused
|
||||
, nix-eval-jobs
|
||||
|
||||
, rpm
|
||||
, dpkg
|
||||
, cdrkit
|
||||
}:
|
||||
|
||||
let
|
||||
perlDeps = buildEnv {
|
||||
name = "hydra-perl-deps";
|
||||
paths = lib.closePropagation
|
||||
([
|
||||
nixComponents.nix-perl-bindings
|
||||
git
|
||||
] ++ (with perlPackages; [
|
||||
AuthenSASL
|
||||
CatalystActionREST
|
||||
CatalystAuthenticationStoreDBIxClass
|
||||
CatalystAuthenticationStoreLDAP
|
||||
CatalystDevel
|
||||
CatalystPluginAccessLog
|
||||
CatalystPluginAuthorizationRoles
|
||||
CatalystPluginCaptcha
|
||||
CatalystPluginPrometheusTiny
|
||||
CatalystPluginSessionStateCookie
|
||||
CatalystPluginSessionStoreFastMmap
|
||||
CatalystPluginStackTrace
|
||||
CatalystTraitForRequestProxyBase
|
||||
CatalystViewDownload
|
||||
CatalystViewJSON
|
||||
CatalystViewTT
|
||||
CatalystXRoleApplicator
|
||||
CatalystXScriptServerStarman
|
||||
CryptPassphrase
|
||||
CryptPassphraseArgon2
|
||||
CryptRandPasswd
|
||||
DataDump
|
||||
DateTime
|
||||
DBDPg
|
||||
DBDSQLite
|
||||
DBIxClassHelpers
|
||||
DigestSHA1
|
||||
EmailMIME
|
||||
EmailSender
|
||||
FileCopyRecursive
|
||||
FileLibMagic
|
||||
FileSlurper
|
||||
FileWhich
|
||||
IOCompress
|
||||
IPCRun
|
||||
IPCRun3
|
||||
JSON
|
||||
JSONMaybeXS
|
||||
JSONXS
|
||||
ListSomeUtils
|
||||
LWP
|
||||
LWPProtocolHttps
|
||||
ModulePluggable
|
||||
NetAmazonS3
|
||||
NetPrometheus
|
||||
NetStatsd
|
||||
PadWalker
|
||||
ParallelForkManager
|
||||
PerlCriticCommunity
|
||||
PrometheusTinyShared
|
||||
ReadonlyX
|
||||
SetScalar
|
||||
SQLSplitStatement
|
||||
Starman
|
||||
StringCompareConstantTime
|
||||
SysHostnameLong
|
||||
TermSizeAny
|
||||
TermReadKey
|
||||
Test2Harness
|
||||
TestPostgreSQL
|
||||
TextDiff
|
||||
TextTable
|
||||
UUID4Tiny
|
||||
YAML
|
||||
XMLSimple
|
||||
]));
|
||||
};
|
||||
|
||||
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "hydra";
|
||||
inherit version;
|
||||
|
||||
src = fileset.toSource {
|
||||
root = ./.;
|
||||
fileset = fileset.unions ([
|
||||
./doc
|
||||
./meson.build
|
||||
./nixos-modules
|
||||
./src
|
||||
./t
|
||||
./version.txt
|
||||
./.perlcriticrc
|
||||
]);
|
||||
};
|
||||
|
||||
outputs = [ "out" "doc" ];
|
||||
|
||||
strictDeps = true;
|
||||
|
||||
nativeBuildInputs = [
|
||||
makeWrapper
|
||||
meson
|
||||
ninja
|
||||
nukeReferences
|
||||
pkg-config
|
||||
mdbook
|
||||
nixComponents.nix-cli
|
||||
perlDeps
|
||||
perl
|
||||
unzip
|
||||
];
|
||||
|
||||
buildInputs = [
|
||||
libpqxx
|
||||
openssl
|
||||
libxslt
|
||||
nixComponents.nix-util
|
||||
nixComponents.nix-store
|
||||
nixComponents.nix-main
|
||||
perlDeps
|
||||
perl
|
||||
boost
|
||||
nlohmann_json
|
||||
prometheus-cpp
|
||||
];
|
||||
|
||||
nativeCheckInputs = [
|
||||
bzip2
|
||||
darcs
|
||||
foreman
|
||||
top-git
|
||||
mercurial
|
||||
subversion
|
||||
breezy
|
||||
openldap
|
||||
postgresql_13
|
||||
pixz
|
||||
nix-eval-jobs
|
||||
];
|
||||
|
||||
checkInputs = [
|
||||
cacert
|
||||
glibcLocales
|
||||
libressl.nc
|
||||
python3
|
||||
nixComponents.nix-cli
|
||||
];
|
||||
|
||||
hydraPath = lib.makeBinPath (
|
||||
[
|
||||
subversion
|
||||
openssh
|
||||
nixComponents.nix-cli
|
||||
coreutils
|
||||
findutils
|
||||
pixz
|
||||
gzip
|
||||
bzip2
|
||||
xz
|
||||
gnutar
|
||||
unzip
|
||||
git
|
||||
top-git
|
||||
mercurial
|
||||
darcs
|
||||
gnused
|
||||
breezy
|
||||
nix-eval-jobs
|
||||
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
|
||||
);
|
||||
|
||||
OPENLDAP_ROOT = openldap;
|
||||
|
||||
mesonBuildType = "release";
|
||||
|
||||
postPatch = ''
|
||||
patchShebangs .
|
||||
'';
|
||||
|
||||
shellHook = ''
|
||||
pushd $(git rev-parse --show-toplevel) >/dev/null
|
||||
|
||||
PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
|
||||
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
|
||||
export HYDRA_HOME="$(pwd)/src/"
|
||||
mkdir -p .hydra-data
|
||||
export HYDRA_DATA="$(pwd)/.hydra-data"
|
||||
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
|
||||
|
||||
popd >/dev/null
|
||||
'';
|
||||
|
||||
doCheck = true;
|
||||
|
||||
mesonCheckFlags = [ "--verbose" ];
|
||||
|
||||
preCheck = ''
|
||||
export LOGNAME=''${LOGNAME:-foo}
|
||||
# set $HOME for bzr so it can create its trace file
|
||||
export HOME=$(mktemp -d)
|
||||
'';
|
||||
|
||||
postInstall = ''
|
||||
mkdir -p $out/nix-support
|
||||
|
||||
for i in $out/bin/*; do
|
||||
read -n 4 chars < $i
|
||||
if [[ $chars =~ ELF ]]; then continue; fi
|
||||
wrapProgram $i \
|
||||
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
|
||||
--prefix PATH ':' $out/bin:$hydraPath \
|
||||
--set HYDRA_RELEASE ${version} \
|
||||
--set HYDRA_HOME $out/libexec/hydra \
|
||||
--set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
|
||||
--set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
|
||||
done
|
||||
'';
|
||||
|
||||
dontStrip = true;
|
||||
|
||||
meta.description = "Build of Hydra on ${stdenv.system}";
|
||||
passthru = {
|
||||
inherit perlDeps;
|
||||
nix = nixComponents.nix-cli;
|
||||
};
|
||||
})
|
@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`

(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
src = ./.;
}).shellNix

@@ -1,3 +0,0 @@
SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
@@ -1,5 +0,0 @@
bin_PROGRAMS = hydra-eval-jobs

hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
hydra_eval_jobs_LDADD = $(NIX_LIBS)
hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra
@@ -1,558 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "shared.hh"
|
||||
#include "store-api.hh"
|
||||
#include "eval.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "util.hh"
|
||||
#include "get-drvs.hh"
|
||||
#include "globals.hh"
|
||||
#include "common-eval-args.hh"
|
||||
#include "flake/flakeref.hh"
|
||||
#include "flake/flake.hh"
|
||||
#include "attr-path.hh"
|
||||
#include "derivations.hh"
|
||||
#include "local-fs-store.hh"
|
||||
|
||||
#include "hydra-config.hh"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/resource.h>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
void check_pid_status_nonblocking(pid_t check_pid) {
|
||||
// Only check 'initialized' and known PID's
|
||||
if (check_pid <= 0) { return; }
|
||||
|
||||
int wstatus = 0;
|
||||
pid_t pid = waitpid(check_pid, &wstatus, WNOHANG);
|
||||
// -1 = failure, WNOHANG: 0 = no change
|
||||
if (pid <= 0) { return; }
|
||||
|
||||
std::cerr << "child process (" << pid << ") ";
|
||||
|
||||
if (WIFEXITED(wstatus)) {
|
||||
std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl;
|
||||
} else if (WIFSIGNALED(wstatus)) {
|
||||
std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl;
|
||||
} else if (WIFSTOPPED(wstatus)) {
|
||||
std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl;
|
||||
} else if (WIFCONTINUED(wstatus)) {
|
||||
std::cerr << "continued" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
using namespace nix;
|
||||
|
||||
static Path gcRootsDir;
|
||||
static size_t maxMemorySize;
|
||||
|
||||
struct MyArgs : MixEvalArgs, MixCommonArgs
|
||||
{
|
||||
Path releaseExpr;
|
||||
bool flake = false;
|
||||
bool dryRun = false;
|
||||
|
||||
MyArgs() : MixCommonArgs("hydra-eval-jobs")
|
||||
{
|
||||
addFlag({
|
||||
.longName = "gc-roots-dir",
|
||||
.description = "garbage collector roots directory",
|
||||
.labels = {"path"},
|
||||
.handler = {&gcRootsDir}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "dry-run",
|
||||
.description = "don't create store derivations",
|
||||
.handler = {&dryRun, true}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "flake",
|
||||
.description = "build a flake",
|
||||
.handler = {&flake, true}
|
||||
});
|
||||
|
||||
expectArg("expr", &releaseExpr);
|
||||
}
|
||||
};
|
||||
|
||||
static MyArgs myArgs;
|
||||
|
||||
static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
|
||||
{
|
||||
Strings res;
|
||||
std::function<void(Value & v)> rec;
|
||||
|
||||
rec = [&](Value & v) {
|
||||
state.forceValue(v, noPos);
|
||||
if (v.type() == nString)
|
||||
res.push_back(v.string.s);
|
||||
else if (v.isList())
|
||||
for (unsigned int n = 0; n < v.listSize(); ++n)
|
||||
rec(*v.listElems()[n]);
|
||||
else if (v.type() == nAttrs) {
|
||||
auto a = v.attrs->find(state.symbols.create(subAttribute));
|
||||
if (a != v.attrs->end())
|
||||
res.push_back(std::string(state.forceString(*a->value)));
|
||||
}
|
||||
};
|
||||
|
||||
Value * v = drv.queryMeta(name);
|
||||
if (v) rec(*v);
|
||||
|
||||
return concatStringsSep(", ", res);
|
||||
}
|
||||
|
||||
static void worker(
|
||||
EvalState & state,
|
||||
Bindings & autoArgs,
|
||||
AutoCloseFD & to,
|
||||
AutoCloseFD & from)
|
||||
{
|
||||
Value vTop;
|
||||
|
||||
if (myArgs.flake) {
|
||||
using namespace flake;
|
||||
|
||||
auto flakeRef = parseFlakeRef(myArgs.releaseExpr);
|
||||
|
||||
auto vFlake = state.allocValue();
|
||||
|
||||
auto lockedFlake = lockFlake(state, flakeRef,
|
||||
LockFlags {
|
||||
.updateLockFile = false,
|
||||
.useRegistries = false,
|
||||
.allowMutable = false,
|
||||
});
|
||||
|
||||
callFlake(state, lockedFlake, *vFlake);
|
||||
|
||||
auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
|
||||
state.forceValue(*vOutputs, noPos);
|
||||
|
||||
auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
|
||||
if (!aHydraJobs)
|
||||
aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
|
||||
if (!aHydraJobs)
|
||||
throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
|
||||
|
||||
vTop = *aHydraJobs->value;
|
||||
|
||||
} else {
|
||||
state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
|
||||
}
|
||||
|
||||
auto vRoot = state.allocValue();
|
||||
state.autoCallFunction(autoArgs, vTop, *vRoot);
|
||||
|
||||
while (true) {
|
||||
/* Wait for the master to send us a job name. */
|
||||
writeLine(to.get(), "next");
|
||||
|
||||
auto s = readLine(from.get());
|
||||
if (s == "exit") break;
|
||||
if (!hasPrefix(s, "do ")) abort();
|
||||
std::string attrPath(s, 3);
|
||||
|
||||
debug("worker process %d at '%s'", getpid(), attrPath);
|
||||
|
||||
/* Evaluate it and send info back to the master. */
|
||||
nlohmann::json reply;
|
||||
|
||||
try {
|
||||
auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;
|
||||
|
||||
auto v = state.allocValue();
|
||||
state.autoCallFunction(autoArgs, *vTmp, *v);
|
||||
|
||||
if (auto drv = getDerivation(state, *v, false)) {
|
||||
|
||||
DrvInfo::Outputs outputs = drv->queryOutputs();
|
||||
|
||||
if (drv->querySystem() == "unknown")
|
||||
throw EvalError("derivation must have a 'system' attribute");
|
||||
|
||||
auto drvPath = state.store->printStorePath(drv->requireDrvPath());
|
||||
|
||||
nlohmann::json job;
|
||||
|
||||
job["nixName"] = drv->queryName();
|
||||
job["system"] =drv->querySystem();
|
||||
job["drvPath"] = drvPath;
|
||||
job["description"] = drv->queryMetaString("description");
|
||||
job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
|
||||
job["homepage"] = drv->queryMetaString("homepage");
|
||||
job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
|
||||
job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
|
||||
job["timeout"] = drv->queryMetaInt("timeout", 36000);
|
||||
job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
|
||||
job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
|
||||
|
||||
/* If this is an aggregate, then get its constituents. */
|
||||
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
|
||||
if (a && state.forceBool(*a->value, *a->pos)) {
|
||||
auto a = v->attrs->get(state.symbols.create("constituents"));
|
||||
if (!a)
|
||||
throw EvalError("derivation must have a ‘constituents’ attribute");
|
||||
|
||||
|
||||
PathSet context;
|
||||
state.coerceToString(*a->pos, *a->value, context, true, false);
|
||||
for (auto & i : context)
|
||||
if (i.at(0) == '!') {
|
||||
size_t index = i.find("!", 1);
|
||||
job["constituents"].push_back(std::string(i, index + 1));
|
||||
}
|
||||
|
||||
state.forceList(*a->value, *a->pos);
|
||||
for (unsigned int n = 0; n < a->value->listSize(); ++n) {
|
||||
auto v = a->value->listElems()[n];
|
||||
state.forceValue(*v, noPos);
|
||||
if (v->type() == nString)
|
||||
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
|
||||
}
|
||||
}
|
||||
|
||||
/* Register the derivation as a GC root. !!! This
|
||||
registers roots for jobs that we may have already
|
||||
done. */
|
||||
auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
|
||||
if (gcRootsDir != "" && localStore) {
|
||||
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
|
||||
if (!pathExists(root))
|
||||
localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
|
||||
}
|
||||
|
||||
nlohmann::json out;
|
||||
for (auto & j : outputs)
|
||||
// FIXME: handle CA/impure builds.
|
||||
if (j.second)
|
||||
out[j.first] = state.store->printStorePath(*j.second);
|
||||
job["outputs"] = std::move(out);
|
||||
|
||||
reply["job"] = std::move(job);
|
||||
}
|
||||
|
||||
else if (v->type() == nAttrs) {
|
||||
auto attrs = nlohmann::json::array();
|
||||
StringSet ss;
|
||||
for (auto & i : v->attrs->lexicographicOrder()) {
|
||||
std::string name(i->name);
|
||||
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
|
||||
printError("skipping job with illegal name '%s'", name);
|
||||
continue;
|
||||
}
|
||||
attrs.push_back(name);
|
||||
}
|
||||
reply["attrs"] = std::move(attrs);
|
||||
}
|
||||
|
||||
else if (v->type() == nNull)
|
||||
;
|
||||
|
||||
else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
|
||||
|
||||
} catch (EvalError & e) {
|
||||
auto msg = e.msg();
|
||||
// Transmits the error we got from the previous evaluation
|
||||
// in the JSON output.
|
||||
reply["error"] = filterANSIEscapes(msg, true);
|
||||
// Don't forget to print it into the STDERR log, this is
|
||||
// what's shown in the Hydra UI.
|
||||
printError(msg);
|
||||
}
|
||||
|
||||
writeLine(to.get(), reply.dump());
|
||||
|
||||
/* If our RSS exceeds the maximum, exit. The master will
|
||||
start a new process. */
|
||||
struct rusage r;
|
||||
getrusage(RUSAGE_SELF, &r);
|
||||
if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
|
||||
}
|
||||
|
||||
writeLine(to.get(), "restart");
|
||||
}
|
||||
|
||||
int main(int argc, char * * argv)
|
||||
{
|
||||
/* Prevent undeclared dependencies in the evaluation via
|
||||
$NIX_PATH. */
|
||||
unsetenv("NIX_PATH");
|
||||
|
||||
return handleExceptions(argv[0], [&]() {
|
||||
|
||||
auto config = std::make_unique<HydraConfig>();
|
||||
|
||||
auto nrWorkers = config->getIntOption("evaluator_workers", 1);
|
||||
maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
|
||||
|
||||
initNix();
|
||||
initGC();
|
||||
|
||||
myArgs.parseCmdline(argvToStrings(argc, argv));
|
||||
|
||||
auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake);
|
||||
|
||||
/* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
|
||||
settings.builders = "";
|
||||
|
||||
/* Prevent access to paths outside of the Nix search path and
|
||||
to the environment. */
|
||||
evalSettings.restrictEval = true;
|
||||
|
||||
/* When building a flake, use pure evaluation (no access to
|
||||
'getEnv', 'currentSystem' etc. */
|
||||
evalSettings.pureEval = pureEval;
|
||||
|
||||
if (myArgs.dryRun) settings.readOnlyMode = true;
|
||||
|
||||
if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
|
||||
|
||||
if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
|
||||
|
||||
struct State
|
||||
{
|
||||
std::set<std::string> todo{""};
|
||||
std::set<std::string> active;
|
||||
nlohmann::json jobs;
|
||||
std::exception_ptr exc;
|
||||
};
|
||||
|
||||
std::condition_variable wakeup;
|
||||
|
||||
Sync<State> state_;
|
||||
|
||||
/* Start a handler thread per worker process. */
|
||||
auto handler = [&]()
|
||||
{
|
||||
pid_t pid = -1;
|
||||
try {
|
||||
AutoCloseFD from, to;
|
||||
|
||||
while (true) {
|
||||
|
||||
/* Start a new worker process if necessary. */
|
||||
if (pid == -1) {
|
||||
Pipe toPipe, fromPipe;
|
||||
toPipe.create();
|
||||
fromPipe.create();
|
||||
pid = startProcess(
|
||||
[&,
|
||||
to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
|
||||
from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
|
||||
]()
|
||||
{
|
||||
try {
|
||||
EvalState state(myArgs.searchPath, openStore());
|
||||
Bindings & autoArgs = *myArgs.getAutoArgs(state);
|
||||
worker(state, autoArgs, *to, *from);
|
||||
} catch (Error & e) {
|
||||
nlohmann::json err;
|
||||
auto msg = e.msg();
|
||||
err["error"] = filterANSIEscapes(msg, true);
|
||||
printError(msg);
|
||||
writeLine(to->get(), err.dump());
|
||||
// Don't forget to print it into the STDERR log, this is
|
||||
// what's shown in the Hydra UI.
|
||||
writeLine(to->get(), "restart");
|
||||
}
|
||||
},
|
||||
ProcessOptions { .allowVfork = false });
|
||||
from = std::move(fromPipe.readSide);
|
||||
to = std::move(toPipe.writeSide);
|
||||
debug("created worker process %d", pid);
|
||||
}
|
||||
|
||||
/* Check whether the existing worker process is still there. */
|
||||
auto s = readLine(from.get());
|
||||
if (s == "restart") {
|
||||
pid = -1;
|
||||
continue;
|
||||
} else if (s != "next") {
|
||||
auto json = nlohmann::json::parse(s);
|
||||
throw Error("worker error: %s", (std::string) json["error"]);
|
||||
}
|
||||
|
||||
/* Wait for a job name to become available. */
|
||||
std::string attrPath;
|
||||
|
||||
while (true) {
|
||||
checkInterrupt();
|
||||
auto state(state_.lock());
|
||||
if ((state->todo.empty() && state->active.empty()) || state->exc) {
|
||||
writeLine(to.get(), "exit");
|
||||
return;
|
||||
}
|
||||
if (!state->todo.empty()) {
|
||||
attrPath = *state->todo.begin();
|
||||
state->todo.erase(state->todo.begin());
|
||||
state->active.insert(attrPath);
|
||||
break;
|
||||
} else
|
||||
state.wait(wakeup);
|
||||
}
|
||||
|
||||
/* Tell the worker to evaluate it. */
|
||||
writeLine(to.get(), "do " + attrPath);
|
||||
|
||||
/* Wait for the response. */
|
||||
auto response = nlohmann::json::parse(readLine(from.get()));
|
||||
|
||||
/* Handle the response. */
|
||||
StringSet newAttrs;
|
||||
|
||||
if (response.find("job") != response.end()) {
|
||||
auto state(state_.lock());
|
||||
state->jobs[attrPath] = response["job"];
|
||||
}
|
||||
|
||||
if (response.find("attrs") != response.end()) {
|
||||
for (auto & i : response["attrs"]) {
|
||||
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
|
||||
newAttrs.insert(s);
|
||||
}
|
||||
}
|
||||
|
||||
if (response.find("error") != response.end()) {
|
||||
auto state(state_.lock());
|
||||
state->jobs[attrPath]["error"] = response["error"];
|
||||
}
|
||||
|
||||
/* Add newly discovered job names to the queue. */
|
||||
{
|
||||
auto state(state_.lock());
|
||||
state->active.erase(attrPath);
|
||||
for (auto & s : newAttrs)
|
||||
state->todo.insert(s);
|
||||
wakeup.notify_all();
|
||||
}
|
||||
}
|
||||
} catch (...) {
|
||||
check_pid_status_nonblocking(pid);
|
||||
auto state(state_.lock());
|
||||
state->exc = std::current_exception();
|
||||
wakeup.notify_all();
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<std::thread> threads;
|
||||
for (size_t i = 0; i < nrWorkers; i++)
|
||||
threads.emplace_back(std::thread(handler));
|
||||
|
||||
for (auto & thread : threads)
|
||||
thread.join();
|
||||
|
||||
auto state(state_.lock());
|
||||
|
||||
if (state->exc)
|
||||
std::rethrow_exception(state->exc);
|
||||
|
||||
/* For aggregate jobs that have named consistuents
|
||||
(i.e. constituents that are a job name rather than a
|
||||
derivation), look up the referenced job and add it to the
|
||||
dependencies of the aggregate derivation. */
|
||||
auto store = openStore();
|
||||
|
||||
for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
|
||||
auto jobName = i.key();
|
||||
auto & job = i.value();
|
||||
|
||||
auto named = job.find("namedConstituents");
|
||||
if (named == job.end()) continue;
|
||||
|
||||
std::unordered_map<std::string, std::string> brokenJobs;
|
||||
auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state](
|
||||
const std::string & childJobName) -> std::optional<nlohmann::json> {
|
||||
auto childJob = state->jobs.find(childJobName);
|
||||
if (childJob == state->jobs.end()) {
|
||||
printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName);
|
||||
brokenJobs[childJobName] = "does not exist";
|
||||
return std::nullopt;
|
||||
}
|
||||
if (childJob->find("error") != childJob->end()) {
|
||||
std::string error = (*childJob)["error"];
|
||||
printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error);
|
||||
brokenJobs[childJobName] = error;
|
||||
return std::nullopt;
|
||||
}
|
||||
return *childJob;
|
||||
};
|
||||
|
||||
if (myArgs.dryRun) {
|
||||
for (std::string jobName2 : *named) {
|
||||
auto job2 = getNonBrokenJobOrRecordError(jobName2);
|
||||
if (!job2) {
|
||||
continue;
|
||||
}
|
||||
std::string drvPath2 = (*job2)["drvPath"];
|
||||
job["constituents"].push_back(drvPath2);
|
||||
}
|
||||
} else {
|
||||
auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
|
||||
auto drv = store->readDerivation(drvPath);
|
||||
|
||||
for (std::string jobName2 : *named) {
|
||||
auto job2 = getNonBrokenJobOrRecordError(jobName2);
|
||||
if (!job2) {
|
||||
continue;
|
||||
}
|
||||
auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
|
||||
auto drv2 = store->readDerivation(drvPath2);
|
||||
job["constituents"].push_back(store->printStorePath(drvPath2));
|
||||
drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
|
||||
}
|
||||
|
||||
if (brokenJobs.empty()) {
|
||||
std::string drvName(drvPath.name());
|
||||
assert(hasSuffix(drvName, drvExtension));
|
||||
drvName.resize(drvName.size() - drvExtension.size());
|
||||
|
||||
auto hashModulo = hashDerivationModulo(*store, drv, true);
|
||||
if (hashModulo.kind != DrvHash::Kind::Regular) continue;
|
||||
auto h = hashModulo.hashes.find("out");
|
||||
if (h == hashModulo.hashes.end()) continue;
|
||||
auto outPath = store->makeOutputPath("out", h->second, drvName);
|
||||
drv.env["out"] = store->printStorePath(outPath);
|
||||
drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath });
|
||||
auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));
|
||||
|
||||
debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);
|
||||
|
||||
job["drvPath"] = newDrvPath;
|
||||
job["outputs"]["out"] = store->printStorePath(outPath);
|
||||
}
|
||||
}
|
||||
|
||||
job.erase("namedConstituents");
|
||||
|
||||
/* Register the derivation as a GC root. !!! This
|
||||
registers roots for jobs that we may have already
|
||||
done. */
|
||||
auto localStore = store.dynamic_pointer_cast<LocalFSStore>();
|
||||
if (gcRootsDir != "" && localStore) {
|
||||
auto drvPath = job["drvPath"].get<std::string>();
|
||||
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
|
||||
if (!pathExists(root))
|
||||
localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
|
||||
}
|
||||
|
||||
if (!brokenJobs.empty()) {
|
||||
std::stringstream ss;
|
||||
for (const auto& [jobName, error] : brokenJobs) {
|
||||
ss << jobName << ": " << error << "\n";
|
||||
}
|
||||
job["error"] = ss.str();
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << state->jobs.dump(2) << "\n";
|
||||
});
|
||||
}
|
@@ -1,5 +0,0 @@
|
||||
bin_PROGRAMS = hydra-evaluator
|
||||
|
||||
hydra_evaluator_SOURCES = hydra-evaluator.cc
|
||||
hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
|
||||
hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
|
@@ -1,7 +1,8 @@
|
||||
#include "db.hh"
|
||||
#include "hydra-config.hh"
|
||||
#include "pool.hh"
|
||||
#include "shared.hh"
|
||||
#include <nix/util/pool.hh>
|
||||
#include <nix/main/shared.hh>
|
||||
#include <nix/util/signals.hh>
|
||||
|
||||
#include <algorithm>
|
||||
#include <thread>
|
||||
@@ -37,7 +38,7 @@ class JobsetId {
|
||||
friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
|
||||
|
||||
std::string display() const {
|
||||
return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
|
||||
return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
|
||||
}
|
||||
};
|
||||
bool operator==(const JobsetId & lhs, const JobsetId & rhs)
|
||||
@@ -366,6 +367,9 @@ struct Evaluator
|
||||
printInfo("received jobset event");
|
||||
}
|
||||
|
||||
} catch (pqxx::broken_connection & e) {
|
||||
printError("Database connection broken: %s", e.what());
|
||||
std::_Exit(1);
|
||||
} catch (std::exception & e) {
|
||||
printError("exception in database monitor thread: %s", e.what());
|
||||
sleep(30);
|
||||
@@ -473,6 +477,9 @@ struct Evaluator
|
||||
while (true) {
|
||||
try {
|
||||
loop();
|
||||
} catch (pqxx::broken_connection & e) {
|
||||
printError("Database connection broken: %s", e.what());
|
||||
std::_Exit(1);
|
||||
} catch (std::exception & e) {
|
||||
printError("exception in main loop: %s", e.what());
|
||||
sleep(30);
|
||||
|
10
src/hydra-evaluator/meson.build
Normal file
10
src/hydra-evaluator/meson.build
Normal file
@@ -0,0 +1,10 @@
|
||||
hydra_evaluator = executable('hydra-evaluator',
|
||||
'hydra-evaluator.cc',
|
||||
dependencies: [
|
||||
libhydra_dep,
|
||||
nix_util_dep,
|
||||
nix_main_dep,
|
||||
pqxx_dep,
|
||||
],
|
||||
install: true,
|
||||
)
|
@@ -1,8 +0,0 @@
|
||||
bin_PROGRAMS = hydra-queue-runner
|
||||
|
||||
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
|
||||
builder.cc build-result.cc build-remote.cc \
|
||||
hydra-build-result.hh counter.hh state.hh db.hh \
|
||||
nar-extractor.cc nar-extractor.hh
|
||||
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core
|
||||
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
|
@@ -5,107 +5,77 @@
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "build-result.hh"
|
||||
#include "serve-protocol.hh"
|
||||
#include <nix/store/build-result.hh>
|
||||
#include <nix/store/path.hh>
|
||||
#include <nix/store/legacy-ssh-store.hh>
|
||||
#include <nix/store/serve-protocol.hh>
|
||||
#include <nix/store/serve-protocol-impl.hh>
|
||||
#include "state.hh"
|
||||
#include "util.hh"
|
||||
#include "worker-protocol.hh"
|
||||
#include "finally.hh"
|
||||
#include "url.hh"
|
||||
#include <nix/util/current-process.hh>
|
||||
#include <nix/util/processes.hh>
|
||||
#include <nix/util/util.hh>
|
||||
#include <nix/store/serve-protocol.hh>
|
||||
#include <nix/store/serve-protocol-impl.hh>
|
||||
#include <nix/store/ssh.hh>
|
||||
#include <nix/util/finally.hh>
|
||||
#include <nix/util/url.hh>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
|
||||
struct Child
|
||||
bool ::Machine::isLocalhost() const
|
||||
{
|
||||
Pid pid;
|
||||
AutoCloseFD to, from;
|
||||
};
|
||||
|
||||
|
||||
static void append(Strings & dst, const Strings & src)
|
||||
{
|
||||
dst.insert(dst.end(), src.begin(), src.end());
|
||||
return storeUri.params.empty() && std::visit(overloaded {
|
||||
[](const StoreReference::Auto &) {
|
||||
return true;
|
||||
},
|
||||
[](const StoreReference::Specified & s) {
|
||||
return
|
||||
(s.scheme == "local" || s.scheme == "unix") ||
|
||||
((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
|
||||
s.authority == "localhost");
|
||||
},
|
||||
}, storeUri.variant);
|
||||
}
|
||||
|
||||
static Strings extraStoreArgs(std::string & machine)
|
||||
namespace nix::build_remote {
|
||||
|
||||
static std::unique_ptr<SSHMaster::Connection> openConnection(
|
||||
::Machine::ptr machine, SSHMaster & master)
|
||||
{
|
||||
Strings result;
|
||||
try {
|
||||
auto parsed = parseURL(machine);
|
||||
if (parsed.scheme != "ssh") {
|
||||
throw SysError("Currently, only (legacy-)ssh stores are supported!");
|
||||
}
|
||||
machine = parsed.authority.value_or("");
|
||||
auto remoteStore = parsed.query.find("remote-store");
|
||||
if (remoteStore != parsed.query.end()) {
|
||||
result = {"--store", shellEscape(remoteStore->second)};
|
||||
}
|
||||
} catch (BadURL &) {
|
||||
// We just try to continue with `machine->sshName` here for backwards compat.
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
|
||||
{
|
||||
std::string pgmName;
|
||||
Pipe to, from;
|
||||
to.create();
|
||||
from.create();
|
||||
|
||||
Strings argv;
|
||||
Strings command = {"nix-store", "--serve", "--write"};
|
||||
if (machine->isLocalhost()) {
|
||||
pgmName = "nix-store";
|
||||
argv = {"nix-store", "--builders", "", "--serve", "--write"};
|
||||
command.push_back("--builders");
|
||||
command.push_back("");
|
||||
} else {
|
||||
pgmName = "ssh";
|
||||
auto sshName = machine->sshName;
|
||||
Strings extraArgs = extraStoreArgs(sshName);
|
||||
argv = {"ssh", sshName};
|
||||
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
|
||||
if (machine->sshPublicHostKey != "") {
|
||||
Path fileName = tmpDir + "/host-key";
|
||||
auto p = machine->sshName.find("@");
|
||||
std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
|
||||
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
|
||||
append(argv, {"-oUserKnownHostsFile=" + fileName});
|
||||
auto remoteStore = machine->storeUri.params.find("remote-store");
|
||||
if (remoteStore != machine->storeUri.params.end()) {
|
||||
command.push_back("--store");
|
||||
command.push_back(escapeShellArgAlways(remoteStore->second));
|
||||
}
|
||||
append(argv,
|
||||
{ "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||
, "--", "nix-store", "--serve", "--write" });
|
||||
append(argv, extraArgs);
|
||||
}
|
||||
|
||||
child.pid = startProcess([&]() {
|
||||
restoreProcessContext();
|
||||
|
||||
if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
|
||||
throw SysError("cannot dup input pipe to stdin");
|
||||
|
||||
if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
|
||||
throw SysError("cannot dup output pipe to stdout");
|
||||
|
||||
if (dup2(stderrFD, STDERR_FILENO) == -1)
|
||||
throw SysError("cannot dup stderr");
|
||||
|
||||
execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
|
||||
|
||||
throw SysError("cannot start %s", pgmName);
|
||||
auto ret = master.startCommand(std::move(command), {
|
||||
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||
});
|
||||
|
||||
to.readSide = -1;
|
||||
from.writeSide = -1;
|
||||
// XXX: determine the actual max value we can use from /proc.
|
||||
|
||||
child.to = to.writeSide.release();
|
||||
child.from = from.readSide.release();
|
||||
// FIXME: Should this be upstreamed into `startCommand` in Nix?
|
||||
|
||||
int pipesize = 1024 * 1024;
|
||||
|
||||
fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
|
||||
fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
|
||||
FdSource & from, FdSink & to, const StorePathSet & paths,
|
||||
bool useSubstitutes = false)
|
||||
static void copyClosureTo(
|
||||
::Machine::Connection & conn,
|
||||
Store & destStore,
|
||||
const StorePathSet & paths,
|
||||
SubstituteFlag useSubstitutes = NoSubstitute)
|
||||
{
|
||||
StorePathSet closure;
|
||||
destStore.computeFSClosure(paths, closure);
|
||||
@@ -115,13 +85,10 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
|
||||
garbage-collect paths that are already there. Optionally, ask
|
||||
the remote host to substitute missing paths. */
|
||||
// FIXME: substitute output pollutes our build log
|
||||
to << cmdQueryValidPaths << 1 << useSubstitutes;
|
||||
worker_proto::write(destStore, to, closure);
|
||||
to.flush();
|
||||
|
||||
/* Get back the set of paths that are already valid on the remote
|
||||
host. */
|
||||
auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
|
||||
auto present = conn.queryValidPaths(
|
||||
destStore, true, closure, useSubstitutes);
|
||||
|
||||
if (present.size() == closure.size()) return;
|
||||
|
||||
@@ -133,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
|
||||
|
||||
printMsg(lvlDebug, "sending %d missing paths", missing.size());
|
||||
|
||||
std::unique_lock<std::timed_mutex> sendLock(sendMutex,
|
||||
std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
|
||||
std::chrono::seconds(600));
|
||||
|
||||
to << cmdImportPaths;
|
||||
destStore.exportPaths(missing, to);
|
||||
to.flush();
|
||||
conn.to << ServeProto::Command::ImportPaths;
|
||||
destStore.exportPaths(missing, conn.to);
|
||||
conn.to.flush();
|
||||
|
||||
if (readInt(from) != 1)
|
||||
if (readInt(conn.from) != 1)
|
||||
throw Error("remote machine failed to import closure");
|
||||
}
|
||||
|
||||
|
||||
// FIXME: use Store::topoSortPaths().
|
||||
StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
|
||||
static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
|
||||
{
|
||||
StorePaths sorted;
|
||||
StorePathSet visited;
|
||||
@@ -174,40 +141,304 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
|
||||
return sorted;
|
||||
}
|
||||
|
||||
static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
|
||||
{
|
||||
std::string base(drvPath.to_string());
|
||||
auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
|
||||
|
||||
createDirs(dirOf(logFile));
|
||||
|
||||
AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
||||
if (!logFD) throw SysError("creating log file ‘%s’", logFile);
|
||||
|
||||
return {std::move(logFile), std::move(logFD)};
|
||||
}
|
||||
|
||||
static BasicDerivation sendInputs(
|
||||
State & state,
|
||||
Step & step,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
::Machine::Connection & conn,
|
||||
unsigned int & overhead,
|
||||
counter & nrStepsWaiting,
|
||||
counter & nrStepsCopyingTo
|
||||
)
|
||||
{
|
||||
/* Replace the input derivations by their output paths to send a
|
||||
minimal closure to the builder.
|
||||
|
||||
`tryResolve` currently does *not* rewrite input addresses, so it
|
||||
is safe to do this in all cases. (It should probably have a mode
|
||||
to do that, however, but we would not use it here.)
|
||||
*/
|
||||
BasicDerivation basicDrv = ({
|
||||
auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
|
||||
if (!maybeBasicDrv)
|
||||
throw Error(
|
||||
"the derivation '%s' can’t be resolved. It’s probably "
|
||||
"missing some outputs",
|
||||
localStore.printStorePath(step.drvPath));
|
||||
*maybeBasicDrv;
|
||||
});
|
||||
|
||||
/* Ensure that the inputs exist in the destination store. This is
|
||||
a no-op for regular stores, but for the binary cache store,
|
||||
this will copy the inputs to the binary cache from the local
|
||||
store. */
|
||||
if (&localStore != &destStore) {
|
||||
copyClosure(localStore, destStore,
|
||||
step.drv->inputSrcs,
|
||||
NoRepair, NoCheckSigs, NoSubstitute);
|
||||
}
|
||||
|
||||
{
|
||||
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
||||
mc1.reset();
|
||||
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
||||
|
||||
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
||||
localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
/* Copy the input closure. */
|
||||
if (conn.machine->isLocalhost()) {
|
||||
StorePathSet closure;
|
||||
destStore.computeFSClosure(basicDrv.inputSrcs, closure);
|
||||
copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
|
||||
} else {
|
||||
copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
|
||||
}
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
return basicDrv;
|
||||
}
|
||||
|
||||
static BuildResult performBuild(
|
||||
::Machine::Connection & conn,
|
||||
Store & localStore,
|
||||
StorePath drvPath,
|
||||
const BasicDerivation & drv,
|
||||
const ServeProto::BuildOptions & options,
|
||||
counter & nrStepsBuilding
|
||||
)
|
||||
{
|
||||
conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
|
||||
|
||||
BuildResult result;
|
||||
|
||||
time_t startTime, stopTime;
|
||||
|
||||
startTime = time(0);
|
||||
{
|
||||
MaintainCount<counter> mc(nrStepsBuilding);
|
||||
result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
|
||||
}
|
||||
stopTime = time(0);
|
||||
|
||||
if (!result.startTime) {
|
||||
// If the builder gave `startTime = 0`, use our measurements
|
||||
// instead of the builder's.
|
||||
//
|
||||
// Note: this represents the duration of a single round, rather
|
||||
// than all rounds.
|
||||
result.startTime = startTime;
|
||||
result.stopTime = stopTime;
|
||||
}
|
||||
|
||||
// If the protocol was too old to give us `builtOutputs`, initialize
|
||||
// it manually by introspecting the derivation.
|
||||
if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
|
||||
{
|
||||
// If the remote is too old to handle CA derivations, we can’t get this
|
||||
// far anyways
|
||||
assert(drv.type().hasKnownOutputPaths());
|
||||
DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
|
||||
// Since this a `BasicDerivation`, `staticOutputHashes` will not
|
||||
// do any real work.
|
||||
auto outputHashes = staticOutputHashes(localStore, drv);
|
||||
for (auto & [outputName, output] : drvOutputs) {
|
||||
auto outputPath = output.second;
|
||||
// We’ve just asserted that the output paths of the derivation
|
||||
// were known
|
||||
assert(outputPath);
|
||||
auto outputHash = outputHashes.at(outputName);
|
||||
auto drvOutput = DrvOutput { outputHash, outputName };
|
||||
result.builtOutputs.insert_or_assign(
|
||||
std::move(outputName),
|
||||
Realisation { drvOutput, *outputPath });
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static void copyPathFromRemote(
|
||||
::Machine::Connection & conn,
|
||||
NarMemberDatas & narMembers,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
const ValidPathInfo & info
|
||||
)
|
||||
{
|
||||
/* Receive the NAR from the remote and add it to the
|
||||
destination store. Meanwhile, extract all the info from the
|
||||
NAR that getBuildOutput() needs. */
|
||||
auto source2 = sinkToSource([&](Sink & sink)
|
||||
{
|
||||
/* Note: we should only send the command to dump the store
|
||||
path to the remote if the NAR is actually going to get read
|
||||
by the destination store, which won't happen if this path
|
||||
is already valid on the destination store. Since this
|
||||
lambda function only gets executed if someone tries to read
|
||||
from source2, we will send the command from here rather
|
||||
than outside the lambda. */
|
||||
conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
|
||||
conn.to.flush();
|
||||
|
||||
TeeSource tee(conn.from, sink);
|
||||
extractNarData(tee, localStore.printStorePath(info.path), narMembers);
|
||||
});
|
||||
|
||||
destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
|
||||
}
|
||||
|
||||
static void copyPathsFromRemote(
|
||||
::Machine::Connection & conn,
|
||||
NarMemberDatas & narMembers,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
const std::map<StorePath, UnkeyedValidPathInfo> & infos
|
||||
)
|
||||
{
|
||||
auto pathsSorted = reverseTopoSortPaths(infos);
|
||||
|
||||
for (auto & path : pathsSorted) {
|
||||
auto & info = infos.find(path)->second;
|
||||
copyPathFromRemote(
|
||||
conn, narMembers, localStore, destStore,
|
||||
ValidPathInfo { path, info });
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* using namespace nix::build_remote; */
|
||||
|
||||
void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
|
||||
{
|
||||
startTime = buildResult.startTime;
|
||||
stopTime = buildResult.stopTime;
|
||||
timesBuilt = buildResult.timesBuilt;
|
||||
errorMsg = buildResult.errorMsg;
|
||||
isNonDeterministic = buildResult.isNonDeterministic;
|
||||
|
||||
switch ((BuildResult::Status) buildResult.status) {
|
||||
case BuildResult::Built:
|
||||
stepStatus = bsSuccess;
|
||||
break;
|
||||
case BuildResult::Substituted:
|
||||
case BuildResult::AlreadyValid:
|
||||
stepStatus = bsSuccess;
|
||||
isCached = true;
|
||||
break;
|
||||
case BuildResult::PermanentFailure:
|
||||
stepStatus = bsFailed;
|
||||
canCache = true;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::InputRejected:
|
||||
case BuildResult::OutputRejected:
|
||||
stepStatus = bsFailed;
|
||||
canCache = true;
|
||||
break;
|
||||
case BuildResult::TransientFailure:
|
||||
stepStatus = bsFailed;
|
||||
canRetry = true;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::TimedOut:
|
||||
stepStatus = bsTimedOut;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::MiscFailure:
|
||||
stepStatus = bsAborted;
|
||||
canRetry = true;
|
||||
break;
|
||||
case BuildResult::LogLimitExceeded:
|
||||
stepStatus = bsLogLimitExceeded;
|
||||
break;
|
||||
case BuildResult::NotDeterministic:
|
||||
stepStatus = bsNotDeterministic;
|
||||
canRetry = false;
|
||||
canCache = true;
|
||||
break;
|
||||
default:
|
||||
stepStatus = bsAborted;
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Utility guard object to auto-release a semaphore on destruction. */
|
||||
template <typename T>
|
||||
class SemaphoreReleaser {
|
||||
public:
|
||||
SemaphoreReleaser(T* s) : sem(s) {}
|
||||
~SemaphoreReleaser() { sem->release(); }
|
||||
|
||||
private:
|
||||
T* sem;
|
||||
};
|
||||
|
||||
void State::buildRemote(ref<Store> destStore,
|
||||
Machine::ptr machine, Step::ptr step,
|
||||
unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
|
||||
std::unique_ptr<MachineReservation> reservation,
|
||||
::Machine::ptr machine, Step::ptr step,
|
||||
const ServeProto::BuildOptions & buildOptions,
|
||||
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
||||
std::function<void(StepState)> updateStep,
|
||||
NarMemberDatas & narMembers)
|
||||
{
|
||||
assert(BuildResult::TimedOut == 8);
|
||||
|
||||
std::string base(step->drvPath.to_string());
|
||||
result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
|
||||
AutoDelete autoDelete(result.logFile, false);
|
||||
|
||||
createDirs(dirOf(result.logFile));
|
||||
|
||||
AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
||||
if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
|
||||
|
||||
nix::Path tmpDir = createTempDir();
|
||||
AutoDelete tmpDirDel(tmpDir, true);
|
||||
auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
|
||||
AutoDelete logFileDel(logFile, false);
|
||||
result.logFile = logFile;
|
||||
|
||||
try {
|
||||
|
||||
updateStep(ssConnecting);
|
||||
|
||||
auto storeRef = machine->completeStoreReference();
|
||||
|
||||
auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
|
||||
if (!pSpecified || pSpecified->scheme != "ssh") {
|
||||
throw Error("Currently, only (legacy-)ssh stores are supported!");
|
||||
}
|
||||
|
||||
LegacySSHStoreConfig storeConfig {
|
||||
pSpecified->scheme,
|
||||
pSpecified->authority,
|
||||
storeRef.params
|
||||
};
|
||||
|
||||
auto master = storeConfig.createSSHMaster(
|
||||
false, // no SSH master yet
|
||||
logFD.get());
|
||||
|
||||
// FIXME: rewrite to use Store.
|
||||
Child child;
|
||||
openConnection(machine, tmpDir, logFD.get(), child);
|
||||
auto child = build_remote::openConnection(machine, master);
|
||||
|
||||
{
|
||||
auto activeStepState(activeStep->state_.lock());
|
||||
if (activeStepState->cancelled) throw Error("step cancelled");
|
||||
activeStepState->pid = child.pid;
|
||||
activeStepState->pid = child->sshPid;
|
||||
}
|
||||
|
||||
Finally clearPid([&]() {
|
||||
@@ -222,34 +453,33 @@ void State::buildRemote(ref<Store> destStore,
|
||||
process. Meh. */
|
||||
});
|
||||
|
||||
FdSource from(child.from.get());
|
||||
FdSink to(child.to.get());
|
||||
::Machine::Connection conn {
|
||||
{
|
||||
.to = child->in.get(),
|
||||
.from = child->out.get(),
|
||||
/* Handshake. */
|
||||
.remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
|
||||
},
|
||||
/*.machine =*/ machine,
|
||||
};
|
||||
|
||||
Finally updateStats([&]() {
|
||||
bytesReceived += from.read;
|
||||
bytesSent += to.written;
|
||||
bytesReceived += conn.from.read;
|
||||
bytesSent += conn.to.written;
|
||||
});
|
||||
|
||||
/* Handshake. */
|
||||
unsigned int remoteVersion;
|
||||
constexpr ServeProto::Version our_version = 0x206;
|
||||
|
||||
try {
|
||||
to << SERVE_MAGIC_1 << 0x204;
|
||||
to.flush();
|
||||
|
||||
unsigned int magic = readInt(from);
|
||||
if (magic != SERVE_MAGIC_2)
|
||||
throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
|
||||
remoteVersion = readInt(from);
|
||||
if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
|
||||
throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
|
||||
throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
|
||||
|
||||
conn.remoteVersion = decltype(conn)::handshake(
|
||||
conn.to,
|
||||
conn.from,
|
||||
our_version,
|
||||
machine->storeUri.render());
|
||||
} catch (EndOfFile & e) {
|
||||
child.pid.wait();
|
||||
child->sshPid.wait();
|
||||
std::string s = chomp(readFile(result.logFile));
|
||||
throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
|
||||
throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
|
||||
}
|
||||
|
||||
{
|
||||
@@ -263,62 +493,12 @@ void State::buildRemote(ref<Store> destStore,
|
||||
copy the immediate sources of the derivation and the required
|
||||
outputs of the input derivations. */
|
||||
updateStep(ssSendingInputs);
|
||||
BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
|
||||
|
||||
StorePathSet inputs;
|
||||
BasicDerivation basicDrv(*step->drv);
|
||||
|
||||
for (auto & p : step->drv->inputSrcs)
|
||||
inputs.insert(p);
|
||||
|
||||
for (auto & input : step->drv->inputDrvs) {
|
||||
auto drv2 = localStore->readDerivation(input.first);
|
||||
for (auto & name : input.second) {
|
||||
if (auto i = get(drv2.outputs, name)) {
|
||||
auto outPath = i->path(*localStore, drv2.name, name);
|
||||
inputs.insert(*outPath);
|
||||
basicDrv.inputSrcs.insert(*outPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Ensure that the inputs exist in the destination store. This is
|
||||
a no-op for regular stores, but for the binary cache store,
|
||||
this will copy the inputs to the binary cache from the local
|
||||
store. */
|
||||
if (localStore != std::shared_ptr<Store>(destStore)) {
|
||||
copyClosure(*localStore, *destStore,
|
||||
step->drv->inputSrcs,
|
||||
NoRepair, NoCheckSigs, NoSubstitute);
|
||||
}
|
||||
|
||||
{
|
||||
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
||||
mc1.reset();
|
||||
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
||||
|
||||
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName);
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
/* Copy the input closure. */
|
||||
if (machine->isLocalhost()) {
|
||||
StorePathSet closure;
|
||||
destStore->computeFSClosure(inputs, closure);
|
||||
copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
|
||||
} else {
|
||||
copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true);
|
||||
}
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
autoDelete.cancel();
|
||||
logFileDel.cancel();
|
||||
|
||||
/* Truncate the log to get rid of messages about substitutions
|
||||
etc. on the remote system. */
|
||||
etc. on the remote system. */
|
||||
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
|
||||
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
|
||||
|
||||
@@ -330,89 +510,21 @@ void State::buildRemote(ref<Store> destStore,
|
||||
/* Do the build. */
|
||||
printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
|
||||
localStore->printStorePath(step->drvPath),
|
||||
machine->sshName);
|
||||
machine->storeUri.render());
|
||||
|
||||
updateStep(ssBuilding);
|
||||
|
||||
to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
|
||||
writeDerivation(to, *localStore, basicDrv);
|
||||
to << maxSilentTime << buildTimeout;
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
|
||||
to << maxLogSize;
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
||||
to << repeats // == build-repeat
|
||||
<< step->isDeterministic; // == enforce-determinism
|
||||
}
|
||||
to.flush();
|
||||
BuildResult buildResult = build_remote::performBuild(
|
||||
conn,
|
||||
*localStore,
|
||||
step->drvPath,
|
||||
resolvedDrv,
|
||||
buildOptions,
|
||||
nrStepsBuilding
|
||||
);
|
||||
|
||||
result.startTime = time(0);
|
||||
int res;
|
||||
{
|
||||
MaintainCount<counter> mc(nrStepsBuilding);
|
||||
res = readInt(from);
|
||||
}
|
||||
result.stopTime = time(0);
|
||||
result.updateWithBuildResult(buildResult);
|
||||
|
||||
result.errorMsg = readString(from);
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
||||
result.timesBuilt = readInt(from);
|
||||
result.isNonDeterministic = readInt(from);
|
||||
auto start = readInt(from);
|
||||
auto stop = readInt(from);
|
||||
if (start && start) {
|
||||
/* Note: this represents the duration of a single
|
||||
round, rather than all rounds. */
|
||||
result.startTime = start;
|
||||
result.stopTime = stop;
|
||||
}
|
||||
}
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
|
||||
worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
|
||||
}
|
||||
switch ((BuildResult::Status) res) {
|
||||
case BuildResult::Built:
|
||||
result.stepStatus = bsSuccess;
|
||||
break;
|
||||
case BuildResult::Substituted:
|
||||
case BuildResult::AlreadyValid:
|
||||
result.stepStatus = bsSuccess;
|
||||
result.isCached = true;
|
||||
break;
|
||||
case BuildResult::PermanentFailure:
|
||||
result.stepStatus = bsFailed;
|
||||
result.canCache = true;
|
||||
result.errorMsg = "";
|
||||
break;
|
||||
case BuildResult::InputRejected:
|
||||
case BuildResult::OutputRejected:
|
||||
result.stepStatus = bsFailed;
|
||||
result.canCache = true;
|
||||
break;
|
||||
case BuildResult::TransientFailure:
|
||||
result.stepStatus = bsFailed;
|
||||
result.canRetry = true;
|
||||
result.errorMsg = "";
|
||||
break;
|
||||
case BuildResult::TimedOut:
|
||||
result.stepStatus = bsTimedOut;
|
||||
result.errorMsg = "";
|
||||
break;
|
||||
case BuildResult::MiscFailure:
|
||||
result.stepStatus = bsAborted;
|
||||
result.canRetry = true;
|
||||
break;
|
||||
case BuildResult::LogLimitExceeded:
|
||||
result.stepStatus = bsLogLimitExceeded;
|
||||
break;
|
||||
case BuildResult::NotDeterministic:
|
||||
result.stepStatus = bsNotDeterministic;
|
||||
result.canRetry = false;
|
||||
result.canCache = true;
|
||||
break;
|
||||
default:
|
||||
result.stepStatus = bsAborted;
|
||||
break;
|
||||
}
|
||||
if (result.stepStatus != bsSuccess) return;
|
||||
|
||||
result.errorMsg = "";
|
||||
@@ -421,11 +533,32 @@ void State::buildRemote(ref<Store> destStore,
|
||||
get a build log. */
|
||||
if (result.isCached) {
|
||||
printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName);
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render());
|
||||
unlink(result.logFile.c_str());
|
||||
result.logFile = "";
|
||||
}
|
||||
|
||||
/* Throttle CPU-bound work. Opportunistically skip updating the current
|
||||
* step, since this requires a DB roundtrip. */
|
||||
if (!localWorkThrottler.try_acquire()) {
|
||||
MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
|
||||
updateStep(ssWaitingForLocalSlot);
|
||||
localWorkThrottler.acquire();
|
||||
}
|
||||
SemaphoreReleaser releaser(&localWorkThrottler);
|
||||
|
||||
/* Once we've started copying outputs, release the machine reservation
|
||||
* so further builds can happen. We do not release the machine earlier
|
||||
* to avoid situations where the queue runner is bottlenecked on
|
||||
* copying outputs and we end up building too many things that we
|
||||
* haven't been able to allow copy slots for. */
|
||||
reservation.reset();
|
||||
wakeDispatcher();
|
||||
|
||||
StorePathSet outputs;
|
||||
for (auto & [_, realisation] : buildResult.builtOutputs)
|
||||
outputs.insert(realisation.outPath);
|
||||
|
||||
/* Copy the output paths. */
|
||||
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
|
||||
updateStep(ssReceivingOutputs);
|
||||
@@ -434,39 +567,10 @@ void State::buildRemote(ref<Store> destStore,
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
StorePathSet outputs;
|
||||
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
|
||||
if (i.second.second)
|
||||
outputs.insert(*i.second.second);
|
||||
}
|
||||
auto infos = conn.queryPathInfos(*localStore, outputs);
|
||||
|
||||
/* Get info about each output path. */
|
||||
std::map<StorePath, ValidPathInfo> infos;
|
||||
size_t totalNarSize = 0;
|
||||
to << cmdQueryPathInfos;
|
||||
worker_proto::write(*localStore, to, outputs);
|
||||
to.flush();
|
||||
while (true) {
|
||||
auto storePathS = readString(from);
|
||||
if (storePathS == "") break;
|
||||
auto deriver = readString(from); // deriver
|
||||
auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
|
||||
readLongLong(from); // download size
|
||||
auto narSize = readLongLong(from);
|
||||
auto narHash = Hash::parseAny(readString(from), htSHA256);
|
||||
auto ca = parseContentAddressOpt(readString(from));
|
||||
readStrings<StringSet>(from); // sigs
|
||||
ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
|
||||
assert(outputs.count(info.path));
|
||||
info.references = references;
|
||||
info.narSize = narSize;
|
||||
totalNarSize += info.narSize;
|
||||
info.narHash = narHash;
|
||||
info.ca = ca;
|
||||
if (deriver != "")
|
||||
info.deriver = localStore->parseStorePath(deriver);
|
||||
infos.insert_or_assign(info.path, info);
|
||||
}
|
||||
for (auto & [_, info] : infos) totalNarSize += info.narSize;
|
||||
|
||||
if (totalNarSize > maxOutputSize) {
|
||||
result.stepStatus = bsNarSizeLimitExceeded;
|
||||
@@ -475,43 +579,32 @@ void State::buildRemote(ref<Store> destStore,
|
||||
|
||||
/* Copy each path. */
|
||||
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
|
||||
|
||||
auto pathsSorted = reverseTopoSortPaths(infos);
|
||||
|
||||
for (auto & path : pathsSorted) {
|
||||
auto & info = infos.find(path)->second;
|
||||
|
||||
/* Receive the NAR from the remote and add it to the
|
||||
destination store. Meanwhile, extract all the info from the
|
||||
NAR that getBuildOutput() needs. */
|
||||
auto source2 = sinkToSource([&](Sink & sink)
|
||||
{
|
||||
/* Note: we should only send the command to dump the store
|
||||
path to the remote if the NAR is actually going to get read
|
||||
by the destination store, which won't happen if this path
|
||||
is already valid on the destination store. Since this
|
||||
lambda function only gets executed if someone tries to read
|
||||
from source2, we will send the command from here rather
|
||||
than outside the lambda. */
|
||||
to << cmdDumpStorePath << localStore->printStorePath(path);
|
||||
to.flush();
|
||||
|
||||
TeeSource tee(from, sink);
|
||||
extractNarData(tee, localStore->printStorePath(path), narMembers);
|
||||
});
|
||||
|
||||
destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
|
||||
}
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);
|
||||
|
||||
build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
/* Register the outputs of the newly built drv */
|
||||
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
|
||||
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
|
||||
for (auto & [outputName, realisation] : buildResult.builtOutputs) {
|
||||
// Register the resolved drv output
|
||||
destStore->registerDrvOutput(realisation);
|
||||
|
||||
// Also register the unresolved one
|
||||
auto unresolvedRealisation = realisation;
|
||||
unresolvedRealisation.signatures.clear();
|
||||
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
|
||||
destStore->registerDrvOutput(unresolvedRealisation);
|
||||
}
|
||||
}
|
||||
|
||||
/* Shut down the connection. */
|
||||
child.to = -1;
|
||||
child.pid.wait();
|
||||
child->in = -1;
|
||||
child->sshPid.wait();
|
||||
|
||||
} catch (Error & e) {
|
||||
/* Disable this machine until a certain period of time has
|
||||
@@ -525,7 +618,7 @@ void State::buildRemote(ref<Store> destStore,
|
||||
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
|
||||
info->lastFailure = now;
|
||||
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
|
||||
printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
|
||||
printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
|
||||
info->disabledUntil = now + std::chrono::seconds(delta);
|
||||
}
|
||||
throw;
|
||||
|
@@ -1,7 +1,7 @@
|
||||
#include "hydra-build-result.hh"
|
||||
#include "store-api.hh"
|
||||
#include "util.hh"
|
||||
#include "fs-accessor.hh"
|
||||
#include <nix/store/store-api.hh>
|
||||
#include <nix/util/util.hh>
|
||||
#include <nix/util/source-accessor.hh>
|
||||
|
||||
#include <regex>
|
||||
|
||||
@@ -11,18 +11,18 @@ using namespace nix;
|
||||
BuildOutput getBuildOutput(
|
||||
nix::ref<Store> store,
|
||||
NarMemberDatas & narMembers,
|
||||
const Derivation & drv)
|
||||
const OutputPathMap derivationOutputs)
|
||||
{
|
||||
BuildOutput res;
|
||||
|
||||
/* Compute the closure size. */
|
||||
StorePathSet outputs;
|
||||
StorePathSet closure;
|
||||
for (auto & i : drv.outputsAndOptPaths(*store))
|
||||
if (i.second.second) {
|
||||
store->computeFSClosure(*i.second.second, closure);
|
||||
outputs.insert(*i.second.second);
|
||||
}
|
||||
for (auto& [outputName, outputPath] : derivationOutputs) {
|
||||
store->computeFSClosure(outputPath, closure);
|
||||
outputs.insert(outputPath);
|
||||
res.outputs.insert({outputName, outputPath});
|
||||
}
|
||||
for (auto & path : closure) {
|
||||
auto info = store->queryPathInfo(path);
|
||||
res.closureSize += info->narSize;
|
||||
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
|
||||
|
||||
auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
|
||||
if (productsFile == narMembers.end() ||
|
||||
productsFile->second.type != FSAccessor::Type::tRegular)
|
||||
productsFile->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
assert(productsFile->second.contents);
|
||||
|
||||
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
|
||||
|
||||
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
|
||||
|
||||
if (file->second.type == FSAccessor::Type::tRegular) {
|
||||
if (file->second.type == SourceAccessor::Type::tRegular) {
|
||||
product.isRegular = true;
|
||||
product.fileSize = file->second.fileSize.value();
|
||||
product.sha256hash = file->second.sha256.value();
|
||||
@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(
|
||||
/* If no build products were explicitly declared, then add all
|
||||
outputs as a product of type "nix-build". */
|
||||
if (!explicitProducts) {
|
||||
for (auto & [name, output] : drv.outputs) {
|
||||
for (auto & [name, output] : derivationOutputs) {
|
||||
BuildProduct product;
|
||||
auto outPath = output.path(*store, drv.name, name);
|
||||
product.path = store->printStorePath(*outPath);
|
||||
product.path = store->printStorePath(output);
|
||||
product.type = "nix-build";
|
||||
product.subtype = name == "out" ? "" : name;
|
||||
product.name = outPath->name();
|
||||
product.name = output.name();
|
||||
|
||||
auto file = narMembers.find(product.path);
|
||||
assert(file != narMembers.end());
|
||||
if (file->second.type == FSAccessor::Type::tDirectory)
|
||||
if (file->second.type == SourceAccessor::Type::tDirectory)
|
||||
res.products.push_back(product);
|
||||
}
|
||||
}
|
||||
@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(
|
||||
for (auto & output : outputs) {
|
||||
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
|
||||
if (file == narMembers.end() ||
|
||||
file->second.type != FSAccessor::Type::tRegular)
|
||||
file->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
res.releaseName = trim(file->second.contents.value());
|
||||
// FIXME: validate release name
|
||||
@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(
|
||||
for (auto & output : outputs) {
|
||||
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
|
||||
if (file == narMembers.end() ||
|
||||
file->second.type != FSAccessor::Type::tRegular)
|
||||
file->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
|
||||
auto fields = tokenizeString<std::vector<std::string>>(line);
|
||||
|
@@ -2,8 +2,8 @@
|
||||
|
||||
#include "state.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include "finally.hh"
|
||||
#include "binary-cache-store.hh"
|
||||
#include <nix/util/finally.hh>
|
||||
#include <nix/store/binary-cache-store.hh>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
|
||||
}
|
||||
|
||||
|
||||
void State::builder(MachineReservation::ptr reservation)
|
||||
void State::builder(std::unique_ptr<MachineReservation> reservation)
|
||||
{
|
||||
setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
|
||||
|
||||
@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)
|
||||
activeSteps_.lock()->erase(activeStep);
|
||||
});
|
||||
|
||||
std::string machine = reservation->machine->storeUri.render();
|
||||
|
||||
try {
|
||||
auto destStore = getDestStore();
|
||||
res = doBuildStep(destStore, reservation, activeStep);
|
||||
// Might release the reservation.
|
||||
res = doBuildStep(destStore, std::move(reservation), activeStep);
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
|
||||
localStore->printStorePath(reservation->step->drvPath),
|
||||
reservation->machine->sshName,
|
||||
localStore->printStorePath(activeStep->step->drvPath),
|
||||
machine,
|
||||
e.what());
|
||||
}
|
||||
}
|
||||
|
||||
/* Release the machine and wake up the dispatcher. */
|
||||
assert(reservation.unique());
|
||||
reservation = 0;
|
||||
wakeDispatcher();
|
||||
|
||||
/* If there was a temporary failure, retry the step after an
|
||||
exponentially increasing interval. */
|
||||
Step::ptr step = wstep.lock();
|
||||
@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)
|
||||
|
||||
|
||||
State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
MachineReservation::ptr reservation,
|
||||
std::unique_ptr<MachineReservation> reservation,
|
||||
std::shared_ptr<ActiveStep> activeStep)
|
||||
{
|
||||
auto & step(reservation->step);
|
||||
auto & machine(reservation->machine);
|
||||
auto step(reservation->step);
|
||||
auto machine(reservation->machine);
|
||||
|
||||
{
|
||||
auto step_(step->state.lock());
|
||||
@@ -98,8 +96,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
it). */
|
||||
BuildID buildId;
|
||||
std::optional<StorePath> buildDrvPath;
|
||||
unsigned int maxSilentTime, buildTimeout;
|
||||
unsigned int repeats = step->isDeterministic ? 1 : 0;
|
||||
// Other fields set below
|
||||
nix::ServeProto::BuildOptions buildOptions {
|
||||
.maxLogSize = maxLogSize,
|
||||
.nrRepeats = step->isDeterministic ? 1u : 0u,
|
||||
.enforceDeterminism = step->isDeterministic,
|
||||
.keepFailed = false,
|
||||
};
|
||||
|
||||
auto conn(dbPool.get());
|
||||
|
||||
@@ -134,18 +137,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
{
|
||||
auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
|
||||
if (i != jobsetRepeats.end())
|
||||
repeats = std::max(repeats, i->second);
|
||||
buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
|
||||
}
|
||||
}
|
||||
if (!build) build = *dependents.begin();
|
||||
|
||||
buildId = build->id;
|
||||
buildDrvPath = build->drvPath;
|
||||
maxSilentTime = build->maxSilentTime;
|
||||
buildTimeout = build->buildTimeout;
|
||||
buildOptions.maxSilentTime = build->maxSilentTime;
|
||||
buildOptions.buildTimeout = build->buildTimeout;
|
||||
|
||||
printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
|
||||
localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
|
||||
localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
|
||||
}
|
||||
|
||||
if (!buildOneDone)
|
||||
@@ -173,7 +176,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
unlink(result.logFile.c_str());
|
||||
}
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
ignoreExceptionInDestructor();
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -191,7 +194,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
{
|
||||
auto mc = startDbUpdate();
|
||||
pqxx::work txn(*conn);
|
||||
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
|
||||
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
@@ -206,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
|
||||
try {
|
||||
/* FIXME: referring builds may have conflicting timeouts. */
|
||||
buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
|
||||
buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
|
||||
} catch (Error & e) {
|
||||
if (activeStep->state_.lock()->cancelled) {
|
||||
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
|
||||
@@ -221,7 +224,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
|
||||
if (result.stepStatus == bsSuccess) {
|
||||
updateStep(ssPostProcessing);
|
||||
res = getBuildOutput(destStore, narMembers, *step->drv);
|
||||
res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -248,7 +251,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
/* Finish the step in the database. */
|
||||
if (stepNr) {
|
||||
pqxx::work txn(*conn);
|
||||
finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
|
||||
finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
@@ -256,7 +259,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
issue). Retry a number of times. */
|
||||
if (result.canRetry) {
|
||||
printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
|
||||
assert(stepNr);
|
||||
bool retry;
|
||||
{
|
||||
@@ -275,9 +278,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
|
||||
assert(stepNr);
|
||||
|
||||
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
|
||||
if (i.second.second)
|
||||
addRoot(*i.second.second);
|
||||
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
|
||||
if (!optOutputPath)
|
||||
throw Error(
|
||||
"Missing output %s for derivation %d which was supposed to have succeeded",
|
||||
outputName, localStore->printStorePath(step->drvPath));
|
||||
addRoot(*optOutputPath);
|
||||
}
|
||||
|
||||
/* Register success in the database for all Build objects that
|
||||
@@ -323,7 +329,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
pqxx::work txn(*conn);
|
||||
|
||||
for (auto & b : direct) {
|
||||
printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
|
||||
printInfo("marking build %1% as succeeded", b->id);
|
||||
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
|
||||
result.startTime, result.stopTime);
|
||||
}
|
||||
@@ -398,7 +404,7 @@ void State::failStep(
|
||||
Step::ptr step,
|
||||
BuildID buildId,
|
||||
const RemoteResult & result,
|
||||
Machine::ptr machine,
|
||||
::Machine::ptr machine,
|
||||
bool & stepFinished)
|
||||
{
|
||||
/* Register failure in the database for all Build objects that
|
||||
@@ -444,14 +450,14 @@ void State::failStep(
|
||||
build->finishedInDB)
|
||||
continue;
|
||||
createBuildStep(txn,
|
||||
0, build->id, step, machine ? machine->sshName : "",
|
||||
0, build->id, step, machine ? machine->storeUri.render() : "",
|
||||
result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
|
||||
}
|
||||
|
||||
/* Mark all builds that depend on this derivation as failed. */
|
||||
for (auto & build : indirect) {
|
||||
if (build->finishedInDB) continue;
|
||||
printMsg(lvlError, format("marking build %1% as failed") % build->id);
|
||||
printError("marking build %1% as failed", build->id);
|
||||
txn.exec_params0
|
||||
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
|
||||
build->id,
|
||||
|
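The builder.cc hunks above change the machine reservation from a shared MachineReservation::ptr, which the builder thread had to drop by hand (assert(reservation.unique()); reservation = 0;), into a std::unique_ptr that is moved into doBuildStep and on into buildRemote, so the machine slot can be released as soon as the remote build is done. A minimal sketch of that ownership pattern, using simplified placeholder types rather than the actual Hydra classes:

    #include <memory>
    #include <utility>

    // Stand-in for MachineReservation: the destructor is what frees the
    // machine slot and wakes the dispatcher.
    struct Reservation {
        ~Reservation() { /* release slot, wake dispatcher */ }
    };

    // The callee owns the reservation and may drop it as soon as the remote
    // build has finished, instead of holding the slot while database updates
    // and NAR post-processing are still running.
    void doStep(std::unique_ptr<Reservation> reservation) {
        // ... run the remote build ...
        reservation.reset(); // slot released here, not at end of scope
        // ... finish bookkeeping while another step can already be scheduled ...
    }

    void builderThread() {
        auto reservation = std::make_unique<Reservation>();
        doStep(std::move(reservation)); // ownership transferred; local pointer is now empty
    }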
@@ -2,6 +2,7 @@
|
||||
#include <cmath>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "state.hh"
|
||||
|
||||
@@ -39,28 +40,34 @@ void State::dispatcher()
|
||||
printMsg(lvlDebug, "dispatcher woken up");
|
||||
nrDispatcherWakeups++;
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
auto t_before_work = std::chrono::steady_clock::now();
|
||||
|
||||
auto sleepUntil = doDispatch();
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
auto t_after_work = std::chrono::steady_clock::now();
|
||||
|
||||
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
prom.dispatcher_time_spent_running.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
|
||||
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
|
||||
|
||||
/* Sleep until we're woken up (either because a runnable build
|
||||
is added, or because a build finishes). */
|
||||
{
|
||||
auto dispatcherWakeup_(dispatcherWakeup.lock());
|
||||
if (!*dispatcherWakeup_) {
|
||||
printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
|
||||
debug("dispatcher sleeping for %1%s",
|
||||
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
|
||||
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
|
||||
}
|
||||
*dispatcherWakeup_ = false;
|
||||
}
|
||||
|
||||
auto t_after_sleep = std::chrono::steady_clock::now();
|
||||
prom.dispatcher_time_spent_waiting.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
|
||||
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, format("dispatcher: %1%") % e.what());
|
||||
printError("dispatcher: %s", e.what());
|
||||
sleep(1);
|
||||
}
|
||||
|
||||
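The dispatcher hunk above splits the old dispatchTimeMs measurement into two prometheus-cpp counters, one for time spent dispatching and one for time spent waiting to be woken up. The measurement pattern looks roughly like this sketch (the metric names and the free-standing function are placeholders; in Hydra the counters live in State::PromMetrics):

    #include <chrono>
    #include <prometheus/counter.h>

    void dispatchOnce(prometheus::Counter & timeRunning, prometheus::Counter & timeWaiting) {
        auto t_before_work = std::chrono::steady_clock::now();

        // ... do the actual dispatching ...

        auto t_after_work = std::chrono::steady_clock::now();
        timeRunning.Increment(std::chrono::duration_cast<std::chrono::microseconds>(
            t_after_work - t_before_work).count());

        // ... sleep until a runnable step is added or a build finishes ...

        auto t_after_sleep = std::chrono::steady_clock::now();
        timeWaiting.Increment(std::chrono::duration_cast<std::chrono::microseconds>(
            t_after_sleep - t_after_work).count());
    }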
@@ -80,17 +87,124 @@ system_time State::doDispatch()
|
||||
jobset.second->pruneSteps();
|
||||
auto s2 = jobset.second->shareUsed();
|
||||
if (s1 != s2)
|
||||
printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
|
||||
% jobset.first.first % jobset.first.second % s1 % s2);
|
||||
debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
|
||||
jobset.first.first, jobset.first.second, s1, s2);
|
||||
}
|
||||
}
|
||||
|
||||
system_time now = std::chrono::system_clock::now();
|
||||
|
||||
/* Start steps until we're out of steps or slots. */
|
||||
auto sleepUntil = system_time::max();
|
||||
bool keepGoing;
|
||||
|
||||
/* Sort the runnable steps by priority. Priority is established
|
||||
as follows (in order of precedence):
|
||||
|
||||
- The global priority of the builds that depend on the
|
||||
step. This allows admins to bump a build to the front of
|
||||
the queue.
|
||||
|
||||
- The lowest used scheduling share of the jobsets depending
|
||||
on the step.
|
||||
|
||||
- The local priority of the build, as set via the build's
|
||||
meta.schedulingPriority field. Note that this is not
|
||||
quite correct: the local priority should only be used to
|
||||
establish priority between builds in the same jobset, but
|
||||
here it's used between steps in different jobsets if they
|
||||
happen to have the same lowest used scheduling share. But
|
||||
that's not very likely.
|
||||
|
||||
- The lowest ID of the builds depending on the step;
|
||||
i.e. older builds take priority over new ones.
|
||||
|
||||
FIXME: O(n lg n); obviously, it would be better to keep a
|
||||
runnable queue sorted by priority. */
|
||||
struct StepInfo
|
||||
{
|
||||
Step::ptr step;
|
||||
bool alreadyScheduled = false;
|
||||
|
||||
/* The lowest share used of any jobset depending on this
|
||||
step. */
|
||||
double lowestShareUsed = 1e9;
|
||||
|
||||
/* Info copied from step->state to ensure that the
|
||||
comparator is a partial ordering (see MachineInfo). */
|
||||
int highestGlobalPriority;
|
||||
int highestLocalPriority;
|
||||
size_t numRequiredSystemFeatures;
|
||||
size_t numRevDeps;
|
||||
BuildID lowestBuildID;
|
||||
|
||||
StepInfo(Step::ptr step, Step::State & step_) : step(step)
|
||||
{
|
||||
for (auto & jobset : step_.jobsets)
|
||||
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
|
||||
highestGlobalPriority = step_.highestGlobalPriority;
|
||||
highestLocalPriority = step_.highestLocalPriority;
|
||||
numRequiredSystemFeatures = step->requiredSystemFeatures.size();
|
||||
numRevDeps = step_.rdeps.size();
|
||||
lowestBuildID = step_.lowestBuildID;
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<StepInfo> runnableSorted;
|
||||
|
||||
struct RunnablePerType
|
||||
{
|
||||
unsigned int count{0};
|
||||
std::chrono::seconds waitTime{0};
|
||||
};
|
||||
|
||||
std::unordered_map<std::string, RunnablePerType> runnablePerType;
|
||||
|
||||
{
|
||||
auto runnable_(runnable.lock());
|
||||
runnableSorted.reserve(runnable_->size());
|
||||
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
|
||||
auto step = i->lock();
|
||||
|
||||
/* Remove dead steps. */
|
||||
if (!step) {
|
||||
i = runnable_->erase(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
++i;
|
||||
|
||||
auto & r = runnablePerType[step->systemType];
|
||||
r.count++;
|
||||
|
||||
/* Skip previously failed steps that aren't ready
|
||||
to be retried. */
|
||||
auto step_(step->state.lock());
|
||||
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
|
||||
if (step_->tries > 0 && step_->after > now) {
|
||||
if (step_->after < sleepUntil)
|
||||
sleepUntil = step_->after;
|
||||
continue;
|
||||
}
|
||||
|
||||
runnableSorted.emplace_back(step, *step_);
|
||||
}
|
||||
}
|
||||
|
||||
sort(runnableSorted.begin(), runnableSorted.end(),
|
||||
[](const StepInfo & a, const StepInfo & b)
|
||||
{
|
||||
return
|
||||
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
|
||||
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
|
||||
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
|
||||
a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
|
||||
a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
|
||||
a.lowestBuildID < b.lowestBuildID;
|
||||
});
|
||||
|
||||
do {
|
||||
system_time now = std::chrono::system_clock::now();
|
||||
now = std::chrono::system_clock::now();
|
||||
|
||||
/* Copy the currentJobs field of each machine. This is
|
||||
necessary to ensure that the sort comparator below is
|
||||
@@ -98,7 +212,7 @@ system_time State::doDispatch()
|
||||
filter out temporarily disabled machines. */
|
||||
struct MachineInfo
|
||||
{
|
||||
Machine::ptr machine;
|
||||
::Machine::ptr machine;
|
||||
unsigned long currentJobs;
|
||||
};
|
||||
std::vector<MachineInfo> machinesSorted;
|
||||
@@ -138,104 +252,6 @@ system_time State::doDispatch()
|
||||
a.currentJobs > b.currentJobs;
|
||||
});
|
||||
|
||||
/* Sort the runnable steps by priority. Priority is established
|
||||
as follows (in order of precedence):
|
||||
|
||||
- The global priority of the builds that depend on the
|
||||
step. This allows admins to bump a build to the front of
|
||||
the queue.
|
||||
|
||||
- The lowest used scheduling share of the jobsets depending
|
||||
on the step.
|
||||
|
||||
- The local priority of the build, as set via the build's
|
||||
meta.schedulingPriority field. Note that this is not
|
||||
quite correct: the local priority should only be used to
|
||||
establish priority between builds in the same jobset, but
|
||||
here it's used between steps in different jobsets if they
|
||||
happen to have the same lowest used scheduling share. But
|
||||
that's not very likely.
|
||||
|
||||
- The lowest ID of the builds depending on the step;
|
||||
i.e. older builds take priority over new ones.
|
||||
|
||||
FIXME: O(n lg n); obviously, it would be better to keep a
|
||||
runnable queue sorted by priority. */
|
||||
struct StepInfo
|
||||
{
|
||||
Step::ptr step;
|
||||
|
||||
/* The lowest share used of any jobset depending on this
|
||||
step. */
|
||||
double lowestShareUsed = 1e9;
|
||||
|
||||
/* Info copied from step->state to ensure that the
|
||||
comparator is a partial ordering (see MachineInfo). */
|
||||
int highestGlobalPriority;
|
||||
int highestLocalPriority;
|
||||
BuildID lowestBuildID;
|
||||
|
||||
StepInfo(Step::ptr step, Step::State & step_) : step(step)
|
||||
{
|
||||
for (auto & jobset : step_.jobsets)
|
||||
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
|
||||
highestGlobalPriority = step_.highestGlobalPriority;
|
||||
highestLocalPriority = step_.highestLocalPriority;
|
||||
lowestBuildID = step_.lowestBuildID;
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<StepInfo> runnableSorted;
|
||||
|
||||
struct RunnablePerType
|
||||
{
|
||||
unsigned int count{0};
|
||||
std::chrono::seconds waitTime{0};
|
||||
};
|
||||
|
||||
std::unordered_map<std::string, RunnablePerType> runnablePerType;
|
||||
|
||||
{
|
||||
auto runnable_(runnable.lock());
|
||||
runnableSorted.reserve(runnable_->size());
|
||||
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
|
||||
auto step = i->lock();
|
||||
|
||||
/* Remove dead steps. */
|
||||
if (!step) {
|
||||
i = runnable_->erase(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
++i;
|
||||
|
||||
auto & r = runnablePerType[step->systemType];
|
||||
r.count++;
|
||||
|
||||
/* Skip previously failed steps that aren't ready
|
||||
to be retried. */
|
||||
auto step_(step->state.lock());
|
||||
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
|
||||
if (step_->tries > 0 && step_->after > now) {
|
||||
if (step_->after < sleepUntil)
|
||||
sleepUntil = step_->after;
|
||||
continue;
|
||||
}
|
||||
|
||||
runnableSorted.emplace_back(step, *step_);
|
||||
}
|
||||
}
|
||||
|
||||
sort(runnableSorted.begin(), runnableSorted.end(),
|
||||
[](const StepInfo & a, const StepInfo & b)
|
||||
{
|
||||
return
|
||||
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
|
||||
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
|
||||
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
|
||||
a.lowestBuildID < b.lowestBuildID;
|
||||
});
|
||||
|
||||
/* Find a machine with a free slot and find a step to run
|
||||
on it. Once we find such a pair, we restart the outer
|
||||
loop because the machine sorting will have changed. */
|
||||
@@ -245,12 +261,14 @@ system_time State::doDispatch()
|
||||
if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
|
||||
|
||||
for (auto & stepInfo : runnableSorted) {
|
||||
if (stepInfo.alreadyScheduled) continue;
|
||||
|
||||
auto & step(stepInfo.step);
|
||||
|
||||
/* Can this machine do this step? */
|
||||
if (!mi.machine->supportsStep(step)) {
|
||||
debug("machine '%s' does not support step '%s' (system type '%s')",
|
||||
mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
|
||||
mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -271,10 +289,12 @@ system_time State::doDispatch()
|
||||
r.count--;
|
||||
}
|
||||
|
||||
stepInfo.alreadyScheduled = true;
|
||||
|
||||
/* Make a slot reservation and start a thread to
|
||||
do the build. */
|
||||
auto builderThread = std::thread(&State::builder, this,
|
||||
std::make_shared<MachineReservation>(*this, step, mi.machine));
|
||||
std::make_unique<MachineReservation>(*this, step, mi.machine));
|
||||
builderThread.detach(); // FIXME?
|
||||
|
||||
keepGoing = true;
|
||||
@@ -428,7 +448,7 @@ void Jobset::pruneSteps()
|
||||
}
|
||||
|
||||
|
||||
State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
|
||||
State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
|
||||
: state(state), step(step), machine(machine)
|
||||
{
|
||||
machine->state->currentJobs++;
|
||||
|
@@ -2,9 +2,9 @@
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "hash.hh"
|
||||
#include "derivations.hh"
|
||||
#include "store-api.hh"
|
||||
#include <nix/util/hash.hh>
|
||||
#include <nix/store/derivations.hh>
|
||||
#include <nix/store/store-api.hh>
|
||||
#include "nar-extractor.hh"
|
||||
|
||||
struct BuildProduct
|
||||
@@ -36,10 +36,12 @@ struct BuildOutput
|
||||
|
||||
std::list<BuildProduct> products;
|
||||
|
||||
std::map<std::string, nix::StorePath> outputs;
|
||||
|
||||
std::map<std::string, BuildMetric> metrics;
|
||||
};
|
||||
|
||||
BuildOutput getBuildOutput(
|
||||
nix::ref<nix::Store> store,
|
||||
NarMemberDatas & narMembers,
|
||||
const nix::Derivation & drv);
|
||||
const nix::OutputPathMap derivationOutputs);
|
||||
|
@@ -1,6 +1,7 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <optional>
|
||||
#include <type_traits>
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
@@ -8,27 +9,21 @@
|
||||
|
||||
#include <prometheus/exposer.h>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#include <nix/util/signals.hh>
|
||||
#include "state.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include "store-api.hh"
|
||||
#include "remote-store.hh"
|
||||
#include <nix/store/store-open.hh>
|
||||
#include <nix/store/remote-store.hh>
|
||||
|
||||
#include "globals.hh"
|
||||
#include <nix/store/globals.hh>
|
||||
#include "hydra-config.hh"
|
||||
#include "json.hh"
|
||||
#include "s3-binary-cache-store.hh"
|
||||
#include "shared.hh"
|
||||
#include <nix/store/s3-binary-cache-store.hh>
|
||||
#include <nix/main/shared.hh>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
|
||||
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
|
||||
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
|
||||
|
||||
}
|
||||
using nlohmann::json;
|
||||
|
||||
|
||||
std::string getEnvOrDie(const std::string & key)
|
||||
@@ -75,10 +70,31 @@ State::PromMetrics::PromMetrics()
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_max_id(
|
||||
prometheus::BuildGauge()
|
||||
.Name("hydraqueuerunner_queue_max_build_id_info")
|
||||
.Help("Maximum build record ID in the queue")
|
||||
, dispatcher_time_spent_running(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_dispatcher_time_spent_running")
|
||||
.Help("Time (in micros) spent running the dispatcher")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, dispatcher_time_spent_waiting(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_dispatcher_time_spent_waiting")
|
||||
.Help("Time (in micros) spent waiting for the dispatcher to obtain work")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_monitor_time_spent_running(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_monitor_time_spent_running")
|
||||
.Help("Time (in micros) spent running the queue monitor")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_monitor_time_spent_waiting(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
|
||||
.Help("Time (in micros) spent waiting for the queue monitor to obtain work")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
@@ -90,6 +106,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
|
||||
: config(std::make_unique<HydraConfig>())
|
||||
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
|
||||
, dbPool(config->getIntOption("max_db_connections", 128))
|
||||
, localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
|
||||
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
|
||||
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
|
||||
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
|
||||
@@ -140,50 +157,29 @@ void State::parseMachines(const std::string & contents)
|
||||
oldMachines = *machines_;
|
||||
}
|
||||
|
||||
for (auto line : tokenizeString<Strings>(contents, "\n")) {
|
||||
line = trim(std::string(line, 0, line.find('#')));
|
||||
auto tokens = tokenizeString<std::vector<std::string>>(line);
|
||||
if (tokens.size() < 3) continue;
|
||||
tokens.resize(8);
|
||||
|
||||
auto machine = std::make_shared<Machine>();
|
||||
machine->sshName = tokens[0];
|
||||
machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
|
||||
machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2];
|
||||
if (tokens[3] != "")
|
||||
machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
|
||||
else
|
||||
machine->maxJobs = 1;
|
||||
machine->speedFactor = atof(tokens[4].c_str());
|
||||
if (tokens[5] == "-") tokens[5] = "";
|
||||
machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
|
||||
if (tokens[6] == "-") tokens[6] = "";
|
||||
machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
|
||||
for (auto & f : machine->mandatoryFeatures)
|
||||
machine->supportedFeatures.insert(f);
|
||||
if (tokens[7] != "" && tokens[7] != "-")
|
||||
machine->sshPublicHostKey = base64Decode(tokens[7]);
|
||||
for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
|
||||
auto machine = std::make_shared<::Machine>(std::move(machine_));
|
||||
|
||||
/* Re-use the State object of the previous machine with the
|
||||
same name. */
|
||||
auto i = oldMachines.find(machine->sshName);
|
||||
auto i = oldMachines.find(machine->storeUri.variant);
|
||||
if (i == oldMachines.end())
|
||||
printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
|
||||
printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
|
||||
else
|
||||
printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
|
||||
printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
|
||||
machine->state = i == oldMachines.end()
|
||||
? std::make_shared<Machine::State>()
|
||||
? std::make_shared<::Machine::State>()
|
||||
: i->second->state;
|
||||
newMachines[machine->sshName] = machine;
|
||||
newMachines[machine->storeUri.variant] = machine;
|
||||
}
|
||||
|
||||
for (auto & m : oldMachines)
|
||||
if (newMachines.find(m.first) == newMachines.end()) {
|
||||
if (m.second->enabled)
|
||||
printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
|
||||
/* Add a disabled Machine object to make sure stats are
|
||||
printInfo("removing machine ‘%1%’", m.second->storeUri.render());
|
||||
/* Add a disabled ::Machine object to make sure stats are
|
||||
maintained. */
|
||||
auto machine = std::make_shared<Machine>(*(m.second));
|
||||
auto machine = std::make_shared<::Machine>(*(m.second));
|
||||
machine->enabled = false;
|
||||
newMachines[m.first] = machine;
|
||||
}
|
||||
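For reference, a line in the build-machines file that the removed hand-written parser above consumed, and that nix::Machine::parseConfig now handles, has roughly this shape (the host, key path and feature names below are illustrative placeholders; "-" means unset):

    ssh://builder@example.org x86_64-linux,i686-linux /etc/nix/builder_key 8 2 kvm,nixos-test big-parallel -

The columns match the old tokens[0..7]: store URI, system types, SSH key, max jobs, speed factor, supported features, mandatory features, and a base64-encoded SSH public host key.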
@@ -211,7 +207,7 @@ void State::monitorMachinesFile()
|
||||
parseMachines("localhost " +
|
||||
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
|
||||
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
|
||||
+ concatStringsSep(",", settings.systemFeatures.get()));
|
||||
+ concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
|
||||
machinesReadyLock.unlock();
|
||||
return;
|
||||
}
|
||||
@@ -318,10 +314,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
|
||||
|
||||
if (r.affected_rows() == 0) goto restart;
|
||||
|
||||
for (auto & [name, output] : step->drv->outputs)
|
||||
for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
|
||||
txn.exec_params0
|
||||
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
|
||||
buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
|
||||
buildId, stepNr, name,
|
||||
output
|
||||
? std::optional { localStore->printStorePath(*output)}
|
||||
: std::nullopt);
|
||||
|
||||
if (status == bsBusy)
|
||||
txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
|
||||
@@ -358,11 +357,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
|
||||
assert(result.logFile.find('\t') == std::string::npos);
|
||||
txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
|
||||
buildId, stepNr, result.logFile));
|
||||
|
||||
if (result.stepStatus == bsSuccess) {
|
||||
// Update the corresponding `BuildStepOutputs` row to add the output path
|
||||
auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
|
||||
assert(res.size());
|
||||
StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
|
||||
// If we've finished building, all the paths should be known
|
||||
for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
|
||||
txn.exec_params0
|
||||
("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
|
||||
buildId, stepNr, name, localStore->printStorePath(output));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
|
||||
Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath)
|
||||
Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
|
||||
{
|
||||
restart:
|
||||
auto stepNr = allocBuildStep(txn, build->id);
|
||||
@@ -463,6 +474,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
|
||||
res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
|
||||
isCachedBuild ? 1 : 0);
|
||||
|
||||
for (auto & [outputName, outputPath] : res.outputs) {
|
||||
txn.exec_params0
|
||||
("update BuildOutputs set path = $3 where build = $1 and name = $2",
|
||||
build->id,
|
||||
outputName,
|
||||
localStore->printStorePath(outputPath)
|
||||
);
|
||||
}
|
||||
|
||||
txn.exec_params0("delete from BuildProducts where build = $1", build->id);
|
||||
|
||||
unsigned int productNr = 1;
|
||||
@@ -474,7 +494,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
|
||||
product.type,
|
||||
product.subtype,
|
||||
product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
|
||||
product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
|
||||
product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
|
||||
product.path,
|
||||
product.name,
|
||||
product.defaultPath);
|
||||
@@ -542,182 +562,174 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
|
||||
|
||||
void State::dumpStatus(Connection & conn)
|
||||
{
|
||||
std::ostringstream out;
|
||||
time_t now = time(0);
|
||||
json statusJson = {
|
||||
{"status", "up"},
|
||||
{"time", time(0)},
|
||||
{"uptime", now - startedAt},
|
||||
{"pid", getpid()},
|
||||
|
||||
{"nrQueuedBuilds", builds.lock()->size()},
|
||||
{"nrActiveSteps", activeSteps_.lock()->size()},
|
||||
{"nrStepsBuilding", nrStepsBuilding.load()},
|
||||
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
|
||||
{"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
|
||||
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
|
||||
{"nrStepsWaiting", nrStepsWaiting.load()},
|
||||
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
|
||||
{"bytesSent", bytesSent.load()},
|
||||
{"bytesReceived", bytesReceived.load()},
|
||||
{"nrBuildsRead", nrBuildsRead.load()},
|
||||
{"buildReadTimeMs", buildReadTimeMs.load()},
|
||||
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
|
||||
{"nrBuildsDone", nrBuildsDone.load()},
|
||||
{"nrStepsStarted", nrStepsStarted.load()},
|
||||
{"nrStepsDone", nrStepsDone.load()},
|
||||
{"nrRetries", nrRetries.load()},
|
||||
{"maxNrRetries", maxNrRetries.load()},
|
||||
{"nrQueueWakeups", nrQueueWakeups.load()},
|
||||
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
|
||||
{"dispatchTimeMs", dispatchTimeMs.load()},
|
||||
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
|
||||
{"nrDbConnections", dbPool.count()},
|
||||
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
|
||||
};
|
||||
{
|
||||
JSONObject root(out);
|
||||
time_t now = time(0);
|
||||
root.attr("status", "up");
|
||||
root.attr("time", time(0));
|
||||
root.attr("uptime", now - startedAt);
|
||||
root.attr("pid", getpid());
|
||||
{
|
||||
auto builds_(builds.lock());
|
||||
root.attr("nrQueuedBuilds", builds_->size());
|
||||
}
|
||||
{
|
||||
auto steps_(steps.lock());
|
||||
for (auto i = steps_->begin(); i != steps_->end(); )
|
||||
if (i->second.lock()) ++i; else i = steps_->erase(i);
|
||||
root.attr("nrUnfinishedSteps", steps_->size());
|
||||
statusJson["nrUnfinishedSteps"] = steps_->size();
|
||||
}
|
||||
{
|
||||
auto runnable_(runnable.lock());
|
||||
for (auto i = runnable_->begin(); i != runnable_->end(); )
|
||||
if (i->lock()) ++i; else i = runnable_->erase(i);
|
||||
root.attr("nrRunnableSteps", runnable_->size());
|
||||
statusJson["nrRunnableSteps"] = runnable_->size();
|
||||
}
|
||||
root.attr("nrActiveSteps", activeSteps_.lock()->size());
|
||||
root.attr("nrStepsBuilding", nrStepsBuilding);
|
||||
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
|
||||
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
|
||||
root.attr("nrStepsWaiting", nrStepsWaiting);
|
||||
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
|
||||
root.attr("bytesSent", bytesSent);
|
||||
root.attr("bytesReceived", bytesReceived);
|
||||
root.attr("nrBuildsRead", nrBuildsRead);
|
||||
root.attr("buildReadTimeMs", buildReadTimeMs);
|
||||
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
|
||||
root.attr("nrBuildsDone", nrBuildsDone);
|
||||
root.attr("nrStepsStarted", nrStepsStarted);
|
||||
root.attr("nrStepsDone", nrStepsDone);
|
||||
root.attr("nrRetries", nrRetries);
|
||||
root.attr("maxNrRetries", maxNrRetries);
|
||||
if (nrStepsDone) {
|
||||
root.attr("totalStepTime", totalStepTime);
|
||||
root.attr("totalStepBuildTime", totalStepBuildTime);
|
||||
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
|
||||
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
|
||||
statusJson["totalStepTime"] = totalStepTime.load();
|
||||
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
|
||||
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
|
||||
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
|
||||
}
|
||||
root.attr("nrQueueWakeups", nrQueueWakeups);
|
||||
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
|
||||
root.attr("dispatchTimeMs", dispatchTimeMs);
|
||||
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
|
||||
root.attr("nrDbConnections", dbPool.count());
|
||||
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
|
||||
|
||||
{
|
||||
auto nested = root.object("machines");
|
||||
auto machines_json = json::object();
|
||||
auto machines_(machines.lock());
|
||||
for (auto & i : *machines_) {
|
||||
auto & m(i.second);
|
||||
auto & s(m->state);
|
||||
auto nested2 = nested.object(m->sshName);
|
||||
nested2.attr("enabled", m->enabled);
|
||||
|
||||
{
|
||||
auto list = nested2.list("systemTypes");
|
||||
for (auto & s : m->systemTypes)
|
||||
list.elem(s);
|
||||
}
|
||||
|
||||
{
|
||||
auto list = nested2.list("supportedFeatures");
|
||||
for (auto & s : m->supportedFeatures)
|
||||
list.elem(s);
|
||||
}
|
||||
|
||||
{
|
||||
auto list = nested2.list("mandatoryFeatures");
|
||||
for (auto & s : m->mandatoryFeatures)
|
||||
list.elem(s);
|
||||
}
|
||||
|
||||
nested2.attr("currentJobs", s->currentJobs);
|
||||
if (s->currentJobs == 0)
|
||||
nested2.attr("idleSince", s->idleSince);
|
||||
nested2.attr("nrStepsDone", s->nrStepsDone);
|
||||
if (m->state->nrStepsDone) {
|
||||
nested2.attr("totalStepTime", s->totalStepTime);
|
||||
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
|
||||
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
|
||||
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
|
||||
}
|
||||
|
||||
auto info(m->state->connectInfo.lock());
|
||||
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
|
||||
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
|
||||
nested2.attr("consecutiveFailures", info->consecutiveFailures);
|
||||
|
||||
json machine = {
|
||||
{"enabled", m->enabled},
|
||||
{"systemTypes", m->systemTypes},
|
||||
{"supportedFeatures", m->supportedFeatures},
|
||||
{"mandatoryFeatures", m->mandatoryFeatures},
|
||||
{"nrStepsDone", s->nrStepsDone.load()},
|
||||
{"currentJobs", s->currentJobs.load()},
|
||||
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
|
||||
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
|
||||
{"consecutiveFailures", info->consecutiveFailures},
|
||||
};
|
||||
|
||||
if (s->currentJobs == 0)
|
||||
machine["idleSince"] = s->idleSince.load();
|
||||
if (m->state->nrStepsDone) {
|
||||
machine["totalStepTime"] = s->totalStepTime.load();
|
||||
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
|
||||
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
|
||||
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
|
||||
}
|
||||
machines_json[m->storeUri.render()] = machine;
|
||||
}
|
||||
statusJson["machines"] = machines_json;
|
||||
}
|
||||
|
||||
{
|
||||
auto nested = root.object("jobsets");
|
||||
auto jobsets_json = json::object();
|
||||
auto jobsets_(jobsets.lock());
|
||||
for (auto & jobset : *jobsets_) {
|
||||
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
|
||||
nested2.attr("shareUsed", jobset.second->shareUsed());
|
||||
nested2.attr("seconds", jobset.second->getSeconds());
|
||||
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
|
||||
{"shareUsed", jobset.second->shareUsed()},
|
||||
{"seconds", jobset.second->getSeconds()},
|
||||
};
|
||||
}
|
||||
statusJson["jobsets"] = jobsets_json;
|
||||
}
|
||||
|
||||
{
|
||||
auto nested = root.object("machineTypes");
|
||||
auto machineTypesJson = json::object();
|
||||
auto machineTypes_(machineTypes.lock());
|
||||
for (auto & i : *machineTypes_) {
|
||||
auto nested2 = nested.object(i.first);
|
||||
nested2.attr("runnable", i.second.runnable);
|
||||
nested2.attr("running", i.second.running);
|
||||
auto machineTypeJson = machineTypesJson[i.first] = {
|
||||
{"runnable", i.second.runnable},
|
||||
{"running", i.second.running},
|
||||
};
|
||||
if (i.second.runnable > 0)
|
||||
nested2.attr("waitTime", i.second.waitTime.count() +
|
||||
i.second.runnable * (time(0) - lastDispatcherCheck));
|
||||
machineTypeJson["waitTime"] = i.second.waitTime.count() +
|
||||
i.second.runnable * (time(0) - lastDispatcherCheck);
|
||||
if (i.second.running == 0)
|
||||
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
|
||||
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
|
||||
}
|
||||
statusJson["machineTypes"] = machineTypesJson;
|
||||
}
|
||||
|
||||
auto store = getDestStore();
|
||||
|
||||
auto nested = root.object("store");
|
||||
|
||||
auto & stats = store->getStats();
|
||||
nested.attr("narInfoRead", stats.narInfoRead);
|
||||
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
|
||||
nested.attr("narInfoMissing", stats.narInfoMissing);
|
||||
nested.attr("narInfoWrite", stats.narInfoWrite);
|
||||
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
|
||||
nested.attr("narRead", stats.narRead);
|
||||
nested.attr("narReadBytes", stats.narReadBytes);
|
||||
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
|
||||
nested.attr("narWrite", stats.narWrite);
|
||||
nested.attr("narWriteAverted", stats.narWriteAverted);
|
||||
nested.attr("narWriteBytes", stats.narWriteBytes);
|
||||
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
|
||||
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
|
||||
nested.attr("narCompressionSavings",
|
||||
stats.narWriteBytes
|
||||
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
|
||||
: 0.0);
|
||||
nested.attr("narCompressionSpeed", // MiB/s
|
||||
statusJson["store"] = {
|
||||
{"narInfoRead", stats.narInfoRead.load()},
|
||||
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
|
||||
{"narInfoMissing", stats.narInfoMissing.load()},
|
||||
{"narInfoWrite", stats.narInfoWrite.load()},
|
||||
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
|
||||
{"narRead", stats.narRead.load()},
|
||||
{"narReadBytes", stats.narReadBytes.load()},
|
||||
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
|
||||
{"narWrite", stats.narWrite.load()},
|
||||
{"narWriteAverted", stats.narWriteAverted.load()},
|
||||
{"narWriteBytes", stats.narWriteBytes.load()},
|
||||
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
|
||||
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
|
||||
{"narCompressionSavings",
|
||||
stats.narWriteBytes
|
||||
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
|
||||
: 0.0},
|
||||
{"narCompressionSpeed", // MiB/s
|
||||
stats.narWriteCompressionTimeMs
|
||||
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0);
|
||||
: 0.0},
|
||||
};
|
||||
|
||||
#if NIX_WITH_S3_SUPPORT
|
||||
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
|
||||
if (s3Store) {
|
||||
auto nested2 = nested.object("s3");
|
||||
auto & s3Stats = s3Store->getS3Stats();
|
||||
nested2.attr("put", s3Stats.put);
|
||||
nested2.attr("putBytes", s3Stats.putBytes);
|
||||
nested2.attr("putTimeMs", s3Stats.putTimeMs);
|
||||
nested2.attr("putSpeed",
|
||||
s3Stats.putTimeMs
|
||||
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0);
|
||||
nested2.attr("get", s3Stats.get);
|
||||
nested2.attr("getBytes", s3Stats.getBytes);
|
||||
nested2.attr("getTimeMs", s3Stats.getTimeMs);
|
||||
nested2.attr("getSpeed",
|
||||
s3Stats.getTimeMs
|
||||
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0);
|
||||
nested2.attr("head", s3Stats.head);
|
||||
nested2.attr("costDollarApprox",
|
||||
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
|
||||
+ s3Stats.put / 1000.0 * 0.005 +
|
||||
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
|
||||
auto jsonS3 = statusJson["s3"] = {
|
||||
{"put", s3Stats.put.load()},
|
||||
{"putBytes", s3Stats.putBytes.load()},
|
||||
{"putTimeMs", s3Stats.putTimeMs.load()},
|
||||
{"putSpeed",
|
||||
s3Stats.putTimeMs
|
||||
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0},
|
||||
{"get", s3Stats.get.load()},
|
||||
{"getBytes", s3Stats.getBytes.load()},
|
||||
{"getTimeMs", s3Stats.getTimeMs.load()},
|
||||
{"getSpeed",
|
||||
s3Stats.getTimeMs
|
||||
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0},
|
||||
{"head", s3Stats.head.load()},
|
||||
{"costDollarApprox",
|
||||
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
|
||||
+ s3Stats.put / 1000.0 * 0.005 +
|
||||
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
|
||||
};
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
{
|
||||
@@ -725,7 +737,7 @@ void State::dumpStatus(Connection & conn)
|
||||
pqxx::work txn(conn);
|
||||
// FIXME: use PostgreSQL 9.5 upsert.
|
||||
txn.exec("delete from SystemStatus where what = 'queue-runner'");
|
||||
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
|
||||
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
|
||||
txn.exec("notify status_dumped");
|
||||
txn.commit();
|
||||
}
|
||||
@@ -820,7 +832,7 @@ void State::run(BuildID buildOne)
|
||||
<< metricsAddr << "/metrics (port " << exposerPort << ")"
|
||||
<< std::endl;
|
||||
|
||||
Store::Params localParams;
|
||||
Store::Config::Params localParams;
|
||||
localParams["max-connections"] = "16";
|
||||
localParams["max-connection-age"] = "600";
|
||||
localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
|
||||
@@ -902,10 +914,17 @@ void State::run(BuildID buildOne)
|
||||
while (true) {
|
||||
try {
|
||||
auto conn(dbPool.get());
|
||||
receiver dumpStatus_(*conn, "dump_status");
|
||||
while (true) {
|
||||
conn->await_notification();
|
||||
dumpStatus(*conn);
|
||||
try {
|
||||
receiver dumpStatus_(*conn, "dump_status");
|
||||
while (true) {
|
||||
conn->await_notification();
|
||||
dumpStatus(*conn);
|
||||
}
|
||||
} catch (pqxx::broken_connection & connEx) {
|
||||
printMsg(lvlError, "main thread: %s", connEx.what());
|
||||
printMsg(lvlError, "main thread: Reconnecting in 10s");
|
||||
conn.markBad();
|
||||
sleep(10);
|
||||
}
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, "main thread: %s", e.what());
|
||||
@@ -950,7 +969,6 @@ int main(int argc, char * * argv)
|
||||
});
|
||||
|
||||
settings.verboseBuild = true;
|
||||
settings.lockCPU = false;
|
||||
|
||||
State state{metricsAddrOpt};
|
||||
if (status)
|
||||
|
24
src/hydra-queue-runner/meson.build
Normal file
@@ -0,0 +1,24 @@
srcs = files(
'builder.cc',
'build-remote.cc',
'build-result.cc',
'dispatcher.cc',
'hydra-queue-runner.cc',
'nar-extractor.cc',
'queue-monitor.cc',
)

hydra_queue_runner = executable('hydra-queue-runner',
'hydra-queue-runner.cc',
srcs,
dependencies: [
libhydra_dep,
nix_util_dep,
nix_store_dep,
nix_main_dep,
pqxx_dep,
prom_cpp_core_dep,
prom_cpp_pull_dep,
],
install: true,
)
|
@@ -1,12 +1,51 @@
|
||||
#include "nar-extractor.hh"
|
||||
|
||||
#include "archive.hh"
|
||||
#include <nix/util/archive.hh>
|
||||
|
||||
#include <unordered_set>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
struct Extractor : ParseSink
|
||||
|
||||
struct NarMemberConstructor : CreateRegularFileSink
|
||||
{
|
||||
NarMemberData & curMember;
|
||||
|
||||
HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
|
||||
|
||||
std::optional<uint64_t> expectedSize;
|
||||
|
||||
NarMemberConstructor(NarMemberData & curMember)
|
||||
: curMember(curMember)
|
||||
{ }
|
||||
|
||||
void isExecutable() override
|
||||
{
|
||||
}
|
||||
|
||||
void preallocateContents(uint64_t size) override
|
||||
{
|
||||
expectedSize = size;
|
||||
}
|
||||
|
||||
void operator () (std::string_view data) override
|
||||
{
|
||||
assert(expectedSize);
|
||||
*curMember.fileSize += data.size();
|
||||
hashSink(data);
|
||||
if (curMember.contents) {
|
||||
curMember.contents->append(data);
|
||||
}
|
||||
assert(curMember.fileSize <= expectedSize);
|
||||
if (curMember.fileSize == expectedSize) {
|
||||
auto [hash, len] = hashSink.finish();
|
||||
assert(curMember.fileSize == len);
|
||||
curMember.sha256 = hash;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct Extractor : FileSystemObjectSink
|
||||
{
|
||||
std::unordered_set<Path> filesToKeep {
|
||||
"/nix-support/hydra-build-products",
|
||||
@@ -15,58 +54,40 @@ struct Extractor : ParseSink
|
||||
};
|
||||
|
||||
NarMemberDatas & members;
|
||||
NarMemberData * curMember = nullptr;
|
||||
Path prefix;
|
||||
std::filesystem::path prefix;
|
||||
|
||||
Path toKey(const CanonPath & path)
|
||||
{
|
||||
std::filesystem::path p = prefix;
|
||||
// Conditional to avoid trailing slash
|
||||
if (!path.isRoot()) p /= path.rel();
|
||||
return p;
|
||||
}
|
||||
|
||||
Extractor(NarMemberDatas & members, const Path & prefix)
|
||||
: members(members), prefix(prefix)
|
||||
{ }
|
||||
|
||||
void createDirectory(const Path & path) override
|
||||
void createDirectory(const CanonPath & path) override
|
||||
{
|
||||
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
|
||||
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
|
||||
}
|
||||
|
||||
void createRegularFile(const Path & path) override
|
||||
void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
|
||||
{
|
||||
curMember = &members.insert_or_assign(prefix + path, NarMemberData {
|
||||
.type = FSAccessor::Type::tRegular,
|
||||
.fileSize = 0,
|
||||
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
|
||||
}).first->second;
|
||||
NarMemberConstructor nmc {
|
||||
members.insert_or_assign(toKey(path), NarMemberData {
|
||||
.type = SourceAccessor::Type::tRegular,
|
||||
.fileSize = 0,
|
||||
.contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
|
||||
}).first->second,
|
||||
};
|
||||
func(nmc);
|
||||
}
|
||||
|
||||
std::optional<uint64_t> expectedSize;
|
||||
std::unique_ptr<HashSink> hashSink;
|
||||
|
||||
void preallocateContents(uint64_t size) override
|
||||
void createSymlink(const CanonPath & path, const std::string & target) override
|
||||
{
|
||||
expectedSize = size;
|
||||
hashSink = std::make_unique<HashSink>(htSHA256);
|
||||
}
|
||||
|
||||
void receiveContents(std::string_view data) override
|
||||
{
|
||||
assert(expectedSize);
|
||||
assert(curMember);
|
||||
assert(hashSink);
|
||||
*curMember->fileSize += data.size();
|
||||
(*hashSink)(data);
|
||||
if (curMember->contents) {
|
||||
curMember->contents->append(data);
|
||||
}
|
||||
assert(curMember->fileSize <= expectedSize);
|
||||
if (curMember->fileSize == expectedSize) {
|
||||
auto [hash, len] = hashSink->finish();
|
||||
assert(curMember->fileSize == len);
|
||||
curMember->sha256 = hash;
|
||||
hashSink.reset();
|
||||
}
|
||||
}
|
||||
|
||||
void createSymlink(const Path & path, const std::string & target) override
|
||||
{
|
||||
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
|
||||
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
|
||||
}
|
||||
};
|
||||
|
||||
|
@@ -1,13 +1,13 @@
#pragma once

#include "fs-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
#include <nix/util/source-accessor.hh>
#include <nix/util/types.hh>
#include <nix/util/serialise.hh>
#include <nix/util/hash.hh>

struct NarMemberData
{
nix::FSAccessor::Type type;
nix::SourceAccessor::Type type;
std::optional<uint64_t> fileSize;
std::optional<std::string> contents;
std::optional<nix::Hash> sha256;
@@ -1,6 +1,8 @@
|
||||
#include "state.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include "globals.hh"
|
||||
#include <nix/store/globals.hh>
|
||||
#include <nix/store/parsed-derivations.hh>
|
||||
#include <nix/util/thread-pool.hh>
|
||||
|
||||
#include <cstring>
|
||||
|
||||
@@ -10,63 +12,74 @@ using namespace nix;
|
||||
void State::queueMonitor()
|
||||
{
|
||||
while (true) {
|
||||
auto conn(dbPool.get());
|
||||
try {
|
||||
queueMonitorLoop();
|
||||
queueMonitorLoop(*conn);
|
||||
} catch (pqxx::broken_connection & e) {
|
||||
printMsg(lvlError, "queue monitor: %s", e.what());
|
||||
printMsg(lvlError, "queue monitor: Reconnecting in 10s");
|
||||
conn.markBad();
|
||||
sleep(10);
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, format("queue monitor: %1%") % e.what());
|
||||
printError("queue monitor: %s", e.what());
|
||||
sleep(10); // probably a DB problem, so don't retry right away
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void State::queueMonitorLoop()
|
||||
void State::queueMonitorLoop(Connection & conn)
|
||||
{
|
||||
auto conn(dbPool.get());
|
||||
|
||||
receiver buildsAdded(*conn, "builds_added");
|
||||
receiver buildsRestarted(*conn, "builds_restarted");
|
||||
receiver buildsCancelled(*conn, "builds_cancelled");
|
||||
receiver buildsDeleted(*conn, "builds_deleted");
|
||||
receiver buildsBumped(*conn, "builds_bumped");
|
||||
receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
|
||||
receiver buildsAdded(conn, "builds_added");
|
||||
receiver buildsRestarted(conn, "builds_restarted");
|
||||
receiver buildsCancelled(conn, "builds_cancelled");
|
||||
receiver buildsDeleted(conn, "builds_deleted");
|
||||
receiver buildsBumped(conn, "builds_bumped");
|
||||
receiver jobsetSharesChanged(conn, "jobset_shares_changed");
|
||||
|
||||
auto destStore = getDestStore();
|
||||
|
||||
unsigned int lastBuildId = 0;
|
||||
|
||||
bool quit = false;
|
||||
while (!quit) {
|
||||
auto t_before_work = std::chrono::steady_clock::now();
|
||||
|
||||
localStore->clearPathInfoCache();
|
||||
|
||||
bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
|
||||
bool done = getQueuedBuilds(conn, destStore);
|
||||
|
||||
if (buildOne && buildOneDone) quit = true;
|
||||
|
||||
auto t_after_work = std::chrono::steady_clock::now();
|
||||
|
||||
prom.queue_monitor_time_spent_running.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
|
||||
|
||||
/* Sleep until we get notification from the database about an
|
||||
event. */
|
||||
if (done && !quit) {
|
||||
conn->await_notification();
|
||||
conn.await_notification();
|
||||
nrQueueWakeups++;
|
||||
} else
|
||||
conn->get_notifs();
|
||||
conn.get_notifs();
|
||||
|
||||
if (auto lowestId = buildsAdded.get()) {
|
||||
lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
|
||||
printMsg(lvlTalkative, "got notification: new builds added to the queue");
|
||||
}
|
||||
if (buildsRestarted.get()) {
|
||||
printMsg(lvlTalkative, "got notification: builds restarted");
|
||||
lastBuildId = 0; // check all builds
|
||||
}
|
||||
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
|
||||
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
|
||||
processQueueChange(*conn);
|
||||
processQueueChange(conn);
|
||||
}
|
||||
if (jobsetSharesChanged.get()) {
|
||||
printMsg(lvlTalkative, "got notification: jobset shares changed");
|
||||
processJobsetSharesChange(*conn);
|
||||
processJobsetSharesChange(conn);
|
||||
}
|
||||
|
||||
auto t_after_sleep = std::chrono::steady_clock::now();
|
||||
prom.queue_monitor_time_spent_waiting.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
|
||||
}
|
||||
|
||||
exit(0);
|
||||
@@ -80,20 +93,18 @@ struct PreviousFailure : public std::exception {
|
||||
|
||||
|
||||
bool State::getQueuedBuilds(Connection & conn,
|
||||
ref<Store> destStore, unsigned int & lastBuildId)
|
||||
ref<Store> destStore)
|
||||
{
|
||||
prom.queue_checks_started.Increment();
|
||||
|
||||
printInfo("checking the queue for builds > %d...", lastBuildId);
|
||||
printInfo("checking the queue for builds...");

/* Grab the queued builds from the database, but don't process
them yet (since we don't want a long-running transaction). */
std::vector<BuildID> newIDs;
std::map<BuildID, Build::ptr> newBuildsByID;
std::unordered_map<BuildID, Build::ptr> newBuildsByID;
std::multimap<StorePath, BuildID> newBuildsByPath;

unsigned int newLastBuildId = lastBuildId;

{
pqxx::work txn(conn);

@@ -102,17 +113,12 @@ bool State::getQueuedBuilds(Connection & conn,
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
"globalPriority, priority from Builds "
"inner join jobsets on builds.jobset_id = jobsets.id "
"where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
lastBuildId);
"where finished = 0 order by globalPriority desc, random()");

for (auto const & row : res) {
auto builds_(builds.lock());
BuildID id = row["id"].as<BuildID>();
if (buildOne && id != buildOne) continue;
if (id > newLastBuildId) {
newLastBuildId = id;
prom.queue_max_id.Set(id);
}
if (builds_->count(id)) continue;

auto build = std::make_shared<Build>(
@@ -142,13 +148,13 @@ bool State::getQueuedBuilds(Connection & conn,

createBuild = [&](Build::ptr build) {
prom.queue_build_loads.Increment();
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
nrAdded++;
newBuildsByID.erase(build->id);

if (!localStore->isValidPath(build->drvPath)) {
/* Derivation has been GC'ed prematurely. */
printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
printError("aborting GC'ed build %1%", build->id);
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
@@ -192,15 +198,19 @@ bool State::getQueuedBuilds(Connection & conn,
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();

if (!propagatedFrom) {
for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
if (i.second.second) {
auto res = txn.exec_params
("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
localStore->printStorePath(*i.second.second));
if (!res[0][0].is_null()) {
propagatedFrom = res[0][0].as<BuildID>();
break;
}
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
auto res = optOutputPath
? txn.exec_params(
std::string { common } + " and path = $1",
localStore->printStorePath(*optOutputPath))
: txn.exec_params(
std::string { common } + " and drvPath = $1 and name = $2",
localStore->printStorePath(ex.step->drvPath),
outputName);
if (!res[0][0].is_null()) {
propagatedFrom = res[0][0].as<BuildID>();
break;
}
}
}
@@ -236,12 +246,10 @@ bool State::getQueuedBuilds(Connection & conn,
/* If we didn't get a step, it means the step's outputs are
all valid. So we mark this as a finished, cached build. */
if (!step) {
auto drv = localStore->readDerivation(build->drvPath);
BuildOutput res = getBuildOutputCached(conn, destStore, drv);
BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);

for (auto & i : drv.outputsAndOptPaths(*localStore))
if (i.second.second)
addRoot(*i.second.second);
for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
addRoot(i.second);

{
auto mc = startDbUpdate();
@@ -292,7 +300,7 @@ bool State::getQueuedBuilds(Connection & conn,
try {
createBuild(build);
} catch (Error & e) {
e.addTrace({}, hintfmt("while loading build %d: ", build->id));
e.addTrace({}, HintFmt("while loading build %d: ", build->id));
throw;
}

@@ -302,7 +310,7 @@ bool State::getQueuedBuilds(Connection & conn,

/* Add the new runnable build steps to ‘runnable’ and wake up
the builder threads. */
printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
for (auto & r : newRunnable)
makeRunnable(r);

@@ -312,15 +320,13 @@ bool State::getQueuedBuilds(Connection & conn,

/* Stop after a certain time to allow priority bumps to be
processed. */
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
prom.queue_checks_early_exits.Increment();
break;
}
}
}

prom.queue_checks_finished.Increment();

lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
return newBuildsByID.empty();
}

@@ -358,13 +364,13 @@ void State::processQueueChange(Connection & conn)
for (auto i = builds_->begin(); i != builds_->end(); ) {
auto b = currentIds.find(i->first);
if (b == currentIds.end()) {
printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
printInfo("discarding cancelled build %1%", i->first);
i = builds_->erase(i);
// FIXME: ideally we would interrupt active build steps here.
continue;
}
if (i->second->globalPriority < b->second) {
printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
printInfo("priority of build %1% increased", i->first);
i->second->globalPriority = b->second;
i->second->propagatePriorities();
}
@@ -399,6 +405,34 @@ void State::processQueueChange(Connection & conn)
}


std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
ref<Store> destStore,
const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
ThreadPool tp;

for (auto & [output, maybeOutputPath] : paths) {
if (!maybeOutputPath) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
} else {
tp.enqueue([&] {
if (!destStore->isValidPath(*maybeOutputPath)) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
}
});
}
}

tp.process();

auto missing(missing_.lock());
return *missing;
}


Step::ptr State::createStep(ref<Store> destStore,
Connection & conn, Build::ptr build, const StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
@@ -457,17 +491,23 @@ Step::ptr State::createStep(ref<Store> destStore,
it's not runnable yet, and other threads won't make it
runnable while step->created == false. */
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
{
auto parsedOpt = StructuredAttrs::tryParse(step->drv->env);
try {
step->drvOptions = std::make_unique<DerivationOptions>(
DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr));
} catch (Error & e) {
e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
throw;
}
}

step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1";
step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";

step->systemType = step->drv->platform;
{
auto i = step->drv->env.find("requiredSystemFeatures");
StringSet features;
if (i != step->drv->env.end())
features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
if (step->preferLocalBuild)
features.insert("local");
if (!features.empty()) {
@@ -481,26 +521,40 @@ Step::ptr State::createStep(ref<Store> destStore,
throw PreviousFailure{step};

/* Are all outputs valid? */
bool valid = true;
DerivationOutputs missing;
for (auto & i : step->drv->outputs)
if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
valid = false;
missing.insert_or_assign(i.first, i.second);
}
auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
std::map<DrvOutput, std::optional<StorePath>> paths;
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
auto outputHash = outputHashes.at(outputName);
paths.insert({{outputHash, outputName}, maybeOutputPath});
}

auto missing = getMissingRemotePaths(destStore, paths);
bool valid = missing.empty();

/* Try to copy the missing paths from the local store or from
substitutes. */
if (!missing.empty()) {

size_t avail = 0;
for (auto & i : missing) {
auto path = i.second.path(*localStore, step->drv->name, i.first);
if (/* localStore != destStore && */ localStore->isValidPath(*path))
for (auto & [i, pathOpt] : missing) {
// If we don't know the output path from the destination
// store, see if the local store can tell us.
if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
if (auto maybeRealisation = localStore->queryRealisation(i))
pathOpt = maybeRealisation->outPath;

if (!pathOpt) {
// No hope of getting the store object if we don't know
// the path.
continue;
}
auto & path = *pathOpt;

if (/* localStore != destStore && */ localStore->isValidPath(path))
avail++;
else if (useSubstitutes) {
SubstitutablePathInfos infos;
localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
localStore->querySubstitutablePathInfos({{path, {}}}, infos);
if (infos.size() == 1)
avail++;
}
@@ -508,26 +562,29 @@ Step::ptr State::createStep(ref<Store> destStore,

if (missing.size() == avail) {
valid = true;
for (auto & i : missing) {
auto path = i.second.path(*localStore, step->drv->name, i.first);
for (auto & [i, pathOpt] : missing) {
// If we found everything, then we should know the path
// to every missing store object now.
assert(pathOpt);
auto & path = *pathOpt;

try {
time_t startTime = time(0);

if (localStore->isValidPath(*path))
if (localStore->isValidPath(path))
printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
localStore->printStorePath(*path),
localStore->printStorePath(path),
localStore->printStorePath(drvPath));
else {
printInfo("substituting output ‘%1%’ of ‘%2%’",
localStore->printStorePath(*path),
localStore->printStorePath(path),
localStore->printStorePath(drvPath));
localStore->ensurePath(*path);
localStore->ensurePath(path);
// FIXME: should copy directly from substituter to destStore.
}

copyClosure(*localStore, *destStore,
StorePathSet { *path },
StorePathSet { path },
NoRepair, CheckSigs, NoSubstitute);

time_t stopTime = time(0);
@@ -535,13 +592,13 @@ Step::ptr State::createStep(ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
txn.commit();
}

} catch (Error & e) {
printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
localStore->printStorePath(*path),
localStore->printStorePath(path),
localStore->printStorePath(drvPath),
e.what());
valid = false;
@@ -561,7 +618,7 @@ Step::ptr State::createStep(ref<Store> destStore,
printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));

/* Create steps for the dependencies. */
for (auto & i : step->drv->inputDrvs) {
for (auto & i : step->drv->inputDrvs.map) {
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
@@ -640,21 +697,23 @@ void State::processJobsetSharesChange(Connection & conn)
}


BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
{
auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);

{
pqxx::work txn(conn);

for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
for (auto & [name, output] : derivationOutputs) {
auto r = txn.exec_params
("select id, buildStatus, releaseName, closureSize, size from Builds b "
"join BuildOutputs o on b.id = o.build "
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
localStore->printStorePath(*output.second));
localStore->printStorePath(output));
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();

printMsg(lvlInfo, format("reusing build %d") % id);
printInfo("reusing build %d", id);

BuildOutput res;
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
@@ -677,7 +736,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
product.fileSize = row[2].as<off_t>();
}
if (!row[3].is_null())
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
if (!row[4].is_null())
product.path = row[4].as<std::string>();
product.name = row[5].as<std::string>();
@@ -704,5 +763,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
}

NarMemberDatas narMembers;
return getBuildOutput(destStore, narMembers, drv);
return getBuildOutput(destStore, narMembers, derivationOutputs);
}
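The hunk above replaces the old serial per-output isValidPath loop in createStep with the new getMissingRemotePaths, which fans the validity probes out over a thread pool. Below is a minimal standalone sketch of that fan-out pattern, not Hydra's actual implementation: it assumes simplified Store/DrvOutput/StorePath stand-ins and uses std::async plus a mutex where the real code uses Nix's ThreadPool and Sync wrappers.

```cpp
#include <future>
#include <map>
#include <mutex>
#include <optional>
#include <set>
#include <string>
#include <vector>

// Hypothetical stand-ins for the Nix types used above.
struct Store {
    std::set<std::string> valid;
    bool isValidPath(const std::string & path) const { return valid.count(path) > 0; }
};
using DrvOutput = std::string;
using StorePath = std::string;

// Return the subset of `paths` that `destStore` does not have. Outputs with
// an unknown path are reported as missing immediately; known paths are
// probed concurrently to hide the latency of a remote store.
std::map<DrvOutput, std::optional<StorePath>> getMissingRemotePaths(
    const Store & destStore,
    const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
    std::mutex mutex;
    std::map<DrvOutput, std::optional<StorePath>> missing;
    std::vector<std::future<void>> probes;

    for (auto & [output, maybePath] : paths) {
        if (!maybePath) {
            missing.emplace(output, std::nullopt);
            continue;
        }
        probes.push_back(std::async(std::launch::async,
            [&destStore, &mutex, &missing, output = output, path = *maybePath] {
                if (!destStore.isValidPath(path)) {
                    std::lock_guard<std::mutex> lock(mutex);
                    missing.emplace(output, path);
                }
            }));
    }

    for (auto & p : probes) p.get();   // wait for all probes to finish
    return missing;
}
```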
@@ -6,6 +6,8 @@
#include <map>
#include <memory>
#include <queue>
#include <regex>
#include <semaphore>

#include <prometheus/counter.h>
#include <prometheus/gauge.h>
@@ -13,13 +15,18 @@

#include "db.hh"

#include "parsed-derivations.hh"
#include "pathlocks.hh"
#include "pool.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "sync.hh"
#include <nix/store/derivations.hh>
#include <nix/store/derivation-options.hh>
#include <nix/store/pathlocks.hh>
#include <nix/util/pool.hh>
#include <nix/store/build-result.hh>
#include <nix/store/store-api.hh>
#include <nix/util/sync.hh>
#include "nar-extractor.hh"
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/serve-protocol-connection.hh>
#include <nix/store/machines.hh>


typedef unsigned int BuildID;
@@ -53,6 +60,7 @@ typedef enum {
ssConnecting = 10,
ssSendingInputs = 20,
ssBuilding = 30,
ssWaitingForLocalSlot = 35,
ssReceivingOutputs = 40,
ssPostProcessing = 50,
} StepState;
@@ -77,6 +85,8 @@ struct RemoteResult
{
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
}

void updateWithBuildResult(const nix::BuildResult &);
};


@@ -161,8 +171,8 @@ struct Step

nix::StorePath drvPath;
std::unique_ptr<nix::Derivation> drv;
std::unique_ptr<nix::ParsedDerivation> parsedDrv;
std::set<std::string> requiredSystemFeatures;
std::unique_ptr<nix::DerivationOptions> drvOptions;
nix::StringSet requiredSystemFeatures;
bool preferLocalBuild;
bool isDeterministic;
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
@@ -230,18 +240,10 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);


struct Machine
struct Machine : nix::Machine
{
typedef std::shared_ptr<Machine> ptr;

bool enabled{true};

std::string sshName, sshKey;
std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
unsigned int maxJobs = 1;
float speedFactor = 1.0;
std::string sshPublicHostKey;

struct State {
typedef std::shared_ptr<State> ptr;
counter currentJobs{0};
@@ -291,10 +293,13 @@ struct Machine
return true;
}

bool isLocalhost()
{
return sshName == "localhost";
}
bool isLocalhost() const;

// A connection to a machine
struct Connection : nix::ServeProto::BasicClientConnection {
// Backpointer to the machine
ptr machine;
};
};


@@ -348,9 +353,13 @@ private:

/* The build machines. */
std::mutex machinesReadyLock;
typedef std::map<std::string, Machine::ptr> Machines;
typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

/* Throttler for CPU-bound local work. */
static constexpr unsigned int maxSupportedLocalWorkers = 1024;
std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;

/* Various stats. */
time_t startedAt;
counter nrBuildsRead{0};
@@ -360,6 +369,7 @@ private:
counter nrStepsDone{0};
counter nrStepsBuilding{0};
counter nrStepsCopyingTo{0};
counter nrStepsWaitingForDownloadSlot{0};
counter nrStepsCopyingFrom{0};
counter nrStepsWaiting{0};
counter nrUnsupportedSteps{0};
@@ -390,7 +400,6 @@ private:

struct MachineReservation
{
typedef std::shared_ptr<MachineReservation> ptr;
State & state;
Step::ptr step;
Machine::ptr machine;
@@ -428,7 +437,7 @@ private:

/* How often the build steps of a jobset should be repeated in
order to detect non-determinism. */
std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;

bool uploadLogsToBinaryCache;

@@ -448,7 +457,12 @@ private:
prometheus::Counter& queue_steps_created;
prometheus::Counter& queue_checks_early_exits;
prometheus::Counter& queue_checks_finished;
prometheus::Gauge& queue_max_id;

prometheus::Counter& dispatcher_time_spent_running;
prometheus::Counter& dispatcher_time_spent_waiting;

prometheus::Counter& queue_monitor_time_spent_running;
prometheus::Counter& queue_monitor_time_spent_waiting;

PromMetrics();
};
@@ -483,23 +497,28 @@ private:
const std::string & machine);

int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);

void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

void queueMonitor();

void queueMonitorLoop();
void queueMonitorLoop(Connection & conn);

/* Check the queue for new builds. */
bool getQueuedBuilds(Connection & conn,
nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);

/* Handle cancellation, deletion and priority bumps. */
void processQueueChange(Connection & conn);

BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
const nix::Derivation & drv);
const nix::StorePath & drvPath);

/* Returns paths missing from the remote store. Paths are processed in
* parallel to work around the possible latency of remote stores. */
std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
nix::ref<nix::Store> destStore,
const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);

Step::ptr createStep(nix::ref<nix::Store> store,
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
@@ -530,19 +549,19 @@ private:

void abortUnsupported();

void builder(MachineReservation::ptr reservation);
void builder(std::unique_ptr<MachineReservation> reservation);

/* Perform the given build step. Return true if the step is to be
retried. */
enum StepResult { sDone, sRetry, sMaybeCancelled };
StepResult doBuildStep(nix::ref<nix::Store> destStore,
MachineReservation::ptr reservation,
std::unique_ptr<MachineReservation> reservation,
std::shared_ptr<ActiveStep> activeStep);

void buildRemote(nix::ref<nix::Store> destStore,
std::unique_ptr<MachineReservation> reservation,
Machine::ptr machine, Step::ptr step,
unsigned int maxSilentTime, unsigned int buildTimeout,
unsigned int repeats,
const nix::ServeProto::BuildOptions & buildOptions,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
std::function<void(StepState)> updateStep,
NarMemberDatas & narMembers);
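Among the state.hh changes above is the new localWorkThrottler, a std::counting_semaphore that bounds how many CPU-bound local tasks run at once. A small self-contained sketch of that throttling pattern follows; it assumes an illustrative limit of 4 concurrent workers rather than Hydra's configured value.

```cpp
#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

// Allow at most 4 CPU-bound tasks at a time; the template argument is only
// an upper bound on the counter, mirroring maxSupportedLocalWorkers above.
static std::counting_semaphore<1024> localWorkThrottler(4);

void doLocalWork(int id)
{
    localWorkThrottler.acquire();           // wait for a free slot
    std::printf("task %d running\n", id);   // CPU-bound work would go here
    localWorkThrottler.release();           // hand the slot to the next task
}

int main()
{
    std::vector<std::thread> workers;
    for (int i = 0; i < 16; ++i)
        workers.emplace_back(doLocalWork, i);
    for (auto & t : workers) t.join();
}
```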
@@ -4,7 +4,6 @@ use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use List::SomeUtils qw(any);
use Nix::Store;
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;

@@ -30,7 +29,7 @@ sub getChannelData {
my $outputs = {};
foreach my $output (@outputs) {
my $outPath = $output->get_column("outpath");
next if $checkValidity && !isValidPath($outPath);
next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
$outputs->{$output->get_column("outname")} = $outPath;
push @storePaths, $outPath;
# Put the system type in the manifest (for top-level
@@ -95,6 +95,7 @@ sub get_legacy_ldap_config {
"hydra_bump-to-front" => [ "bump-to-front" ],
"hydra_cancel-build" => [ "cancel-build" ],
"hydra_create-projects" => [ "create-projects" ],
"hydra_eval-jobset" => [ "eval-jobset" ],
"hydra_restart-jobs" => [ "restart-jobs" ],
},
};
@@ -159,6 +160,7 @@ sub valid_roles {
"bump-to-front",
"cancel-build",
"create-projects",
"eval-jobset",
"restart-jobs",
];
}
@@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
} elsif ($type eq "git") {
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
die if ! -d $clonePath;
$diff .= `(cd $clonePath; git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
}

$c->stash->{'plain'} = { data => (scalar $diff) || " " };
@@ -239,6 +239,8 @@ sub triggerJobset {
sub push : Chained('api') PathPart('push') Args(0) {
my ($self, $c) = @_;

requirePost($c);

$c->{stash}->{json}->{jobsetsTriggered} = [];

my $force = exists $c->request->query_params->{force};
@@ -246,19 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) {
foreach my $s (@jobsets) {
my ($p, $j) = parseJobsetName($s);
my $jobset = $c->model('DB::Jobsets')->find($p, $j);
requireEvalJobsetPrivileges($c, $jobset->project);
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
triggerJobset($self, $c, $jobset, $force);
}

my @repos = split /,/, ($c->request->query_params->{repos} // "");
foreach my $r (@repos) {
triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
my @jobsets = $c->model('DB::Jobsets')->search(
{ 'project.enabled' => 1, 'me.enabled' => 1 },
{
join => 'project',
where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
order_by => 'me.id DESC'
});
foreach my $jobset (@jobsets) {
requireEvalJobsetPrivileges($c, $jobset->project);
triggerJobset($self, $c, $jobset, $force)
}
}

$self->status_ok(
@@ -285,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
$c->response->body("");
}

sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
my ($self, $c) = @_;

$c->{stash}->{json}->{jobsetsTriggered} = [];

my $in = $c->request->{data};
my $url = $in->{repository}->{clone_url} or die;
$url =~ s/.git$//;
print STDERR "got push from Gitea repository $url\n";

triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
{ 'project.enabled' => 1, 'me.enabled' => 1 },
{ join => 'project'
, where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
});
$c->response->body("");
}


1;
@@ -7,15 +7,15 @@ use base 'Hydra::Base::Controller::NixChannel';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use File::Basename;
use File::LibMagic;
use File::stat;
use Data::Dump qw(dump);
use Nix::Store;
use Nix::Config;
use List::SomeUtils qw(all);
use Encode;
use MIME::Types;
use JSON::PP;
use WWW::Form::UrlEncoded::PP qw();

use feature 'state';

sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
my ($self, $c, $id) = @_;
@@ -77,14 +77,16 @@ sub build_GET {

$c->stash->{template} = 'build.tt';
$c->stash->{isLocalStore} = isLocalStore();
# XXX: If the derivation is content-addressed then this will always return
# false because `$_->path` will be empty
$c->stash->{available} =
$c->stash->{isLocalStore}
? all { isValidPath($_->path) } $build->buildoutputs->all
? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
: 1;
$c->stash->{drvAvailable} = isValidPath $build->drvpath;
$c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);

if ($build->finished && $build->iscachedbuild) {
my $path = ($build->buildoutputs)[0]->path or die;
my $path = ($build->buildoutputs)[0]->path or undef;
my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
if (defined $cachedBuildStep) {
$c->stash->{cachedBuild} = $cachedBuildStep->build;
@@ -138,7 +140,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
$c->stash->{step} = $step;

my $drvPath = $step->drvpath;
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
showLog($c, $mode, $log_uri);
}

@@ -147,7 +149,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
my ($self, $c, $mode) = @_;

my $drvPath = $c->stash->{build}->drvpath;
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
showLog($c, $mode, $log_uri);
}

@@ -232,17 +234,24 @@ sub serveFile {
}

elsif ($ls->{type} eq "regular") {
# Have the hosted data considered its own origin to avoid being a giant
# XSS hole.
$c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
"cat-store", "--store", getStoreUri(), "$path"]) };
$c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
"store", "cat", "--store", getStoreUri(), "$path"]) };

# Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple.
# Detect MIME type.
my $type = "text/plain";
if ($path =~ /.*\.(\S{1,})$/xms) {
my $ext = $1;
my $mimeTypes = MIME::Types->new(only_complete => 1);
my $t = $mimeTypes->mimeTypeOf($ext);
$type = ref $t ? $t->type : $t if $t;
} else {
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
$type = $info->{mime_with_encoding};
}
$c->response->content_type($type);
$c->forward('Hydra::View::Plain');
@@ -288,29 +297,7 @@ sub download : Chained('buildChain') PathPart {
my $path = $product->path;
$path .= "/" . join("/", @path) if scalar @path > 0;

if (isLocalStore) {

notFound($c, "File '" . $product->path . "' does not exist.") unless -e $product->path;

# Make sure the file is in the Nix store.
$path = checkPath($self, $c, $path);

# If this is a directory but no "/" is attached, then redirect.
if (-d $path && substr($c->request->uri, -1) ne "/") {
return $c->res->redirect($c->request->uri . "/");
}

$path = "$path/index.html" if -d $path && -e "$path/index.html";

notFound($c, "File '$path' does not exist.") if !-e $path;

notFound($c, "Path '$path' is a directory.") if -d $path;

$c->serve_static_file($path);

} else {
serveFile($c, $path);
}
serveFile($c, $path);

$c->response->headers->last_modified($c->stash->{build}->stoptime);
}
@@ -323,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
error($c, "This build is not finished yet.") unless $build->finished;
my $output = $build->buildoutputs->find({name => $outputName});
notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
gone($c, "Output is no longer available.") unless isValidPath $output->path;
gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);

$c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
$c->stash->{current_view} = 'NixNAR';
@@ -366,7 +353,7 @@ sub contents : Chained('buildChain') PathPart Args(1) {

# FIXME: don't use shell invocations below.

# FIXME: use nix cat-store
# FIXME: use nix store cat

my $res;

@@ -440,7 +427,7 @@ sub getDependencyGraph {
};
$$done{$path} = $node;
my @refs;
foreach my $ref (queryReferences($path)) {
foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
next if $ref eq $path;
next unless $runtime || $ref =~ /\.drv$/;
getDependencyGraph($self, $c, $runtime, $done, $ref);
@@ -448,7 +435,7 @@ sub getDependencyGraph {
}
# Show in reverse topological order to flatten the graph.
# Should probably do a proper BFS.
my @sorted = reverse topoSortPaths(@refs);
my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
$node->{refs} = [map { $$done{$_} } @sorted];
}

@@ -461,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
my $build = $c->stash->{build};
my $drvPath = $build->drvpath;

error($c, "Derivation no longer available.") unless isValidPath $drvPath;
error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);

$c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);

@@ -476,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {

requireLocalStore($c);

error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;

my $done = {};
$c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
@@ -496,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
if (isLocalStore) {
foreach my $out ($build->buildoutputs) {
notFound($c, "Path " . $out->path . " is no longer available.")
unless isValidPath($out->path);
unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
}
}
@@ -69,7 +69,7 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {

my $lastBuild = $c->stash->{jobset}->builds->find(
{ job => $c->stash->{job}, finished => 1 },
{ order_by => 'id DESC', rows => 1, columns => [@buildListColumns] }
{ order_by => 'id DESC', rows => 1, columns => ["stoptime", "buildstatus", "closuresize", "size"] }
);

$prometheus->new_counter(
@@ -92,6 +92,26 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {
$c->stash->{job},
)->inc($lastBuild->buildstatus > 0);

$prometheus->new_gauge(
name => "hydra_build_closure_size",
help => "Closure size of the last job's build in bytes",
labels => [ "project", "jobset", "job" ]
)->labels(
$c->stash->{project}->name,
$c->stash->{jobset}->name,
$c->stash->{job},
)->inc($lastBuild->closuresize);

$prometheus->new_gauge(
name => "hydra_build_output_size",
help => "Output size of the last job's build in bytes",
labels => [ "project", "jobset", "job" ]
)->labels(
$c->stash->{project}->name,
$c->stash->{jobset}->name,
$c->stash->{job},
)->inc($lastBuild->size);

$c->stash->{'plain'} = { data => $prometheus->render };
$c->forward('Hydra::View::Plain');
}
@@ -364,6 +364,21 @@ sub evals_GET {
);
}

sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }

sub errors_GET {
my ($self, $c) = @_;

$c->stash->{template} = 'eval-error.tt';

my $jobsetName = $c->stash->{params}->{name};
$c->stash->{jobset} = $c->stash->{project}->jobsets->find(
{ name => $jobsetName },
{ '+columns' => { 'errormsg' => 'errormsg' } }
);

$self->status_ok($c, entity => $c->stash->{jobset});
}

# Redirect to the latest finished evaluation of this jobset.
sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {
@@ -76,7 +76,9 @@ sub view_GET {
$c->stash->{removed} = $diff->{removed};
$c->stash->{unfinished} = $diff->{unfinished};
$c->stash->{aborted} = $diff->{aborted};
$c->stash->{failed} = $diff->{failed};
$c->stash->{totalAborted} = $diff->{totalAborted};
$c->stash->{totalFailed} = $diff->{totalFailed};
$c->stash->{totalQueued} = $diff->{totalQueued};

$c->stash->{full} = ($c->req->params->{full} || "0") eq "1";

@@ -86,6 +88,17 @@ sub view_GET {
);
}

sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }

sub errors_GET {
my ($self, $c) = @_;

$c->stash->{template} = 'eval-error.tt';

$c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });

$self->status_ok($c, entity => $c->stash->{eval});
}

sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
my ($self, $c) = @_;
@@ -16,8 +16,11 @@ use List::Util qw[min max];
use List::SomeUtils qw{any};
use Net::Prometheus;
use Types::Standard qw/StrMatch/;
use WWW::Form::UrlEncoded::PP qw();

use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

# Put this controller at top-level.
__PACKAGE__->config->{namespace} = '';
@@ -32,6 +35,7 @@ sub noLoginNeeded {

return $whitelisted ||
$c->request->path eq "api/push-github" ||
$c->request->path eq "api/push-gitea" ||
$c->request->path eq "google-login" ||
$c->request->path eq "github-redirect" ||
$c->request->path eq "github-login" ||
@@ -47,11 +51,13 @@ sub begin :Private {
$c->stash->{curUri} = $c->request->uri;
$c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
$c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
$c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
$c->stash->{curTime} = time;
$c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
$c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";
$c->stash->{flashMsg} = $c->flash->{flashMsg};
$c->stash->{successMsg} = $c->flash->{successMsg};
$c->stash->{localStore} = isLocalStore;

$c->stash->{isPrivateHydra} = $c->config->{private} // "0" ne "0";

@@ -77,7 +83,7 @@ sub begin :Private {
$_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

# XSRF protection: require POST requests to have the same origin.
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
my $referer = $c->req->header('Referer');
$referer //= $c->req->header('Origin');
my $base = $c->req->base;
@@ -157,7 +163,7 @@ sub status_GET {
{ "buildsteps.busy" => { '!=', 0 } },
{ order_by => ["globalpriority DESC", "id"],
join => "buildsteps",
columns => [@buildListColumns]
columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
})]
);
}
@@ -326,7 +332,7 @@ sub nar :Local :Args(1) {
else {
$path = $Nix::Config::storeDir . "/$path";

gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path);
gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);

$c->stash->{current_view} = 'NixNAR';
$c->stash->{storePath} = $path;
@@ -355,6 +361,33 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
}


sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
my ($self, $c, $realisation) = @_;

if (!isLocalStore) {
notFound($c, "There is no binary cache here.");
}

else {
my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);

if (!$rawRealisation) {
$c->response->status(404);
$c->response->content_type('text/plain');
$c->stash->{plain}->{data} = "does not exist\n";
$c->forward('Hydra::View::Plain');
setCacheHeaders($c, 60 * 60);
return;
}

$c->response->content_type('text/plain');
$c->stash->{plain}->{data} = $rawRealisation;
$c->forward('Hydra::View::Plain');
}
}


sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
my ($self, $c, $narinfo) = @_;

@@ -366,7 +399,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
my ($hash) = $narinfo =~ NARINFO_REGEX;

die("Hash length was not 32") if length($hash) != 32;
my $path = queryPathFromHashPart($hash);
my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);

if (!$path) {
$c->response->status(404);
@@ -524,7 +557,7 @@ sub log :Local :Args(1) {
my $logPrefix = $c->config->{log_prefix};

if (defined $logPrefix) {
$c->res->redirect($logPrefix . "log/" . basename($drvPath));
$c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
} else {
notFound($c, "The build log of $drvPath is not available.");
}
@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
, "jobset.enabled" => 1
},
{ order_by => ["project", "jobset", "job"]
, join => ["project", "jobset"]
, join => {"jobset" => "project"}
})];
}
@@ -67,7 +67,7 @@ sub validateDeclarativeJobset {
my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 1 : 0;
if ($enable_dynamic_run_command
&& !($config->{dynamicruncommand}->{enable}
&& $project->{enable_dynamic_run_command}))
&& $project->enable_dynamic_run_command))
{
die "Dynamic RunCommand is not enabled by the server or the parent project.";
}
@@ -32,12 +32,26 @@ sub buildDiff {
removed => [],
unfinished => [],
aborted => [],
failed => [],

# These summary counters cut across the categories to determine whether
# actions such as "Restart all failed" or "Bump queue" are available.
totalAborted => 0,
totalFailed => 0,
totalQueued => 0,
};

my $n = 0;
foreach my $build (@{$builds}) {
my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
my $aborted = $build->finished != 0 && (
# aborted
$build->buildstatus == 3
# cancelled
|| $build->buildstatus == 4
# timeout
|| $build->buildstatus == 7
# log limit exceeded
|| $build->buildstatus == 10
);
my $d;
my $found = 0;
while ($n < scalar(@{$builds2})) {
@@ -71,12 +85,19 @@ sub buildDiff {
} else {
push @{$ret->{new}}, $build if !$found;
}
if (defined $build->buildstatus && $build->buildstatus != 0) {
push @{$ret->{failed}}, $build;

if ($build->finished != 0 && $build->buildstatus != 0) {
if ($aborted) {
++$ret->{totalAborted};
} else {
++$ret->{totalFailed};
}
} elsif ($build->finished == 0) {
++$ret->{totalQueued};
}
}

return $ret;
}

1;
1;
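The buildDiff change above widens the "aborted" bucket to also cover timeouts (status 7) and log-limit overruns (status 10), and feeds the new totalAborted/totalFailed/totalQueued counters. A rough C++ rendering of the same classification follows; the numeric codes come from the hunk above, while the enum names are only illustrative.

```cpp
// Illustrative names for the numeric buildstatus codes used in the hunk above.
enum BuildStatus : int {
    bsSuccess = 0,
    bsAborted = 3,
    bsCancelled = 4,
    bsTimedOut = 7,
    bsLogLimitExceeded = 10,
};

// A finished build counts towards totalAborted if it ended with any of the
// statuses Hydra now treats as externally interrupted; other non-zero
// statuses count towards totalFailed, and unfinished builds towards totalQueued.
bool countsAsAborted(bool finished, int buildStatus)
{
    return finished
        && (buildStatus == bsAborted
            || buildStatus == bsCancelled
            || buildStatus == bsTimedOut
            || buildStatus == bsLogLimitExceeded);
}
```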
@@ -15,6 +15,7 @@ our @EXPORT = qw(
forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner
requireBumpPrivileges
requireCancelBuildPrivileges
requireEvalJobsetPrivileges
trim
getLatestFinishedEval getFirstEval
paramToList
@@ -186,6 +187,27 @@ sub isProjectOwner {
defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username }));
}

sub hasEvalJobsetRole {
my ($c) = @_;
return $c->user_exists && $c->check_user_roles("eval-jobset");
}

sub mayEvalJobset {
my ($c, $project) = @_;
return
$c->user_exists &&
(isAdmin($c) ||
hasEvalJobsetRole($c) ||
isProjectOwner($c, $project));
}

sub requireEvalJobsetPrivileges {
my ($c, $project) = @_;
requireUser($c);
accessDenied($c, "Only the project members, administrators, and accounts with eval-jobset privileges can perform this operation.")
unless mayEvalJobset($c, $project);
}

sub hasCancelBuildRole {
my ($c) = @_;
return $c->user_exists && $c->check_user_roles('cancel-build');
@@ -272,7 +294,7 @@ sub requireAdmin {

sub requirePost {
my ($c) = @_;
error($c, "Request must be POSTed.") if $c->request->method ne "POST";
error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST";
}
@@ -36,12 +36,16 @@ our @EXPORT = qw(
jobsetOverview
jobsetOverview_
pathIsInsidePrefix
readIntoSocket
readNixFile
registerRoot
restartBuilds
run
$MACHINE_LOCAL_STORE
);

our $MACHINE_LOCAL_STORE = Nix::Store->new();


sub getHydraHome {
my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n";
@@ -171,6 +175,9 @@ sub getDrvLogPath {
for ($fn . $bucketed, $fn . $bucketed . ".bz2") {
return $_ if -f $_;
}
for ($fn . $bucketed, $fn . $bucketed . ".zst") {
return $_ if -f $_;
}
return undef;
}

@@ -187,6 +194,10 @@ sub findLog {

return undef if scalar @outPaths == 0;

# Filter out any NULLs. Content-addressed derivations
# that haven't built yet or failed to build may have a NULL outPath.
@outPaths = grep {defined} @outPaths;

my @steps = $c->model('DB::BuildSteps')->search(
{ path => { -in => [@outPaths] } },
{ select => ["drvpath"]
@@ -286,8 +297,7 @@ sub getEvals {

my @evals = $evals_result_set->search(
{ hasnewbuilds => 1 },
{ order_by => "$me.id DESC", rows => $rows, offset => $offset
, prefetch => { evaluationerror => [ ] } });
{ order_by => "$me.id DESC", rows => $rows, offset => $offset });
my @res = ();
my $cache = {};

@@ -407,6 +417,16 @@ sub pathIsInsidePrefix {
return $cur;
}

sub readIntoSocket{
my (%args) = @_;
my $sock;

eval {
open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
};

return $sock;
}



@@ -494,7 +514,7 @@ sub restartBuilds {
$builds = $builds->search({ finished => 1 });

foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
next if !isValidPath($build->drvpath);
next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
registerRoot $build->drvpath;
}

@@ -537,7 +557,7 @@ sub getStoreUri {
sub readNixFile {
my ($path) = @_;
return grab(cmd => ["nix", "--experimental-features", "nix-command",
"cat-store", "--store", getStoreUri(), "$path"]);
"store", "cat", "--store", getStoreUri(), "$path"]);
}
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use Nix::Store;

sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -38,9 +37,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
{uri => $uri, revision => $revision});

addTempRoot($cachedInput->storepath) if defined $cachedInput;
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -58,7 +57,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', $stdout;

# FIXME: time window between nix-prefetch-bzr and addTempRoot.
addTempRoot($storePath);
$MACHINE_LOCAL_STORE->addTempRoot($storePath);

$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedBazaarInputs')->create(
@@ -9,11 +9,24 @@ use Hydra::Helper::CatalystUtils;
sub stepFinished {
my ($self, $step, $logPath) = @_;

my $doCompress = $self->{config}->{'compress_build_logs'} // "1";
my $doCompress = $self->{config}->{'compress_build_logs'} // '1';
my $silent = $self->{config}->{'compress_build_logs_silent'} // '0';
my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2';

if ($doCompress eq "1" && -e $logPath) {
print STDERR "compressing ‘$logPath’...\n";
system("bzip2", "--force", $logPath);
if (not -e $logPath or $doCompress ne "1") {
return;
}

if ($silent ne '1') {
print STDERR "compressing '$logPath' with $compression...\n";
}

if ($compression eq 'bzip2') {
system('bzip2', '--force', $logPath);
} elsif ($compression eq 'zstd') {
system('zstd', '--rm', '--quiet', '-T0', $logPath);
} else {
print STDERR "unknown compression type '$compression'\n";
}
}
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use Nix::Store;

sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -58,7 +57,7 @@ sub fetchInput {
{uri => $uri, revision => $revision},
{rows => 1});

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$revision = $cachedInput->revision;
@@ -75,8 +74,8 @@ sub fetchInput {
die "darcs changes --count failed" if $? != 0;

system "rm", "-rf", "$tmpDir/export/_darcs";
$storePath = addToStore("$tmpDir/export", 1, "sha256");
$sha256 = queryPathHash($storePath);
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
$sha256 =~ s/sha256://;

$self->{db}->txn_do(sub {
@@ -186,9 +186,9 @@ sub fetchInput {
{uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
{rows => 1});

addTempRoot($cachedInput->storepath) if defined $cachedInput;
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$revision = $cachedInput->revision;
@@ -217,7 +217,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);

# FIXME: time window between nix-prefetch-git and addTempRoot.
addTempRoot($storePath);
$MACHINE_LOCAL_STORE->addTempRoot($storePath);

$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedGitInputs')->update_or_create(
@@ -261,7 +261,7 @@ sub getCommits {

my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);

my $out = grab(cmd => ["git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath);
my $out = grab(cmd => ["git", "--git-dir=.git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath);

my $res = [];
foreach my $line (split /\n/, $out) {
@@ -88,10 +88,6 @@ sub buildQueued {
common(@_, [], 0);
}

sub buildStarted {
common(@_, [], 1);
}

sub buildFinished {
common(@_, 2);
}
@@ -30,7 +30,7 @@ sub _iterate {
$pulls->{$pull->{number}} = $pull;
}
# TODO Make Link header parsing more robust!!!
my @links = split ',', $res->header("Link");
my @links = split ',', ($res->header("Link") // "");
my $next = "";
foreach my $link (@links) {
my ($url, $rel) = split ";", $link;
@@ -1,89 +0,0 @@
package Hydra::Plugin::HipChatNotification;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;

sub isEnabled {
my ($self) = @_;
return defined $self->{config}->{hipchat};
}

sub buildFinished {
my ($self, $topbuild, $dependents) = @_;

my $cfg = $self->{config}->{hipchat};
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";

# Figure out to which rooms to send notification. For each email
# room, we send one aggregate message.
my %rooms;
foreach my $build ($topbuild, @{$dependents}) {
my $prevBuild = getPreviousBuild($build);
my $jobName = showJobName $build;

foreach my $room (@config) {
my $force = $room->{force};
next unless $jobName =~ /^$room->{jobs}$/;

# If build is cancelled or aborted, do not send email.
next if ! $force && ($build->buildstatus == 4 || $build->buildstatus == 3);

# If there is a previous (that is not cancelled or aborted) build
# with same buildstatus, do not send email.
next if ! $force && defined $prevBuild && ($build->buildstatus == $prevBuild->buildstatus);

$rooms{$room->{room}} //= { room => $room, builds => [] };
push @{$rooms{$room->{room}}->{builds}}, $build;
}
}

return if scalar keys %rooms == 0;

my ($authors, $nrCommits) = getResponsibleAuthors($topbuild, $self->{plugins});

# Send a message to each room.
foreach my $roomId (keys %rooms) {
my $room = $rooms{$roomId};
my @deps = grep { $_->id != $topbuild->id } @{$room->{builds}};

my $img =
$topbuild->buildstatus == 0 ? "$baseurl/static/images/checkmark_16.png" :
$topbuild->buildstatus == 2 ? "$baseurl/static/images/dependency_16.png" :
$topbuild->buildstatus == 4 ? "$baseurl/static/images/cancelled_16.png" :
"$baseurl/static/images/error_16.png";

my $msg = "";
$msg .= "<img src='$img'/> ";
$msg .= "Job <a href='$baseurl/job/${\$topbuild->jobset->get_column('project')}/${\$topbuild->jobset->get_column('name')}/${\$topbuild->get_column('job')}'>${\showJobName($topbuild)}</a>";
$msg .= " (and ${\scalar @deps} others)" if scalar @deps > 0;
$msg .= ": <a href='$baseurl/build/${\$topbuild->id}'>" . showStatus($topbuild) . "</a>";

if (scalar keys %{$authors} > 0) {
# FIXME: HTML escaping
my @x = map { "<a href='mailto:$authors->{$_}'>$_</a>" } (sort keys %{$authors});
$msg .= ", likely due to ";
$msg .= "$nrCommits commits by " if $nrCommits > 1;
$msg .= join(" or ", scalar @x > 1 ? join(", ", @x[0..scalar @x - 2]) : (), $x[-1]);
}

print STDERR "sending hipchat notification to room $roomId: $msg\n";

my $ua = LWP::UserAgent->new();
my $resp = $ua->post('https://api.hipchat.com/v1/rooms/message?format=json&auth_token=' . $room->{room}->{token}, {
room_id => $roomId,
from => 'Hydra',
message => $msg,
message_format => 'html',
notify => $room->{room}->{notify} || 0,
color => $topbuild->buildstatus == 0 ? 'green' : 'red' });

print STDERR $resp->status_line, ": ", $resp->decoded_content,"\n" if !$resp->is_success;
}
}

1;
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Nix;
use Hydra::Helper::Exec;
use Nix::Store;
use Fcntl qw(:flock);

sub supportedInputTypes {
@@ -68,9 +67,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
{uri => $uri, branch => $branch, revision => $revision});

addTempRoot($cachedInput->storepath) if defined $cachedInput;
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -85,7 +84,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', $stdout;

# FIXME: time window between nix-prefetch-hg and addTempRoot.
addTempRoot($storePath);
$MACHINE_LOCAL_STORE->addTempRoot($storePath);

$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedHgInputs')->update_or_create(
@@ -5,7 +5,6 @@ use warnings;
use parent 'Hydra::Plugin';
use POSIX qw(strftime);
use Hydra::Helper::Nix;
use Nix::Store;

sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -30,7 +29,7 @@ sub fetchInput {
{srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
{rows => 1, order_by => "lastseen DESC"});

if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$timestamp = $cachedInput->timestamp;
@@ -46,7 +45,7 @@ sub fetchInput {
}
chomp $storePath;

$sha256 = (queryPathInfo($storePath, 0))[1] or die;
$sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;

($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
{srcpath => $uri, sha256hash => $sha256});
@@ -14,6 +14,7 @@ use Nix::Config;
|
||||
use Nix::Store;
|
||||
use Hydra::Model::DB;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use Hydra::Helper::Nix;
|
||||
|
||||
sub isEnabled {
|
||||
my ($self) = @_;
|
||||
@@ -92,7 +93,7 @@ sub buildFinished {
|
||||
my $hash = substr basename($path), 0, 32;
|
||||
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0);
|
||||
my $system;
|
||||
if (defined $deriver and isValidPath($deriver)) {
|
||||
if (defined $deriver and $MACHINE_LOCAL_STORE->isValidPath($deriver)) {
|
||||
$system = derivationFromPath($deriver)->{platform};
|
||||
}
|
||||
foreach my $reference (@{$refs}) {
|
||||
|
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
|
||||
use Hydra::Helper::Exec;
|
||||
use Hydra::Helper::Nix;
|
||||
use IPC::Run;
|
||||
use Nix::Store;
|
||||
|
||||
sub supportedInputTypes {
|
||||
my ($self, $inputTypes) = @_;
|
||||
@@ -45,9 +44,9 @@ sub fetchInput {
|
||||
(my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
|
||||
{uri => $uri, revision => $revision});
|
||||
|
||||
addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
||||
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
||||
|
||||
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
|
||||
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
|
||||
$storePath = $cachedInput->storepath;
|
||||
$sha256 = $cachedInput->sha256hash;
|
||||
} else {
|
||||
@@ -62,16 +61,16 @@ sub fetchInput {
|
||||
die "error checking out Subversion repo at `$uri':\n$stderr" if $res;
|
||||
|
||||
if ($type eq "svn-checkout") {
|
||||
$storePath = addToStore($wcPath, 1, "sha256");
|
||||
$storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
|
||||
} else {
|
||||
# Hm, if the Nix Perl bindings supported filters in
|
||||
# addToStore(), then we wouldn't need to make a copy here.
|
||||
my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
|
||||
(system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
|
||||
$storePath = addToStore("$tmpDir/source", 1, "sha256");
|
||||
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
|
||||
}
|
||||
|
||||
$sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
|
||||
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;
|
||||
|
||||
$self->{db}->txn_do(sub {
|
||||
$self->{db}->resultset('CachedSubversionInputs')->update_or_create(
|
||||
|
@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
|
||||
=head2 path
|
||||
|
||||
data_type: 'text'
|
||||
is_nullable: 0
|
||||
is_nullable: 1
|
||||
|
||||
=cut
|
||||
|
||||
@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
|
||||
"name",
|
||||
{ data_type => "text", is_nullable => 0 },
|
||||
"path",
|
||||
{ data_type => "text", is_nullable => 0 },
|
||||
{ data_type => "text", is_nullable => 1 },
|
||||
);
|
||||
|
||||
=head1 PRIMARY KEY
|
||||
@@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
|
||||
);
|
||||
|
||||
|
||||
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
|
||||
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg
|
||||
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
|
||||
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng
|
||||
|
||||
my %hint = (
|
||||
columns => [
|
||||
|
@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
|
||||
=head2 path
|
||||
|
||||
data_type: 'text'
|
||||
is_nullable: 0
|
||||
is_nullable: 1
|
||||
|
||||
=cut
|
||||
|
||||
@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
|
||||
"name",
|
||||
{ data_type => "text", is_nullable => 0 },
|
||||
"path",
|
||||
{ data_type => "text", is_nullable => 0 },
|
||||
{ data_type => "text", is_nullable => 1 },
|
||||
);
|
||||
|
||||
=head1 PRIMARY KEY
|
||||
@@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
|
||||
);
|
||||
|
||||
|
||||
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
|
||||
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw
|
||||
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
|
||||
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q
|
||||
|
||||
|
||||
# You can replace this text with custom code or comments, and it will be preserved on regeneration
|
||||
|
@@ -105,4 +105,6 @@ __PACKAGE__->add_column(
|
||||
"+id" => { retrieve_on_insert => 1 }
|
||||
);
|
||||
|
||||
__PACKAGE__->mk_group_accessors('column' => 'has_error');
|
||||
|
||||
1;
|
||||
|
@@ -386,6 +386,8 @@ __PACKAGE__->add_column(
|
||||
"+id" => { retrieve_on_insert => 1 }
|
||||
);
|
||||
|
||||
__PACKAGE__->mk_group_accessors('column' => 'has_error');
|
||||
|
||||
sub supportsDynamicRunCommand {
|
||||
my ($self) = @_;
|
||||
|
||||
|
@@ -216,7 +216,7 @@ sub json_hint {
|
||||
|
||||
sub _authenticator() {
|
||||
my $authenticator = Crypt::Passphrase->new(
|
||||
encoder => 'Argon2',
|
||||
encoder => { module => 'Argon2', output_size => 16 },
|
||||
validators => [
|
||||
(sub {
|
||||
my ($password, $hash) = @_;
|
||||
|
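The authenticator's Argon2 encoder is now configured explicitly, including a 16-byte tag size, instead of being named by module alone. A sketch (not Hydra code) of how such a Crypt::Passphrase object behaves:

    use Crypt::Passphrase;

    my $auth = Crypt::Passphrase->new(
        encoder => { module => 'Argon2', output_size => 16 },
    );

    my $hash = $auth->hash_password('hunter2');   # PHC-format string carrying a 16-byte tag
    die "bad password" unless $auth->verify_password('hunter2', $hash);
    # needs_rehash() is true for hashes created under different parameters, which lets
    # stored hashes be upgraded transparently the next time the password is checked.
    my $stale = $auth->needs_rehash($hash);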
30
src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm
Normal file
@@ -0,0 +1,30 @@
package Hydra::Schema::ResultSet::EvaluationErrors;

use strict;
use utf8;
use warnings;

use parent 'DBIx::Class::ResultSet';

use Storable qw(dclone);

__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');

# Exclude expensive error message values unless explicitly requested, and
# replace them with a summary field describing their presence/absence.
sub search_rs {
my ( $class, $query, $attrs ) = @_;

if ($attrs) {
$attrs = dclone($attrs);
}

unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
$attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
}
unless (exists $attrs->{'+columns'}->{'errormsg'}) {
push @{ $attrs->{'remove_columns'} }, 'errormsg';
}

return $class->next::method($query, $attrs);
}
30
src/lib/Hydra/Schema/ResultSet/Jobsets.pm
Normal file
@@ -0,0 +1,30 @@
package Hydra::Schema::ResultSet::Jobsets;

use strict;
use utf8;
use warnings;

use parent 'DBIx::Class::ResultSet';

use Storable qw(dclone);

__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');

# Exclude expensive error message values unless explicitly requested, and
# replace them with a summary field describing their presence/absence.
sub search_rs {
my ( $class, $query, $attrs ) = @_;

if ($attrs) {
$attrs = dclone($attrs);
}

unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
$attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
}
unless (exists $attrs->{'+columns'}->{'errormsg'}) {
push @{ $attrs->{'remove_columns'} }, 'errormsg';
}

return $class->next::method($query, $attrs);
}

@@ -8,6 +8,7 @@ use MIME::Base64;
use Nix::Manifest;
use Nix::Store;
use Nix::Utils;
use Hydra::Helper::Nix;
use base qw/Catalyst::View/;

sub process {
@@ -17,7 +18,7 @@ sub process {

$c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type

my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1);

my $info;
$info .= "StorePath: $storePath\n";
@@ -28,8 +29,8 @@ sub process {
$info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
if (defined $deriver) {
$info .= "Deriver: " . basename $deriver . "\n";
if (isValidPath($deriver)) {
my $drv = derivationFromPath($deriver);
if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) {
my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver);
$info .= "System: $drv->{platform}\n";
}
}
@@ -16,7 +16,10 @@ sub process {

my $tail = int($c->stash->{tail} // "0");

if ($logPath =~ /\.bz2$/) {
if ($logPath =~ /\.zst$/) {
my $doTail = $tail ? "| tail -n '$tail'" : "";
open($fh, "-|", "zstd -dc < '$logPath' $doTail") or die;
} elsif ($logPath =~ /\.bz2$/) {
my $doTail = $tail ? "| tail -n '$tail'" : "";
open($fh, "-|", "bzip2 -dc < '$logPath' $doTail") or die;
} else {
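The log view now recognises zstd-compressed logs in addition to bzip2, picking a decompressor from the file extension and streaming it through an optional tail. The dispatch, reduced to a sketch (zstd and bzip2 must be on PATH; the real fallback branch opens the file directly rather than piping through cat):

    my $cat = $logPath =~ /\.zst$/ ? "zstd -dc"
            : $logPath =~ /\.bz2$/ ? "bzip2 -dc"
            :                        "cat";
    open(my $fh, "-|", "$cat < '$logPath'") or die "cannot open $logPath";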
@@ -6,6 +6,7 @@ use base 'Catalyst::View::TT';
use Template::Plugin::HTML;
use Hydra::Helper::Nix;
use Time::Seconds;
use Digest::SHA qw(sha1_hex);

__PACKAGE__->config(
TEMPLATE_EXTENSION => '.tt',
@@ -25,8 +26,14 @@ __PACKAGE__->config(
makeNameTextForJobset
relativeDuration
stripSSHUser
metricDivId
/]);

sub metricDivId {
my ($self, $c, $text) = @_;
return "metric-" . sha1_hex($text);
}

sub buildLogExists {
my ($self, $c, $build) = @_;
return 1 if defined $c->config->{log_prefix};
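metricDivId hashes the metric name before using it as a DOM id, so names containing dots, spaces, slashes or other characters that are awkward in selectors all map to the same safe shape; the previous template code only rewrote dots. For example:

    use Digest::SHA qw(sha1_hex);

    # Any metric name yields "metric-" followed by 40 hex characters:
    my $id = "metric-" . sha1_hex("time/publish docs");   # hypothetical metric name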
@@ -1,22 +0,0 @@
PERL_MODULES = \
$(wildcard *.pm) \
$(wildcard Hydra/*.pm) \
$(wildcard Hydra/Helper/*.pm) \
$(wildcard Hydra/Model/*.pm) \
$(wildcard Hydra/View/*.pm) \
$(wildcard Hydra/Schema/*.pm) \
$(wildcard Hydra/Schema/Result/*.pm) \
$(wildcard Hydra/Schema/ResultSet/*.pm) \
$(wildcard Hydra/Controller/*.pm) \
$(wildcard Hydra/Base/*.pm) \
$(wildcard Hydra/Base/Controller/*.pm) \
$(wildcard Hydra/Script/*.pm) \
$(wildcard Hydra/Component/*.pm) \
$(wildcard Hydra/Event/*.pm) \
$(wildcard Hydra/Plugin/*.pm)

EXTRA_DIST = \
$(PERL_MODULES)

hydradir = $(libexecdir)/hydra/lib
nobase_hydra_DATA = $(PERL_MODULES)

@@ -2,7 +2,8 @@

#include <pqxx/pqxx>

#include "util.hh"
#include <nix/util/environment-variables.hh>
#include <nix/util/util.hh>

struct Connection : pqxx::connection

@@ -2,7 +2,8 @@

#include <map>

#include "util.hh"
#include <nix/util/file-system.hh>
#include <nix/util/util.hh>

struct HydraConfig
{

5
src/libhydra/meson.build
Normal file
@@ -0,0 +1,5 @@
libhydra_inc = include_directories('.')

libhydra_dep = declare_dependency(
include_directories: [libhydra_inc],
)

85
src/meson.build
Normal file
@@ -0,0 +1,85 @@
# Native code
subdir('libhydra')
subdir('hydra-evaluator')
subdir('hydra-queue-runner')

hydra_libexecdir = get_option('libexecdir') / 'hydra'

# Data and interpreted
foreach dir : ['lib', 'root']
install_subdir(dir,
install_dir: hydra_libexecdir,
)
endforeach
subdir('sql')
subdir('ttf')

# Static files for website

hydra_libexecdir_static = hydra_libexecdir / 'root' / 'static'

## Bootstrap

bootstrap_name = 'bootstrap-4.3.1-dist'
bootstrap = custom_target(
'extract-bootstrap',
input: 'root' / (bootstrap_name + '.zip'),
output: bootstrap_name,
command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
)
custom_target(
'name-bootstrap',
input: bootstrap,
output: 'bootstrap',
command: ['cp', '-r', '@INPUT@' , '@OUTPUT@'],
install: true,
install_dir: hydra_libexecdir_static,
)

## Flot

custom_target(
'extract-flot',
input: 'root' / 'flot-0.8.3.zip',
output: 'flot',
command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
install: true,
install_dir: hydra_libexecdir_static / 'js',
)

## Fontawesome

fontawesome_name = 'fontawesome-free-5.10.2-web'
fontawesome = custom_target(
'extract-fontawesome',
input: 'root' / (fontawesome_name + '.zip'),
output: fontawesome_name,
command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
)
custom_target(
'name-fontawesome-css',
input: fontawesome,
output: 'css',
command: ['cp', '-r', '@INPUT@/css', '@OUTPUT@'],
install: true,
install_dir: hydra_libexecdir_static / 'fontawesome',
)
custom_target(
'name-fontawesome-webfonts',
input: fontawesome,
output: 'webfonts',
command: ['cp', '-r', '@INPUT@/webfonts', '@OUTPUT@'],
install: true,
install_dir: hydra_libexecdir_static / 'fontawesome',
)

# Scripts

install_subdir('script',
install_dir: get_option('bindir'),
exclude_files: [
'hydra-dev-server',
],
install_mode: 'rwxr-xr-x',
strip_directory: true,
)
@@ -1,39 +0,0 @@
TEMPLATES = $(wildcard *.tt)
STATIC = \
$(wildcard static/images/*) \
$(wildcard static/css/*) \
static/js/bootbox.min.js \
static/js/popper.min.js \
static/js/common.js \
static/js/jquery/jquery-3.4.1.min.js \
static/js/jquery/jquery-ui-1.10.4.min.js

FLOT = flot-0.8.3.zip
BOOTSTRAP = bootstrap-4.3.1-dist.zip
FONTAWESOME = fontawesome-free-5.10.2-web.zip

ZIPS = $(FLOT) $(BOOTSTRAP) $(FONTAWESOME)

EXTRA_DIST = $(TEMPLATES) $(STATIC) $(ZIPS)

hydradir = $(libexecdir)/hydra/root
nobase_hydra_DATA = $(EXTRA_DIST)

all:
	mkdir -p $(srcdir)/static/js
	unzip -u -d $(srcdir)/static $(BOOTSTRAP)
	rm -rf $(srcdir)/static/bootstrap
	mv $(srcdir)/static/$(basename $(BOOTSTRAP)) $(srcdir)/static/bootstrap
	unzip -u -d $(srcdir)/static/js $(FLOT)
	unzip -u -d $(srcdir)/static $(FONTAWESOME)
	rm -rf $(srcdir)/static/fontawesome
	mv $(srcdir)/static/$(basename $(FONTAWESOME)) $(srcdir)/static/fontawesome

install-data-local: $(ZIPS)
	mkdir -p $(hydradir)/static/js
	cp -prvd $(srcdir)/static/js/* $(hydradir)/static/js
	mkdir -p $(hydradir)/static/bootstrap
	cp -prvd $(srcdir)/static/bootstrap/* $(hydradir)/static/bootstrap
	mkdir -p $(hydradir)/static/fontawesome/{css,webfonts}
	cp -prvd $(srcdir)/static/fontawesome/css/* $(hydradir)/static/fontawesome/css
	cp -prvd $(srcdir)/static/fontawesome/webfonts/* $(hydradir)/static/fontawesome/webfonts

@@ -33,7 +33,7 @@
<div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<form>
<form id="signin-form">
<div class="modal-body">
<div class="form-group">
<label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
</div>
</div>
<div class="modal-footer">
<button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
<button type="submit" class="btn btn-primary">Sign in</button>
<button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
</div>
</form>
@@ -57,10 +57,11 @@

function finishSignOut() { }

$("#do-signin").click(function() {
$("#signin-form").submit(function(e) {
e.preventDefault();
requestJSON({
url: "[% c.uri_for('/login') %]",
data: $(this).parents("form").serialize(),
data: $(this).serialize(),
type: 'POST',
success: function(data) {
window.location.reload();
@@ -82,7 +83,7 @@
function onGoogleSignIn(googleUser) {
requestJSON({
url: "[% c.uri_for('/google-login') %]",
data: "id_token=" + googleUser.getAuthResponse().id_token,
data: "id_token=" + googleUser.credential,
type: 'POST',
success: function(data) {
window.location.reload();
@@ -91,9 +92,6 @@
return false;
};

$("#google-signin").click(function() {
$(".g-signin2:first-child > div").click();
});
</script>
[% END %]

@@ -4,6 +4,6 @@

<div class="dep-tree">
<ul class="tree">
[% INCLUDE renderNode node=buildTimeGraph %]
[% INCLUDE renderNode node=buildTimeGraph isRoot=1 %]
</ul>
</div>

@@ -61,21 +61,7 @@ END;
<td>[% IF step.busy != 0 || ((step.machine || step.starttime) && (step.status == 0 || step.status == 1 || step.status == 3 || step.status == 4 || step.status == 7)); INCLUDE renderMachineName machine=step.machine; ELSE; "<em>n/a</em>"; END %]</td>
<td class="step-status">
[% IF step.busy != 0 %]
[% IF step.busy == 1 %]
<strong>Preparing</strong>
[% ELSIF step.busy == 10 %]
<strong>Connecting</strong>
[% ELSIF step.busy == 20 %]
<strong>Sending inputs</strong>
[% ELSIF step.busy == 30 %]
<strong>Building</strong>
[% ELSIF step.busy == 40 %]
<strong>Receiving outputs</strong>
[% ELSIF step.busy == 50 %]
<strong>Post-processing</strong>
[% ELSE %]
<strong>Unknown state</strong>
[% END %]
[% INCLUDE renderBusyStatus %]
[% ELSIF step.status == 0 %]
[% IF step.isnondeterministic %]
<span class="warn">Succeeded with non-determistic result</span>
@@ -481,11 +467,11 @@ END;
[% END %]

[% IF drvAvailable %]
[% INCLUDE makeLazyTab tabName="tabs-build-deps" uri=c.uri_for('/build' build.id 'build-deps') %]
[% INCLUDE makeLazyTab tabName="tabs-build-deps" uri=c.uri_for('/build' build.id 'build-deps') callback="makeTreeCollapsible" %]
[% END %]

[% IF available %]
[% INCLUDE makeLazyTab tabName="tabs-runtime-deps" uri=c.uri_for('/build' build.id 'runtime-deps') %]
[% INCLUDE makeLazyTab tabName="tabs-runtime-deps" uri=c.uri_for('/build' build.id 'runtime-deps') callback="makeTreeCollapsible" %]
[% END %]

<div id="tabs-runcommandlogs" class="tab-pane">
@@ -577,7 +563,7 @@ END;

[% IF eval.flake %]

<p>If you have <a href='https://nixos.org/nix/download.html'>Nix
<p>If you have <a href='https://nixos.org/download/'>Nix
installed</a>, you can reproduce this build on your own machine by
running the following command:</p>

@@ -587,7 +573,7 @@ END;

[% ELSE %]

<p>If you have <a href='https://nixos.org/nix/download.html'>Nix
<p>If you have <a href='https://nixos.org/download/'>Nix
installed</a>, you can reproduce this build on your own machine by
downloading <a [% HTML.attributes(href => url) %]>a script</a>
that checks out all inputs of the build and then invokes Nix to

@@ -91,6 +91,17 @@ BLOCK renderDuration;
duration % 60 %]s[%
END;

BLOCK renderDrvInfo;
drvname = step.drvpath
.substr(11) # strip `/nix/store/`
.split('-').slice(1).join("-") # strip hash part
.substr(0, -4); # strip `.drv`
IF drvname != releasename;
IF step.type == 0; action = "Build"; ELSE; action = "Substitution"; END;
IF drvname; %]<em> ([% action %] of [% drvname %])</em>[% END;
END;
END;

BLOCK renderBuildListHeader %]
<table class="table table-striped table-condensed clickable-rows">
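renderDrvInfo reduces a step's derivation path to a bare package name and shows it next to the build entry when it differs from the release name. The same transformation written out in Perl on a hypothetical path:

    my $drvpath = "/nix/store/<hash>-hello-2.12.1.drv";   # hypothetical
    my $name = substr($drvpath, 11);   # drop "/nix/store/", as .substr(11) does
    $name =~ s/^[^-]*-//;              # drop the hash, as .split('-').slice(1).join("-") does
    $name =~ s/\.drv$//;               # drop ".drv", as .substr(0, -4) does
    # $name is now "hello-2.12.1"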
@@ -131,7 +142,12 @@ BLOCK renderBuildListBody;
[% END %]
<td><a class="row-link" href="[% link %]">[% build.id %]</a></td>
[% IF !hideJobName %]
<td><a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</td>
<td>
<a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</a>
[% IF showStepName %]
[% INCLUDE renderDrvInfo step=build.buildsteps releasename=build.nixname %]
[% END %]
</td>
[% END %]
<td class="nowrap">[% t = showSchedulingInfo ? build.timestamp : build.stoptime; IF t; INCLUDE renderRelativeDate timestamp=(showSchedulingInfo ? build.timestamp : build.stoptime); ELSE; "-"; END %]</td>
<td>[% !showSchedulingInfo and build.get_column('releasename') ? build.get_column('releasename') : build.nixname %]</td>
@@ -245,6 +261,27 @@ BLOCK renderBuildStatusIcon;
END;

BLOCK renderBusyStatus;
IF step.busy == 1 %]
<strong>Preparing</strong>
[% ELSIF step.busy == 10 %]
<strong>Connecting</strong>
[% ELSIF step.busy == 20 %]
<strong>Sending inputs</strong>
[% ELSIF step.busy == 30 %]
<strong>Building</strong>
[% ELSIF step.busy == 35 %]
<strong>Waiting to receive outputs</strong>
[% ELSIF step.busy == 40 %]
<strong>Receiving outputs</strong>
[% ELSIF step.busy == 50 %]
<strong>Post-processing</strong>
[% ELSE %]
<strong>Unknown state</strong>
[% END;
END;

BLOCK renderStatus;
IF build.finished;
buildstatus = build.buildstatus;
@@ -374,7 +411,7 @@ BLOCK renderInputDiff; %]
[% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
[% IF bi1.type == "git" %]
<tr><td>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 12) _ ' to ' _ bi2.revision.substr(0, 12)) %]</tt>
</td></tr>
[% ELSE %]
<tr><td>
@@ -476,7 +513,7 @@ BLOCK renderEvals %]
ELSE %]
-
[% END %]
[% IF eval.evaluationerror.errormsg %]
[% IF eval.evaluationerror.has_error %]
<span class="badge badge-warning">Eval Errors</span>
[% END %]
</td>
@@ -520,7 +557,11 @@ BLOCK makeLazyTab %]
<center><span class="spinner-border spinner-border-sm"/></center>
</div>
<script>
$(function() { makeLazyTab("[% tabName %]", "[% uri %]"); });
[% IF callback.defined %]
$(function() { makeLazyTab("[% tabName %]", "[% uri %]", [% callback %] ); });
[% ELSE %]
$(function() { makeLazyTab("[% tabName %]", "[% uri %]", null ); });
[% END %]
</script>
[% END;

@@ -598,7 +639,7 @@ BLOCK renderJobsetOverview %]
<td>[% HTML.escape(j.description) %]</td>
<td>[% IF j.lastcheckedtime;
INCLUDE renderDateTime timestamp = j.lastcheckedtime;
IF j.errormsg || j.fetcherrormsg; %] <span class = 'badge badge-warning'>Error</span>[% END;
IF j.has_error || j.fetcherrormsg; %] <span class = 'badge badge-warning'>Error</span>[% END;
ELSE; "-";
END %]</td>
[% IF j.get_column('nrtotal') > 0 %]

@@ -19,9 +19,16 @@
<tt>[% node.name %]</tt> (<em>no info</em>)
[% END %]
</span></span>
[% IF isRoot %]
<span class="dep-tree-buttons">
(<a href="#" class="tree-collapse-all">collapse all</a>
–
<a href="#" class="tree-expand-all">expand all</a>)
</span>
[% END %]
[% IF node.refs.size > 0 %]
<ul class="subtree">
[% FOREACH ref IN node.refs; INCLUDE renderNode node=ref; END %]
[% FOREACH ref IN node.refs; INCLUDE renderNode node=ref isRoot=0; END %]
</ul>
[% END %]
[% END %]

26
src/root/eval-error.tt
Normal file
@@ -0,0 +1,26 @@
[% PROCESS common.tt %]
<!DOCTYPE html>

<html lang="en">

<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />
[% INCLUDE style.tt %]
</head>

<body>

<div class="tab-content tab-pane">
<div id="tabs-errors" class="">
[% IF eval %]
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
[% ELSIF jobset %]
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
[% END %]
</div>
</div>
</body>
</html>

@@ -18,8 +18,7 @@

<h3>Metric: <a [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]><tt>[%HTML.escape(metric.name)%]</tt></a></h3>

[% id = "metric-" _ metric.name;
id = id.replace('\.', '_');
[% id = metricDivId(metric.name);
INCLUDE createChart dataUrl=c.uri_for('/job' project.name jobset.name job 'metric' metric.name); %]

[% END %]

@@ -48,16 +48,16 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
<a class="nav-link dropdown-toggle" data-toggle="dropdown" href="#">Actions</a>
<div class="dropdown-menu">
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('create_jobset'), [eval.id]) %]">Create a jobset from this evaluation</a>
[% IF unfinished.size > 0 %]
[% IF totalQueued > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('cancel'), [eval.id]) %]">Cancel all scheduled builds</a>
[% END %]
[% IF aborted.size > 0 || stillFail.size > 0 || nowFail.size > 0 || failed.size > 0 %]
[% IF totalFailed > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('restart_failed'), [eval.id]) %]">Restart all failed builds</a>
[% END %]
[% IF aborted.size > 0 %]
[% IF totalAborted > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('restart_aborted'), [eval.id]) %]">Restart all aborted builds</a>
[% END %]
[% IF unfinished.size > 0 %]
[% IF totalQueued > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('bump'), [eval.id]) %]">Bump builds to front of queue</a>
[% END %]
</div>
@@ -65,7 +65,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
[% END %]

[% IF aborted.size > 0 %]
<li class="nav-item"><a class="nav-link" href="#tabs-aborted" data-toggle="tab"><span class="text-warning">Aborted Jobs ([% aborted.size %])</span></a></li>
<li class="nav-item"><a class="nav-link" href="#tabs-aborted" data-toggle="tab"><span class="text-warning">Aborted / Timed out Jobs ([% aborted.size %])</span></a></li>
[% END %]
[% IF nowFail.size > 0 %]
<li class="nav-item"><a class="nav-link" href="#tabs-now-fail" data-toggle="tab"><span class="text-warning">Newly Failing Jobs ([% nowFail.size %])</span></a></li>
@@ -90,7 +90,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
[% END %]
<li class="nav-item"><a class="nav-link" href="#tabs-inputs" data-toggle="tab">Inputs</a></li>

[% IF eval.evaluationerror.errormsg %]
[% IF eval.evaluationerror.has_error %]
<li class="nav-item"><a class="nav-link" href="#tabs-errors" data-toggle="tab"><span class="text-warning">Evaluation Errors</span></a></li>
[% END %]
</ul>
@@ -108,13 +108,6 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),

<div class="tab-content">

[% IF eval.evaluationerror.errormsg %]
<div id="tabs-errors" class="tab-pane">
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
</div>
[% END %]

<div id="tabs-aborted" class="tab-pane">
[% INCLUDE renderSome builds=aborted tabname="#tabs-aborted" %]
</div>
@@ -172,10 +165,9 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
[% END %]
</div>

[% IF eval.evaluationerror.errormsg %]
[% IF eval.evaluationerror.has_error %]
<div id="tabs-errors" class="tab-pane">
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
<iframe src="[% c.uri_for(c.controller('JobsetEval').action_for('errors'), [eval.id], params) %]" loading="lazy" frameBorder="0" width="100%"></iframe>
</div>
[% END %]
</div>

@@ -61,7 +61,7 @@
[% END %]

<li class="nav-item"><a class="nav-link active" href="#tabs-evaluations" data-toggle="tab">Evaluations</a></li>
[% IF jobset.errormsg || jobset.fetcherrormsg %]
[% IF jobset.has_error || jobset.fetcherrormsg %]
<li class="nav-item"><a class="nav-link" href="#tabs-errors" data-toggle="tab"><span class="text-warning">Evaluation Errors</span></a></li>
[% END %]
<li class="nav-item"><a class="nav-link" href="#tabs-jobs" data-toggle="tab">Jobs</a></li>
@@ -79,7 +79,7 @@
<th>Last checked:</th>
<td>
[% IF jobset.lastcheckedtime %]
[% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.errormsg || jobset.fetcherrormsg %]<em class="text-warning">with errors!</em>[% ELSE %]<em>no errors</em>[% END %]
[% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.has_error || jobset.fetcherrormsg %]<em class="text-warning">with errors!</em>[% ELSE %]<em>no errors</em>[% END %]
[% ELSE %]
<em>never</em>
[% END %]
@@ -117,10 +117,9 @@

</div>

[% IF jobset.errormsg || jobset.fetcherrormsg %]
[% IF jobset.has_error || jobset.fetcherrormsg %]
<div id="tabs-errors" class="tab-pane">
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
<iframe src="[% c.uri_for('/jobset' project.name jobset.name "errors") %]" loading="lazy" frameBorder="0" width="100%"></iframe>
</div>
[% END %]

@@ -205,6 +204,7 @@
if (!c) return;
requestJSON({
url: "[% HTML.escape(c.uri_for('/api/push', { jobsets = project.name _ ':' _ jobset.name, force = "1" })) %]",
type: 'POST',
success: function(data) {
bootbox.alert("The jobset has been scheduled for evaluation.");
}

@@ -10,31 +10,7 @@

<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />

<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-3.4.1.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-ui-1.10.4.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/js/moment/moment-2.24.0.min.js") %]"></script>

<meta name="viewport" content="width=device-width, initial-scale=1.0" />

<link href="[% c.uri_for("/static/fontawesome/css/all.css") %]" rel="stylesheet" />
<script type="text/javascript" src="[% c.uri_for("/static/js/popper.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/bootstrap/js/bootstrap.min.js") %]"></script>
<link href="[% c.uri_for("/static/bootstrap/css/bootstrap.min.css") %]" rel="stylesheet" />

<!-- hydra.css may need to be moved to before boostrap to make the @media rule work. -->
<link rel="stylesheet" href="[% c.uri_for("/static/css/hydra.css") %]" type="text/css" />
<link rel="stylesheet" href="[% c.uri_for("/static/css/rotated-th.css") %]" type="text/css" />

<style>
.popover { max-width: 40%; }
</style>

<script type="text/javascript" src="[% c.uri_for("/static/js/bootbox.min.js") %]"></script>

<link rel="stylesheet" href="[% c.uri_for("/static/css/tree.css") %]" type="text/css" />

<script type="text/javascript" src="[% c.uri_for("/static/js/common.js") %]"></script>
[% INCLUDE style.tt %]

[% IF c.config.enable_google_login %]
<meta name="google-signin-client_id" content="[% c.config.google_client_id %]">
@@ -93,7 +69,7 @@
<footer class="navbar">
<hr />
<small>
<em><a href="http://nixos.org/hydra" target="_blank">Hydra</a> [% HTML.escape(version) %] (using [% HTML.escape(nixVersion) %]).</em>
<em><a href="http://nixos.org/hydra" target="_blank" class="squiggle">Hydra</a> [% HTML.escape(version) %] (using [% HTML.escape(nixVersion) %] and [% HTML.escape(nixEvalJobsVersion) %]).</em>
[% IF c.user_exists %]
You are signed in as <tt>[% HTML.escape(c.user.username) %]</tt>
[%- IF c.user.type == 'google' %] via Google[% END %].

@@ -6,10 +6,10 @@
<thead>
<tr>
<th>Job</th>
<th>System</th>
<th>Build</th>
<th>Step</th>
<th>What</th>
<th>Status</th>
<th>Since</th>
</tr>
</thead>
@@ -17,7 +17,7 @@
[% name = m.key ? stripSSHUser(m.key) : "localhost" %]
<thead>
<tr>
<th colspan="6">
<th colspan="7">
<tt [% IF m.value.disabled %]style="text-decoration: line-through;"[% END %]>[% INCLUDE renderMachineName machine=m.key %]</tt>
[% IF m.value.systemTypes %]
<span class="muted" style="font-weight: normal;">
@@ -40,10 +40,10 @@
[% idle = 0 %]
<tr>
<td><tt>[% INCLUDE renderFullJobName project=step.project jobset=step.jobset job=step.job %]</tt></td>
<td><tt>[% step.system %]</tt></td>
<td><a href="[% c.uri_for('/build' step.build) %]">[% step.build %]</a></td>
<td>[% IF step.busy >= 30 %]<a class="row-link" href="[% c.uri_for('/build' step.build 'nixlog' step.stepnr 'tail') %]">[% step.stepnr %]</a>[% ELSE; step.stepnr; END %]</td>
<td><tt>[% step.drvpath.match('-(.*)').0 %]</tt></td>
<td>[% INCLUDE renderBusyStatus %]</td>
<td style="width: 10em">[% INCLUDE renderDuration duration = curTime - step.starttime %] </td>
</tr>
[% END %]

@@ -7,7 +7,7 @@ main() {

set -e

tmpDir=${TMPDIR:-/tmp}/build-[% build.id +%]
tmpDir=$(realpath "${TMPDIR:-/tmp}")/build-[% build.id +%]
declare -a args extraArgs
@@ -1,5 +1,5 @@
div.skip-topbar {
padding-top: 40px;
padding-top: 20px;
margin-bottom: 1.5em;
}

@@ -33,6 +33,11 @@ span:target > span.dep-tree-line {
font-weight: bold;
}

span.dep-tree-buttons {
font-style: italic;
padding-left: 10px;
}

span.disabled-project, span.disabled-jobset, span.disabled-job {
text-decoration: line-through;
}
@@ -146,12 +151,42 @@ td.step-status span.warn {
padding-top: 1.5rem;
}

.container {
max-width: 80%;
}

.tab-content {
margin-right: 0 !important;
}

body {
line-height: 1;
}

.navbar-nav {
line-height: 1.5;
}

.dropdown-item {
line-height: 1.5;
}

a.squiggle:hover {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg id='squiggle-link' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' xmlns:ev='http://www.w3.org/2001/xml-events' viewBox='0 0 10 18'%3E%3Cstyle type='text/css'%3E.squiggle{animation:shift .5s linear infinite;}@keyframes shift {from {transform:translateX(-10px);}to {transform:translateX(0);}}%3C/style%3E%3Cpath fill='none' stroke='%230056b3' stroke-width='0.65' class='squiggle' d='M0,17.5 c 2.5,0,2.5,-1.5,5,-1.5 s 2.5,1.5,5,1.5 c 2.5,0,2.5,-1.5,5,-1.5 s 2.5,1.5,5,1.5' /%3E%3C/svg%3E");
background-position: 0 100%;
background-size: auto 24px;
background-repeat: repeat;
text-decoration: none;
border-bottom: none;
padding-bottom: 1px;
}

@media (prefers-color-scheme: dark) {
/* Prevent some flickering */
html {
background-color: #1f1f1f;
}
body, div.popover {
body, div.popover, div.popover-body {
background-color: #1f1f1f;
color: #fafafa !important;
}
Some files were not shown because too many files have changed in this diff.