Merge branch 'master' into fix/local-store-detection
This commit is contained in:
9
.github/workflows/test.yml
vendored
9
.github/workflows/test.yml
vendored
@@ -1,14 +1,17 @@
|
|||||||
name: "Test"
|
name: "Test"
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
|
merge_group:
|
||||||
push:
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
jobs:
|
jobs:
|
||||||
tests:
|
tests:
|
||||||
runs-on: ubuntu-18.04
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- uses: cachix/install-nix-action@v16
|
- uses: cachix/install-nix-action@v31
|
||||||
#- run: nix flake check
|
#- run: nix flake check
|
||||||
- run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
|
- run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
|
||||||
|
28
.github/workflows/update-flakes.yml
vendored
Normal file
28
.github/workflows/update-flakes.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
name: "Update Flakes"
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
# Run weekly on Monday at 00:00 UTC
|
||||||
|
- cron: '0 0 * * 1'
|
||||||
|
workflow_dispatch:
|
||||||
|
jobs:
|
||||||
|
update-flakes:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
pull-requests: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: cachix/install-nix-action@v31
|
||||||
|
- name: Update flake inputs
|
||||||
|
run: nix flake update
|
||||||
|
- name: Create Pull Request
|
||||||
|
uses: peter-evans/create-pull-request@v5
|
||||||
|
with:
|
||||||
|
commit-message: "flake.lock: Update"
|
||||||
|
title: "Update flake inputs"
|
||||||
|
body: |
|
||||||
|
Automated flake input updates.
|
||||||
|
|
||||||
|
This PR was automatically created by the update-flakes workflow.
|
||||||
|
branch: update-flakes
|
||||||
|
delete-branch: true
|
44
.gitignore
vendored
44
.gitignore
vendored
@@ -1,47 +1,9 @@
|
|||||||
/.pls_cache
|
|
||||||
*.o
|
|
||||||
*~
|
*~
|
||||||
Makefile
|
.test_info.*
|
||||||
Makefile.in
|
|
||||||
.deps
|
|
||||||
.hydra-data
|
|
||||||
/config.guess
|
|
||||||
/config.log
|
|
||||||
/config.status
|
|
||||||
/config.sub
|
|
||||||
/configure
|
|
||||||
/depcomp
|
|
||||||
/libtool
|
|
||||||
/ltmain.sh
|
|
||||||
/autom4te.cache
|
|
||||||
/aclocal.m4
|
|
||||||
/missing
|
|
||||||
/install-sh
|
|
||||||
/src/sql/hydra-postgresql.sql
|
/src/sql/hydra-postgresql.sql
|
||||||
/src/sql/hydra-sqlite.sql
|
/src/sql/hydra-sqlite.sql
|
||||||
/src/sql/tmp.sqlite
|
/src/sql/tmp.sqlite
|
||||||
/src/hydra-eval-jobs/hydra-eval-jobs
|
.hydra-data
|
||||||
/src/root/static/bootstrap
|
|
||||||
/src/root/static/js/flot
|
|
||||||
/tests
|
|
||||||
/doc/manual/images
|
|
||||||
/doc/manual/manual.html
|
|
||||||
/doc/manual/manual.pdf
|
|
||||||
/t/.bzr*
|
|
||||||
/t/.git*
|
|
||||||
/t/.hg*
|
|
||||||
/t/nix
|
|
||||||
/t/data
|
|
||||||
/t/jobs/config.nix
|
|
||||||
t/jobs/declarative/project.json
|
|
||||||
/inst
|
|
||||||
hydra-config.h
|
|
||||||
hydra-config.h.in
|
|
||||||
result
|
result
|
||||||
|
result-*
|
||||||
outputs
|
outputs
|
||||||
config
|
|
||||||
stamp-h1
|
|
||||||
src/hydra-evaluator/hydra-evaluator
|
|
||||||
src/hydra-queue-runner/hydra-queue-runner
|
|
||||||
src/root/static/fontawesome/
|
|
||||||
src/root/static/bootstrap*/
|
|
||||||
|
@@ -1,8 +0,0 @@
|
|||||||
SUBDIRS = src t doc
|
|
||||||
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
|
|
||||||
DIST_SUBDIRS = $(SUBDIRS)
|
|
||||||
EXTRA_DIST = hydra-module.nix
|
|
||||||
|
|
||||||
install-data-local: hydra-module.nix
|
|
||||||
$(INSTALL) -d $(DESTDIR)$(datadir)/nix
|
|
||||||
$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
|
|
37
README.md
37
README.md
@@ -39,16 +39,16 @@ In order to evaluate and build anything you need to create _projects_ that conta
|
|||||||
#### Creating A Project
|
#### Creating A Project
|
||||||
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:
|
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:
|
||||||
|
|
||||||
- **Identifier**: `hello`
|
- **Identifier**: `hello-project`
|
||||||
- **Display name**: `hello`
|
- **Display name**: `hello`
|
||||||
- **Description**: `hello project`
|
- **Description**: `hello project`
|
||||||
|
|
||||||
Click "_Create project_".
|
Click "_Create project_".
|
||||||
|
|
||||||
#### Creating A Jobset
|
#### Creating A Jobset
|
||||||
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values:
|
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values:
|
||||||
|
|
||||||
- **Identifier**: `hello`
|
- **Identifier**: `hello-project`
|
||||||
- **Nix expression**: `examples/hello.nix` in `hydra`
|
- **Nix expression**: `examples/hello.nix` in `hydra`
|
||||||
- **Check interval**: 60
|
- **Check interval**: 60
|
||||||
- **Scheduling shares**: 1
|
- **Scheduling shares**: 1
|
||||||
@@ -57,7 +57,7 @@ We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_
|
|||||||
|
|
||||||
- **Input name**: `nixpkgs`
|
- **Input name**: `nixpkgs`
|
||||||
- **Type**: `Git checkout`
|
- **Type**: `Git checkout`
|
||||||
- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03`
|
- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`
|
||||||
|
|
||||||
- **Input name**: `hydra`
|
- **Input name**: `hydra`
|
||||||
- **Type**: `Git checkout`
|
- **Type**: `Git checkout`
|
||||||
@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
|
|||||||
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
|
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
|
||||||
|
|
||||||
```
|
```
|
||||||
$ nix-build
|
$ nix build
|
||||||
```
|
```
|
||||||
|
|
||||||
### Development Environment
|
### Development Environment
|
||||||
|
|
||||||
You can use the provided shell.nix to get a working development environment:
|
You can use the provided shell.nix to get a working development environment:
|
||||||
```
|
```
|
||||||
$ nix-shell
|
$ nix develop
|
||||||
$ ./bootstrap
|
$ mesonConfigurePhase
|
||||||
$ configurePhase # NOTE: not ./configure
|
$ ninja
|
||||||
$ make
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Executing Hydra During Development
|
### Executing Hydra During Development
|
||||||
@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
|
|||||||
can be done using [foreman](https://github.com/ddollar/foreman):
|
can be done using [foreman](https://github.com/ddollar/foreman):
|
||||||
|
|
||||||
```
|
```
|
||||||
$ nix-shell
|
$ nix develop
|
||||||
$ # hack hack
|
$ # hack hack
|
||||||
$ make
|
$ ninja -C build
|
||||||
$ foreman start
|
$ foreman start
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environme
|
|||||||
Then, you can run the tests and the perlcritic linter together with:
|
Then, you can run the tests and the perlcritic linter together with:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ nix-shell
|
$ nix develop
|
||||||
$ make check
|
$ ninja -C build test
|
||||||
```
|
```
|
||||||
|
|
||||||
You can run a single test with:
|
You can run a single test with:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ nix-shell
|
$ nix develop
|
||||||
$ yath test ./t/foo/bar.t
|
$ cd build
|
||||||
|
$ meson test --test-args=../t/Hydra/Event.t testsuite
|
||||||
```
|
```
|
||||||
|
|
||||||
And you can run just perlcritic with:
|
And you can run just perlcritic with:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ nix-shell
|
$ nix develop
|
||||||
$ make perlcritic
|
$ cd build
|
||||||
|
$ meson test perlcritic
|
||||||
```
|
```
|
||||||
|
|
||||||
### JSON API
|
### JSON API
|
||||||
@@ -140,7 +141,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
|
|||||||
## Additional Resources
|
## Additional Resources
|
||||||
|
|
||||||
- [Hydra User's Guide](https://nixos.org/hydra/manual/)
|
- [Hydra User's Guide](https://nixos.org/hydra/manual/)
|
||||||
- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
|
- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
|
||||||
- [hydra-cli](https://github.com/nlewo/hydra-cli)
|
- [hydra-cli](https://github.com/nlewo/hydra-cli)
|
||||||
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
|
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
|
||||||
|
|
||||||
|
85
configure.ac
85
configure.ac
@@ -1,85 +0,0 @@
|
|||||||
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])])
|
|
||||||
AC_CONFIG_AUX_DIR(config)
|
|
||||||
AM_INIT_AUTOMAKE([foreign serial-tests])
|
|
||||||
|
|
||||||
AC_LANG([C++])
|
|
||||||
|
|
||||||
AC_PROG_CC
|
|
||||||
AC_PROG_INSTALL
|
|
||||||
AC_PROG_LN_S
|
|
||||||
AC_PROG_LIBTOOL
|
|
||||||
AC_PROG_CXX
|
|
||||||
|
|
||||||
CXXFLAGS+=" -std=c++17"
|
|
||||||
|
|
||||||
AC_PATH_PROG([XSLTPROC], [xsltproc])
|
|
||||||
|
|
||||||
AC_ARG_WITH([docbook-xsl],
|
|
||||||
[AS_HELP_STRING([--with-docbook-xsl=PATH],
|
|
||||||
[path of the DocBook XSL stylesheets])],
|
|
||||||
[docbookxsl="$withval"],
|
|
||||||
[docbookxsl="/docbook-xsl-missing"])
|
|
||||||
AC_SUBST([docbookxsl])
|
|
||||||
|
|
||||||
|
|
||||||
AC_DEFUN([NEED_PROG],
|
|
||||||
[
|
|
||||||
AC_PATH_PROG($1, $2)
|
|
||||||
if test -z "$$1"; then
|
|
||||||
AC_MSG_ERROR([$2 is required])
|
|
||||||
fi
|
|
||||||
])
|
|
||||||
|
|
||||||
NEED_PROG(perl, perl)
|
|
||||||
|
|
||||||
NEED_PROG([NIX_STORE_PROGRAM], [nix-store])
|
|
||||||
|
|
||||||
AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
|
|
||||||
if test -n "$NIX_STORE" -a -n "$TMPDIR"
|
|
||||||
then
|
|
||||||
# This may be executed from within a build chroot, so pacify
|
|
||||||
# `nix-store' instead of letting it choke while trying to mkdir
|
|
||||||
# /nix/var.
|
|
||||||
NIX_STATE_DIR="$TMPDIR"
|
|
||||||
export NIX_STATE_DIR
|
|
||||||
fi
|
|
||||||
if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
|
|
||||||
AC_MSG_RESULT([yes])
|
|
||||||
else
|
|
||||||
AC_MSG_RESULT([no])
|
|
||||||
AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
|
|
||||||
fi
|
|
||||||
|
|
||||||
PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
|
|
||||||
|
|
||||||
testPath="$(dirname $(type -p expr))"
|
|
||||||
AC_SUBST(testPath)
|
|
||||||
|
|
||||||
jobsPath="$(realpath ./t/jobs)"
|
|
||||||
AC_SUBST(jobsPath)
|
|
||||||
|
|
||||||
CXXFLAGS+=" -include nix/config.h"
|
|
||||||
|
|
||||||
AC_CONFIG_FILES([
|
|
||||||
Makefile
|
|
||||||
doc/Makefile
|
|
||||||
doc/manual/Makefile
|
|
||||||
src/Makefile
|
|
||||||
src/hydra-evaluator/Makefile
|
|
||||||
src/hydra-eval-jobs/Makefile
|
|
||||||
src/hydra-queue-runner/Makefile
|
|
||||||
src/sql/Makefile
|
|
||||||
src/ttf/Makefile
|
|
||||||
src/lib/Makefile
|
|
||||||
src/root/Makefile
|
|
||||||
src/script/Makefile
|
|
||||||
t/Makefile
|
|
||||||
t/jobs/config.nix
|
|
||||||
t/jobs/declarative/project.json
|
|
||||||
])
|
|
||||||
|
|
||||||
AC_CONFIG_COMMANDS([executable-scripts], [])
|
|
||||||
|
|
||||||
AC_CONFIG_HEADER([hydra-config.h])
|
|
||||||
|
|
||||||
AC_OUTPUT
|
|
@@ -1,6 +1,6 @@
|
|||||||
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
|
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
|
||||||
# returns an attribute set of the shape `{ defaultNix, shellNix }`
|
# returns an attribute set of the shape `{ defaultNix, shellNix }`
|
||||||
|
|
||||||
(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
|
(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
|
||||||
src = ./.;
|
src = ./.;
|
||||||
}).defaultNix
|
}).defaultNix
|
||||||
|
@@ -1,4 +0,0 @@
|
|||||||
SUBDIRS = manual
|
|
||||||
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
|
|
||||||
DIST_SUBDIRS = $(SUBDIRS)
|
|
||||||
|
|
129
doc/architecture.md
Normal file
129
doc/architecture.md
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
This is a rough overview from informal discussions and explanations of inner workings of Hydra.
|
||||||
|
You can use it as a guide to navigate the codebase or ask questions.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Components
|
||||||
|
|
||||||
|
- Postgres database
|
||||||
|
- configuration
|
||||||
|
- build queue
|
||||||
|
- what is already built
|
||||||
|
- what is going to build
|
||||||
|
- `hydra-server`
|
||||||
|
- Perl, Catalyst
|
||||||
|
- web frontend
|
||||||
|
- `hydra-evaluator`
|
||||||
|
- Perl, C++
|
||||||
|
- fetches repositories
|
||||||
|
- evaluates job sets
|
||||||
|
- pointers to a repository
|
||||||
|
- adds builds to the queue
|
||||||
|
- `hydra-queue-runner`
|
||||||
|
- C++
|
||||||
|
- monitors the queue
|
||||||
|
- executes build steps
|
||||||
|
- uploads build results
|
||||||
|
- copy to a Nix store
|
||||||
|
- Nix store
|
||||||
|
- contains `.drv`s
|
||||||
|
- populated by `hydra-evaluator`
|
||||||
|
- read by `hydra-queue-runner`
|
||||||
|
- destination Nix store
|
||||||
|
- can be a binary cache
|
||||||
|
- e.g. `[cache.nixos.org](http://cache.nixos.org)` or the same store again (for small Hydra instances)
|
||||||
|
- plugin architecture
|
||||||
|
- extend evaluator for new kinds of repositories
|
||||||
|
- e.g. fetch from `git`
|
||||||
|
|
||||||
|
### Database Schema
|
||||||
|
|
||||||
|
[https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql](https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql)
|
||||||
|
|
||||||
|
- `Jobsets`
|
||||||
|
- populated by calling Nix evaluator
|
||||||
|
- every Nix derivation in `release.nix` is a Job
|
||||||
|
- `flake`
|
||||||
|
- URL to flake, if job is from a flake
|
||||||
|
- single-point of configuration for flake builds
|
||||||
|
- flake itself contains pointers to dependencies
|
||||||
|
- for other builds we need more configuration data
|
||||||
|
- `JobsetInputs`
|
||||||
|
- more configuration for a Job
|
||||||
|
- `JobsetInputAlts`
|
||||||
|
- historical, where you could have more than one alternative for each input
|
||||||
|
- it would have done the cross product of all possibilities
|
||||||
|
- not used any more, as now every input is unique
|
||||||
|
- originally that was to have alternative values for the system parameter
|
||||||
|
- `x86-linux`, `x86_64-darwin`
|
||||||
|
- turned out not to be a good idea, as job set names did not uniquely identify output
|
||||||
|
- `Builds`
|
||||||
|
- queue: scheduled and finished builds
|
||||||
|
- instance of a Job
|
||||||
|
- corresponds to a top-level derivation
|
||||||
|
- can have many dependencies that don’t have a corresponding build
|
||||||
|
- dependencies represented as `BuildSteps`
|
||||||
|
- a Job is all the builds with a particular name, e.g.
|
||||||
|
- `git.x86_64-linux` is a job
|
||||||
|
- there maybe be multiple builds for that job
|
||||||
|
- build ID: just an auto-increment number
|
||||||
|
- building one thing can actually cause many (hundreds of) derivations to be built
|
||||||
|
- for queued builds, the `drv` has to be present in the store
|
||||||
|
- otherwise build will fail, e.g. after garbage collection
|
||||||
|
- `BuildSteps`
|
||||||
|
- corresponds to a derivation or substitution
|
||||||
|
- are reused through the Nix store
|
||||||
|
- may be duplicated for unique derivations due to how they relate to `Jobs`
|
||||||
|
- `BuildStepOutputs`
|
||||||
|
- corresponds directly to derivation outputs
|
||||||
|
- `out`, `dev`, ...
|
||||||
|
- `BuildProducts`
|
||||||
|
- not a Nix concept
|
||||||
|
- populated from a special file `$out/nix-support/hydra-build-producs`
|
||||||
|
- used to scrape parts of build results out to the web frontend
|
||||||
|
- e.g. manuals, ISO images, etc.
|
||||||
|
- `BuildMetrics`
|
||||||
|
- scrapes data from magic location, similar to `BuildProducts` to show fancy graphs
|
||||||
|
- e.g. test coverage, build times, CPU utilization for build
|
||||||
|
- `$out/nix-support/hydra-metrics`
|
||||||
|
- `BuildInputs`
|
||||||
|
- probably obsolute
|
||||||
|
- `JobsetEvalMembers`
|
||||||
|
- joins evaluations with jobs
|
||||||
|
- huge table, 10k’s of entries for one `nixpkgs` evaluation
|
||||||
|
- can be imagined as a subset of the eval cache
|
||||||
|
- could in principle use the eval cache
|
||||||
|
|
||||||
|
### `release.nix`
|
||||||
|
|
||||||
|
- hydra-specific convention to describe the build
|
||||||
|
- should evaluate to an attribute set that contains derivations
|
||||||
|
- hydra considers every attribute in that set a job
|
||||||
|
- every job needs a unique name
|
||||||
|
- if you want to build for multiple platforms, you need to reflect that in the name
|
||||||
|
- hydra does a deep traversal of the attribute set
|
||||||
|
- just evaluating the names may take half an hour
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
|
Can we imagine Hydra to be a persistence layer for the build graph?
|
||||||
|
|
||||||
|
- partially, it lacks a lot of information
|
||||||
|
- does not keep edges of the build graph
|
||||||
|
|
||||||
|
How does Hydra relate to `nix build`?
|
||||||
|
|
||||||
|
- reimplements the top level Nix build loop, scheduling, etc.
|
||||||
|
- Hydra has to persist build results
|
||||||
|
- Hydra has more sophisticated remote build execution and scheduling than Nix
|
||||||
|
|
||||||
|
Is it conceptually possible to unify Hydra’s capabilities with regular Nix?
|
||||||
|
|
||||||
|
- Nix does not have any scheduling, it just traverses the build graph
|
||||||
|
- Hydra has scheduling in terms of job set priorities, tracks how much of a job set it has worked on
|
||||||
|
- makes sure jobs don’t starve each other
|
||||||
|
- Nix cannot dynamically add build jobs at runtime
|
||||||
|
- [RFC 92](https://github.com/NixOS/rfcs/blob/master/rfcs/0092-plan-dynamism.md) should enable that
|
||||||
|
- internally it is already possible, but there is no interface to do that
|
||||||
|
- Hydra queue runner is a long running process
|
||||||
|
- Nix takes a static set of jobs, working it off at once
|
@@ -1,6 +0,0 @@
|
|||||||
MD_FILES = src/*.md
|
|
||||||
|
|
||||||
EXTRA_DIST = $(MD_FILES)
|
|
||||||
|
|
||||||
install: $(MD_FILES)
|
|
||||||
mdbook build . -d $(docdir)
|
|
36
doc/manual/meson.build
Normal file
36
doc/manual/meson.build
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
srcs = files(
|
||||||
|
'src/SUMMARY.md',
|
||||||
|
'src/about.md',
|
||||||
|
'src/api.md',
|
||||||
|
'src/configuration.md',
|
||||||
|
'src/hacking.md',
|
||||||
|
'src/installation.md',
|
||||||
|
'src/introduction.md',
|
||||||
|
'src/jobs.md',
|
||||||
|
'src/monitoring/README.md',
|
||||||
|
'src/notifications.md',
|
||||||
|
'src/plugins/README.md',
|
||||||
|
'src/plugins/RunCommand.md',
|
||||||
|
'src/plugins/declarative-projects.md',
|
||||||
|
'src/projects.md',
|
||||||
|
'src/webhooks.md',
|
||||||
|
)
|
||||||
|
|
||||||
|
manual = custom_target(
|
||||||
|
'manual',
|
||||||
|
command: [
|
||||||
|
mdbook,
|
||||||
|
'build',
|
||||||
|
'@SOURCE_ROOT@/doc/manual',
|
||||||
|
'-d', meson.current_build_dir() / 'html'
|
||||||
|
],
|
||||||
|
depend_files: srcs,
|
||||||
|
output: ['html'],
|
||||||
|
build_by_default: true,
|
||||||
|
)
|
||||||
|
|
||||||
|
install_subdir(
|
||||||
|
manual.full_path(),
|
||||||
|
install_dir: get_option('datadir') / 'doc/hydra',
|
||||||
|
strip_directory: true,
|
||||||
|
)
|
@@ -7,6 +7,7 @@
|
|||||||
- [Hydra jobs](./jobs.md)
|
- [Hydra jobs](./jobs.md)
|
||||||
- [Plugins](./plugins/README.md)
|
- [Plugins](./plugins/README.md)
|
||||||
- [Declarative Projects](./plugins/declarative-projects.md)
|
- [Declarative Projects](./plugins/declarative-projects.md)
|
||||||
|
- [RunCommand](./plugins/RunCommand.md)
|
||||||
- [Using the external API](api.md)
|
- [Using the external API](api.md)
|
||||||
- [Webhooks](webhooks.md)
|
- [Webhooks](webhooks.md)
|
||||||
- [Monitoring Hydra](./monitoring/README.md)
|
- [Monitoring Hydra](./monitoring/README.md)
|
||||||
|
@@ -51,10 +51,12 @@ base_uri example.com
|
|||||||
`base_uri` should be your hydra servers proxied URL. If you are using
|
`base_uri` should be your hydra servers proxied URL. If you are using
|
||||||
Hydra nixos module then setting `hydraURL` option should be enough.
|
Hydra nixos module then setting `hydraURL` option should be enough.
|
||||||
|
|
||||||
If you want to serve Hydra with a prefix path, for example
|
You also need to configure your reverse proxy to pass `X-Request-Base`
|
||||||
[http://example.com/hydra]() then you need to configure your reverse
|
to hydra, with the same value as `base_uri`.
|
||||||
proxy to pass `X-Request-Base` to hydra, with prefix path as value. For
|
This also covers the case of serving Hydra with a prefix path,
|
||||||
example if you are using nginx, then use configuration similar to
|
as in [http://example.com/hydra]().
|
||||||
|
|
||||||
|
For example if you are using nginx, then use configuration similar to
|
||||||
following:
|
following:
|
||||||
|
|
||||||
server {
|
server {
|
||||||
@@ -63,8 +65,7 @@ following:
|
|||||||
.. other configuration ..
|
.. other configuration ..
|
||||||
location /hydra/ {
|
location /hydra/ {
|
||||||
|
|
||||||
proxy_pass http://127.0.0.1:3000;
|
proxy_pass http://127.0.0.1:3000/;
|
||||||
proxy_redirect http://127.0.0.1:3000 https://example.com/hydra;
|
|
||||||
|
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
@@ -74,6 +75,33 @@ following:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Note the trailing slash on the `proxy_pass` directive, which causes nginx to
|
||||||
|
strip off the `/hydra/` part of the URL before passing it to hydra.
|
||||||
|
|
||||||
|
Populating a Cache
|
||||||
|
------------------
|
||||||
|
|
||||||
|
A common use for Hydra is to pre-build and cache derivations which
|
||||||
|
take a long time to build. While it is possible to direcly access the
|
||||||
|
Hydra server's store over SSH, a more scalable option is to upload
|
||||||
|
built derivations to a remote store like an [S3-compatible object
|
||||||
|
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
|
||||||
|
the `store_uri` parameter will cause Hydra to sign and upload
|
||||||
|
derivations as they are built:
|
||||||
|
|
||||||
|
```
|
||||||
|
store_uri = s3://cache-bucket-name?compression=zstd¶llel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
|
||||||
|
```
|
||||||
|
|
||||||
|
This example uses [Zstandard](https://github.com/facebook/zstd)
|
||||||
|
compression on derivations to reduce CPU usage on the server, but
|
||||||
|
[Brotli](https://brotli.org/) compression for derivation listings and
|
||||||
|
build logs because it has better browser support.
|
||||||
|
|
||||||
|
See [`nix help
|
||||||
|
stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
|
||||||
|
for a description of the store URI format.
|
||||||
|
|
||||||
Statsd Configuration
|
Statsd Configuration
|
||||||
--------------------
|
--------------------
|
||||||
|
|
||||||
@@ -102,53 +130,141 @@ in the hydra configuration file, as below:
|
|||||||
</hydra_notify>
|
</hydra_notify>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
hydra-queue-runner's Prometheus service
|
||||||
|
---------------------------------------
|
||||||
|
|
||||||
|
hydra-queue-runner supports running a Prometheus webserver for metrics. The
|
||||||
|
exporter's address defaults to exposing on `127.0.0.1:9198`, but is also
|
||||||
|
configurable through the hydra configuration file and a command line argument,
|
||||||
|
as below. A port of `:0` will make the exposer choose a random, available port.
|
||||||
|
|
||||||
|
```conf
|
||||||
|
queue_runner_metrics_address = 127.0.0.1:9198
|
||||||
|
# or
|
||||||
|
queue_runner_metrics_address = [::]:9198
|
||||||
|
```
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ hydra-queue-runner --prometheus-address 127.0.0.1:9198
|
||||||
|
# or
|
||||||
|
$ hydra-queue-runner --prometheus-address [::]:9198
|
||||||
|
```
|
||||||
|
|
||||||
Using LDAP as authentication backend (optional)
|
Using LDAP as authentication backend (optional)
|
||||||
-----------------------------------------------
|
-----------------------------------------------
|
||||||
|
|
||||||
Instead of using Hydra\'s built-in user management you can optionally
|
Instead of using Hydra's built-in user management you can optionally
|
||||||
use LDAP to manage roles and users.
|
use LDAP to manage roles and users.
|
||||||
|
|
||||||
The `hydra-server` accepts the environment variable
|
This is configured by defining the `<ldap>` block in the configuration file.
|
||||||
*HYDRA\_LDAP\_CONFIG*. The value of the variable should point to a valid
|
In this block it's possible to configure the authentication plugin in the
|
||||||
YAML file containing the Catalyst LDAP configuration. The format of the
|
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
|
||||||
configuration file is describe in the
|
The documentation for the available settings can be found
|
||||||
[*Catalyst::Authentication::Store::LDAP*
|
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
|
||||||
documentation](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
|
|
||||||
An example is given below.
|
|
||||||
|
|
||||||
Roles can be assigned to users based on their LDAP group membership
|
Note that the bind password (if needed) should be supplied as an included file to
|
||||||
(*use\_roles: 1* in the below example). For a user to have the role
|
prevent it from leaking to the Nix store.
|
||||||
*admin* assigned to them they should be in the group *hydra\_admin*. In
|
|
||||||
general any LDAP group of the form *hydra\_some\_role* (notice the
|
|
||||||
*hydra\_* prefix) will work.
|
|
||||||
|
|
||||||
credential:
|
Roles can be assigned to users based on their LDAP group membership. For this
|
||||||
class: Password
|
to work *use\_roles = 1* needs to be defined for the authentication plugin.
|
||||||
password_field: password
|
LDAP groups can then be mapped to Hydra roles using the `<role_mapping>` block.
|
||||||
password_type: self_check
|
|
||||||
store:
|
Example configuration:
|
||||||
class: LDAP
|
```
|
||||||
ldap_server: localhost
|
<ldap>
|
||||||
ldap_server_options.timeout: 30
|
<config>
|
||||||
binddn: "cn=root,dc=example"
|
<credential>
|
||||||
bindpw: notapassword
|
class = Password
|
||||||
start_tls: 0
|
password_field = password
|
||||||
start_tls_options:
|
password_type = self_check
|
||||||
verify: none
|
</credential>
|
||||||
user_basedn: "ou=users,dc=example"
|
<store>
|
||||||
user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))"
|
class = LDAP
|
||||||
user_scope: one
|
ldap_server = localhost
|
||||||
user_field: cn
|
<ldap_server_options>
|
||||||
user_search_options:
|
timeout = 30
|
||||||
deref: always
|
</ldap_server_options>
|
||||||
use_roles: 1
|
binddn = "cn=root,dc=example"
|
||||||
role_basedn: "ou=groups,dc=example"
|
include ldap-password.conf
|
||||||
role_filter: "(&(objectClass=groupOfNames)(member=%s))"
|
start_tls = 0
|
||||||
role_scope: one
|
<start_tls_options>
|
||||||
role_field: cn
|
verify = none
|
||||||
role_value: dn
|
</start_tls_options>
|
||||||
role_search_options:
|
user_basedn = "ou=users,dc=example"
|
||||||
deref: always
|
user_filter = "(&(objectClass=inetOrgPerson)(cn=%s))"
|
||||||
|
user_scope = one
|
||||||
|
user_field = cn
|
||||||
|
<user_search_options>
|
||||||
|
deref = always
|
||||||
|
</user_search_options>
|
||||||
|
# Important for role mappings to work:
|
||||||
|
use_roles = 1
|
||||||
|
role_basedn = "ou=groups,dc=example"
|
||||||
|
role_filter = "(&(objectClass=groupOfNames)(member=%s))"
|
||||||
|
role_scope = one
|
||||||
|
role_field = cn
|
||||||
|
role_value = dn
|
||||||
|
<role_search_options>
|
||||||
|
deref = always
|
||||||
|
</role_search_options>
|
||||||
|
</store>
|
||||||
|
</config>
|
||||||
|
<role_mapping>
|
||||||
|
# Make all users in the hydra_admin group Hydra admins
|
||||||
|
hydra_admin = admin
|
||||||
|
# Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
|
||||||
|
dev = eval-jobset
|
||||||
|
dev = restart-jobs
|
||||||
|
dev = cancel-build
|
||||||
|
</role_mapping>
|
||||||
|
</ldap>
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, place the password to your LDAP server in `/var/lib/hydra/ldap-password.conf`:
|
||||||
|
|
||||||
|
```
|
||||||
|
bindpw = the-ldap-password
|
||||||
|
```
|
||||||
|
|
||||||
|
### Debugging LDAP
|
||||||
|
|
||||||
|
Set the `debug` parameter under `ldap.config.ldap_server_options.debug`:
|
||||||
|
|
||||||
|
```
|
||||||
|
<ldap>
|
||||||
|
<config>
|
||||||
|
<store>
|
||||||
|
<ldap_server_options>
|
||||||
|
debug = 2
|
||||||
|
</ldap_server_options>
|
||||||
|
</store>
|
||||||
|
</config>
|
||||||
|
</ldap>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Legacy LDAP Configuration
|
||||||
|
|
||||||
|
Hydra used to load the LDAP configuration from a YAML file in the
|
||||||
|
`HYDRA_LDAP_CONFIG` environment variable. This behavior is deperecated
|
||||||
|
and will be removed.
|
||||||
|
|
||||||
|
When Hydra uses the deprecated YAML file, Hydra applies the following
|
||||||
|
default role mapping:
|
||||||
|
|
||||||
|
```
|
||||||
|
<ldap>
|
||||||
|
<role_mapping>
|
||||||
|
hydra_admin = admin
|
||||||
|
hydra_bump-to-front = bump-to-front
|
||||||
|
hydra_cancel-build = cancel-build
|
||||||
|
hydra_create-projects = create-projects
|
||||||
|
hydra_restart-jobs = restart-jobs
|
||||||
|
</role_mapping>
|
||||||
|
</ldap>
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that configuring both the LDAP parameters in the hydra.conf and via
|
||||||
|
the environment variable is a fatal error.
|
||||||
|
|
||||||
Embedding Extra HTML
|
Embedding Extra HTML
|
||||||
--------------------
|
--------------------
|
||||||
|
@@ -12,24 +12,26 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
|
|||||||
and dependencies can be found:
|
and dependencies can be found:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ nix-shell
|
$ nix develop
|
||||||
```
|
```
|
||||||
|
|
||||||
To build Hydra, you should then do:
|
To build Hydra, you should then do:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
[nix-shell]$ ./bootstrap
|
$ mesonConfigurePhase
|
||||||
[nix-shell]$ configurePhase
|
$ ninja
|
||||||
[nix-shell]$ make
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You start a local database, the webserver, and other components with
|
You start a local database, the webserver, and other components with
|
||||||
foreman:
|
foreman:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
|
$ ninja -C build
|
||||||
$ foreman start
|
$ foreman start
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"
|
||||||
|
|
||||||
You can run just the Hydra web server in your source tree as follows:
|
You can run just the Hydra web server in your source tree as follows:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
@@ -39,18 +41,11 @@ $ ./src/script/hydra-server
|
|||||||
You can run Hydra's test suite with the following:
|
You can run Hydra's test suite with the following:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
[nix-shell]$ make check
|
$ meson test
|
||||||
[nix-shell]$ # to run as many tests as you have cores:
|
# to run as many tests as you have cores:
|
||||||
[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
|
$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
|
||||||
[nix-shell]$ # or run yath directly:
|
|
||||||
[nix-shell]$ yath test
|
|
||||||
[nix-shell]$ # to run as many tests as you have cores:
|
|
||||||
[nix-shell]$ yath test -j $NIX_BUILD_CORES
|
|
||||||
```
|
```
|
||||||
|
|
||||||
When using `yath` instead of `make check`, ensure you have run `make`
|
|
||||||
in the root of the repository at least once.
|
|
||||||
|
|
||||||
**Warning**: Currently, the tests can fail
|
**Warning**: Currently, the tests can fail
|
||||||
if run with high parallelism [due to an issue in
|
if run with high parallelism [due to an issue in
|
||||||
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
|
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
|
||||||
@@ -67,7 +62,7 @@ will reload the page every time you save.
|
|||||||
To build Hydra and its dependencies:
|
To build Hydra and its dependencies:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ nix-build release.nix -A build.x86_64-linux
|
$ nix build .#packages.x86_64-linux.default
|
||||||
```
|
```
|
||||||
|
|
||||||
## Development Tasks
|
## Development Tasks
|
||||||
@@ -92,7 +87,7 @@ On NixOS:
|
|||||||
|
|
||||||
```nix
|
```nix
|
||||||
{
|
{
|
||||||
nix.trustedUsers = [ "YOURUSER" ];
|
nix.settings.trusted-users = [ "YOURUSER" ];
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@@ -48,7 +48,7 @@ Getting Nix
|
|||||||
If your server runs NixOS you are all set to continue with installation
|
If your server runs NixOS you are all set to continue with installation
|
||||||
of Hydra. Otherwise you first need to install Nix. The latest stable
|
of Hydra. Otherwise you first need to install Nix. The latest stable
|
||||||
version can be found one [the Nix web
|
version can be found one [the Nix web
|
||||||
site](http://nixos.org/nix/download.html), along with a manual, which
|
site](https://nixos.org/download/), along with a manual, which
|
||||||
includes installation instructions.
|
includes installation instructions.
|
||||||
|
|
||||||
Installation
|
Installation
|
||||||
|
@@ -42,7 +42,7 @@ Sets CircleCI status.
|
|||||||
|
|
||||||
## Compress build logs
|
## Compress build logs
|
||||||
|
|
||||||
Compresses build logs after a build with bzip2.
|
Compresses build logs after a build with bzip2 or zstd.
|
||||||
|
|
||||||
### Configuration options
|
### Configuration options
|
||||||
|
|
||||||
@@ -50,6 +50,14 @@ Compresses build logs after a build with bzip2.
|
|||||||
|
|
||||||
Enable log compression
|
Enable log compression
|
||||||
|
|
||||||
|
- `compress_build_logs_compression`
|
||||||
|
|
||||||
|
Which compression format to use. Valid values are bzip2 (default) and zstd.
|
||||||
|
|
||||||
|
- `compress_build_logs_silent`
|
||||||
|
|
||||||
|
Whether to compress logs silently.
|
||||||
|
|
||||||
### Example
|
### Example
|
||||||
|
|
||||||
```xml
|
```xml
|
||||||
@@ -172,17 +180,6 @@ Sets Gitlab CI status.
|
|||||||
|
|
||||||
- `gitlab_authorization.<projectId>`
|
- `gitlab_authorization.<projectId>`
|
||||||
|
|
||||||
## HipChat notification
|
|
||||||
|
|
||||||
Sends hipchat chat notifications when a build finish.
|
|
||||||
|
|
||||||
### Configuration options
|
|
||||||
|
|
||||||
- `hipchat.[].jobs`
|
|
||||||
- `hipchat.[].builds`
|
|
||||||
- `hipchat.[].token`
|
|
||||||
- `hipchat.[].notify`
|
|
||||||
|
|
||||||
## InfluxDB notification
|
## InfluxDB notification
|
||||||
|
|
||||||
Writes InfluxDB events when a builds finished.
|
Writes InfluxDB events when a builds finished.
|
||||||
@@ -192,10 +189,12 @@ Writes InfluxDB events when a builds finished.
|
|||||||
- `influxdb.url`
|
- `influxdb.url`
|
||||||
- `influxdb.db`
|
- `influxdb.db`
|
||||||
|
|
||||||
## Run command
|
## RunCommand
|
||||||
|
|
||||||
Runs a shell command when the build is finished.
|
Runs a shell command when the build is finished.
|
||||||
|
|
||||||
|
See [The RunCommand Plugin](./RunCommand.md) for more information.
|
||||||
|
|
||||||
### Configuration options:
|
### Configuration options:
|
||||||
|
|
||||||
- `runcommand.[].job`
|
- `runcommand.[].job`
|
||||||
|
83
doc/manual/src/plugins/RunCommand.md
Normal file
83
doc/manual/src/plugins/RunCommand.md
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
## The RunCommand Plugin
|
||||||
|
|
||||||
|
Hydra supports executing a program after certain builds finish.
|
||||||
|
This behavior is disabled by default.
|
||||||
|
|
||||||
|
Hydra executes these commands under the `hydra-notify` service.
|
||||||
|
|
||||||
|
### Static Commands
|
||||||
|
|
||||||
|
Configure specific commands to execute after the specified matching job finishes.
|
||||||
|
|
||||||
|
#### Configuration
|
||||||
|
|
||||||
|
- `runcommand.[].job`
|
||||||
|
|
||||||
|
A matcher for jobs to match in the format `project:jobset:job`. Defaults to `*:*:*`.
|
||||||
|
|
||||||
|
**Note:** This matcher format is not a regular expression.
|
||||||
|
The `*` is a wildcard for that entire section, partial matches are not supported.
|
||||||
|
|
||||||
|
- `runcommand.[].command`
|
||||||
|
|
||||||
|
Command to run. Can use the `$HYDRA_JSON` environment variable to access information about the build.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<runcommand>
|
||||||
|
job = myProject:*:*
|
||||||
|
command = cat $HYDRA_JSON > /tmp/hydra-output
|
||||||
|
</runcommand>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Dynamic Commands
|
||||||
|
|
||||||
|
Hydra can optionally run RunCommand hooks defined dynamically by the jobset. In
|
||||||
|
order to enable dynamic commands, you must enable this feature in your
|
||||||
|
`hydra.conf`, *as well as* in the parent project and jobset configuration.
|
||||||
|
|
||||||
|
#### Behavior
|
||||||
|
|
||||||
|
Hydra will execute any program defined under the `runCommandHook` attribute set. These jobs must have a single output named `out`, and that output must be an executable file located directly at `$out`.
|
||||||
|
|
||||||
|
#### Security Properties
|
||||||
|
|
||||||
|
Safely deploying dynamic commands requires careful design of your Hydra jobs. Allowing arbitrary users to define attributes in your top level attribute set will allow that user to execute code on your Hydra.
|
||||||
|
|
||||||
|
If a jobset has dynamic commands enabled, you must ensure only trusted users can define top level attributes.
|
||||||
|
|
||||||
|
|
||||||
|
#### Configuration
|
||||||
|
|
||||||
|
- `dynamicruncommand.enable`
|
||||||
|
|
||||||
|
Set to 1 to enable dynamic RunCommand program execution.
|
||||||
|
|
||||||
|
#### Example
|
||||||
|
|
||||||
|
In your Hydra configuration, specify:
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<dynamicruncommand>
|
||||||
|
enable = 1
|
||||||
|
</dynamicruncommand>
|
||||||
|
```
|
||||||
|
|
||||||
|
Then create a job named `runCommandHook.example` in your jobset:
|
||||||
|
|
||||||
|
```
|
||||||
|
{ pkgs, ... }: {
|
||||||
|
runCommandHook = {
|
||||||
|
recurseForDerivations = true;
|
||||||
|
|
||||||
|
example = pkgs.writeScript "run-me" ''
|
||||||
|
#!${pkgs.runtimeShell}
|
||||||
|
|
||||||
|
${pkgs.jq}/bin/jq . "$HYDRA_JSON"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
After the `runcommandHook.example` build finishes that script will execute.
|
@@ -34,6 +34,7 @@ To configure a static declarative project, take the following steps:
|
|||||||
"checkinterval": 300,
|
"checkinterval": 300,
|
||||||
"schedulingshares": 100,
|
"schedulingshares": 100,
|
||||||
"enableemail": false,
|
"enableemail": false,
|
||||||
|
"enable_dynamic_run_command": false,
|
||||||
"emailoverride": "",
|
"emailoverride": "",
|
||||||
"keepnr": 3,
|
"keepnr": 3,
|
||||||
"inputs": {
|
"inputs": {
|
||||||
@@ -53,6 +54,7 @@ To configure a static declarative project, take the following steps:
|
|||||||
"checkinterval": 300,
|
"checkinterval": 300,
|
||||||
"schedulingshares": 100,
|
"schedulingshares": 100,
|
||||||
"enableemail": false,
|
"enableemail": false,
|
||||||
|
"enable_dynamic_run_command": false,
|
||||||
"emailoverride": "",
|
"emailoverride": "",
|
||||||
"keepnr": 3,
|
"keepnr": 3,
|
||||||
"inputs": {
|
"inputs": {
|
||||||
@@ -92,6 +94,7 @@ containing the configuration of the jobset, for example:
|
|||||||
"checkinterval": 300,
|
"checkinterval": 300,
|
||||||
"schedulingshares": 100,
|
"schedulingshares": 100,
|
||||||
"enableemail": false,
|
"enableemail": false,
|
||||||
|
"enable_dynamic_run_command": false,
|
||||||
"emailoverride": "",
|
"emailoverride": "",
|
||||||
"keepnr": 3,
|
"keepnr": 3,
|
||||||
"inputs": {
|
"inputs": {
|
||||||
|
@@ -378,13 +378,18 @@ This section describes how it can be implemented for `gitea`, but the approach f
|
|||||||
analogous:
|
analogous:
|
||||||
|
|
||||||
* [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication)
|
* [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication)
|
||||||
* Add it to your `hydra.conf` like this:
|
* Add it to a file which only users in the hydra group can read like this: see [including files](configuration.md#including-files) for more information
|
||||||
|
```
|
||||||
|
<gitea_authorization>
|
||||||
|
your_username=your_token
|
||||||
|
</gitea_authorization>
|
||||||
|
```
|
||||||
|
|
||||||
|
* Include the file in your `hydra.conf` like this:
|
||||||
``` nix
|
``` nix
|
||||||
{
|
{
|
||||||
services.hydra-dev.extraConfig = ''
|
services.hydra-dev.extraConfig = ''
|
||||||
<gitea_authorization>
|
Include /path/to/secret/file
|
||||||
your_username=your_token
|
|
||||||
</gitea_authorization>
|
|
||||||
'';
|
'';
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -399,3 +404,10 @@ analogous:
|
|||||||
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
|
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
|
||||||
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
|
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
|
||||||
|
|
||||||
|
Content-addressed derivations
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
|
Hydra can to a certain extent use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
|
||||||
|
To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.
|
||||||
|
|
||||||
|
Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken at that point.
|
||||||
|
@@ -1,9 +1,12 @@
|
|||||||
# Webhooks
|
# Webhooks
|
||||||
|
|
||||||
Hydra can be notified by github's webhook to trigger a new evaluation when a
|
Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
|
||||||
jobset has a github repo in its input.
|
jobset has a github repo in its input.
|
||||||
To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
|
|
||||||
click on `Add webhook`.
|
## GitHub
|
||||||
|
|
||||||
|
To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
|
||||||
|
and in the `Webhooks` tab click on `Add webhook`.
|
||||||
|
|
||||||
- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
|
- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
|
||||||
- In `Content type` switch to `application/json`.
|
- In `Content type` switch to `application/json`.
|
||||||
@@ -11,3 +14,14 @@ click on `Add webhook`.
|
|||||||
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
|
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
|
||||||
|
|
||||||
Then add the hook with `Add webhook`.
|
Then add the hook with `Add webhook`.
|
||||||
|
|
||||||
|
## Gitea
|
||||||
|
|
||||||
|
To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
|
||||||
|
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
|
||||||
|
|
||||||
|
- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
|
||||||
|
- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
|
||||||
|
- Change the branch filter to match the git branch hydra builds.
|
||||||
|
|
||||||
|
Then add the hook with `Add webhook`.
|
||||||
|
@@ -1,5 +1,5 @@
|
|||||||
#
|
#
|
||||||
# jobset example file. This file canbe referenced as Nix expression
|
# jobset example file. This file can be referenced as Nix expression
|
||||||
# in a jobset configuration along with inputs for nixpkgs and the
|
# in a jobset configuration along with inputs for nixpkgs and the
|
||||||
# repository containing this file.
|
# repository containing this file.
|
||||||
#
|
#
|
||||||
|
59
flake.lock
generated
59
flake.lock
generated
@@ -1,62 +1,59 @@
|
|||||||
{
|
{
|
||||||
"nodes": {
|
"nodes": {
|
||||||
"lowdown-src": {
|
"nix": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1617481909,
|
"lastModified": 1750777360,
|
||||||
"narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=",
|
"narHash": "sha256-nDWFxwhT+fQNgi4rrr55EKjpxDyVKSl1KaNmSXtYj40=",
|
||||||
"owner": "kristapsdz",
|
"owner": "NixOS",
|
||||||
"repo": "lowdown",
|
"repo": "nix",
|
||||||
"rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d",
|
"rev": "7bb200199705eddd53cb34660a76567c6f1295d9",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"owner": "kristapsdz",
|
"owner": "NixOS",
|
||||||
"ref": "VERSION_0_8_4",
|
"ref": "2.29-maintenance",
|
||||||
"repo": "lowdown",
|
"repo": "nix",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nix": {
|
"nix-eval-jobs": {
|
||||||
"inputs": {
|
"flake": false,
|
||||||
"lowdown-src": "lowdown-src",
|
|
||||||
"nixpkgs": "nixpkgs"
|
|
||||||
},
|
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1628586117,
|
"lastModified": 1748680938,
|
||||||
"narHash": "sha256-8hS4xy7fq3z9XZIMYm4sQi9SzhcYqEJfdbwgDePoWuc=",
|
"narHash": "sha256-TQk6pEMD0mFw7jZXpg7+2qNKGbAluMQgc55OMgEO8bM=",
|
||||||
"owner": "NixOS",
|
"owner": "nix-community",
|
||||||
"repo": "nix",
|
"repo": "nix-eval-jobs",
|
||||||
"rev": "a6ba313a0aac3b6e2fef434cb42d190a0849238e",
|
"rev": "974a4af3d4a8fd242d8d0e2608da4be87a62b83f",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"id": "nix",
|
"owner": "nix-community",
|
||||||
"type": "indirect"
|
"repo": "nix-eval-jobs",
|
||||||
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1624862269,
|
"lastModified": 1750736827,
|
||||||
"narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=",
|
"narHash": "sha256-UcNP7BR41xMTe0sfHBH8R79+HdCw0OwkC/ZKrQEuMeo=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "f77036342e2b690c61c97202bf48f2ce13acc022",
|
"rev": "b4a30b08433ad7b6e1dfba0833fb0fe69d43dfec",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"id": "nixpkgs",
|
"owner": "NixOS",
|
||||||
"ref": "nixos-21.05-small",
|
"ref": "nixos-25.05-small",
|
||||||
"type": "indirect"
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"root": {
|
"root": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"nix": "nix",
|
"nix": "nix",
|
||||||
"nixpkgs": [
|
"nix-eval-jobs": "nix-eval-jobs",
|
||||||
"nix",
|
"nixpkgs": "nixpkgs"
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@@ -70,7 +70,7 @@ paths:
|
|||||||
$ref: '#/components/examples/projects-success'
|
$ref: '#/components/examples/projects-success'
|
||||||
|
|
||||||
/api/push:
|
/api/push:
|
||||||
put:
|
post:
|
||||||
summary: trigger jobsets
|
summary: trigger jobsets
|
||||||
parameters:
|
parameters:
|
||||||
- in: query
|
- in: query
|
||||||
@@ -178,6 +178,9 @@ paths:
|
|||||||
enabled:
|
enabled:
|
||||||
description: when set to true the project gets scheduled for evaluation
|
description: when set to true the project gets scheduled for evaluation
|
||||||
type: boolean
|
type: boolean
|
||||||
|
enable_dynamic_run_command:
|
||||||
|
description: when true the project's jobsets support executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand.
|
||||||
|
type: boolean
|
||||||
visible:
|
visible:
|
||||||
description: when set to true the project is displayed in the web interface
|
description: when set to true the project is displayed in the web interface
|
||||||
type: boolean
|
type: boolean
|
||||||
@@ -530,13 +533,13 @@ paths:
|
|||||||
schema:
|
schema:
|
||||||
$ref: '#/components/schemas/Error'
|
$ref: '#/components/schemas/Error'
|
||||||
|
|
||||||
/eval/{build-id}:
|
/eval/{eval-id}:
|
||||||
get:
|
get:
|
||||||
summary: Retrieves evaluations identified by build id
|
summary: Retrieves evaluations identified by eval id
|
||||||
parameters:
|
parameters:
|
||||||
- name: build-id
|
- name: eval-id
|
||||||
in: path
|
in: path
|
||||||
description: build identifier
|
description: eval identifier
|
||||||
required: true
|
required: true
|
||||||
schema:
|
schema:
|
||||||
type: integer
|
type: integer
|
||||||
@@ -548,6 +551,24 @@ paths:
|
|||||||
schema:
|
schema:
|
||||||
$ref: '#/components/schemas/JobsetEval'
|
$ref: '#/components/schemas/JobsetEval'
|
||||||
|
|
||||||
|
/eval/{eval-id}/builds:
|
||||||
|
get:
|
||||||
|
summary: Retrieves all builds belonging to an evaluation identified by eval id
|
||||||
|
parameters:
|
||||||
|
- name: eval-id
|
||||||
|
in: path
|
||||||
|
description: eval identifier
|
||||||
|
required: true
|
||||||
|
schema:
|
||||||
|
type: integer
|
||||||
|
responses:
|
||||||
|
'200':
|
||||||
|
description: builds
|
||||||
|
content:
|
||||||
|
application/json:
|
||||||
|
schema:
|
||||||
|
$ref: '#/components/schemas/JobsetEvalBuilds'
|
||||||
|
|
||||||
components:
|
components:
|
||||||
schemas:
|
schemas:
|
||||||
|
|
||||||
@@ -607,6 +628,9 @@ components:
|
|||||||
enabled:
|
enabled:
|
||||||
description: when set to true the project gets scheduled for evaluation
|
description: when set to true the project gets scheduled for evaluation
|
||||||
type: boolean
|
type: boolean
|
||||||
|
enable_dynamic_run_command:
|
||||||
|
description: when true the project's jobsets support executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand.
|
||||||
|
type: boolean
|
||||||
declarative:
|
declarative:
|
||||||
description: declarative input configured for this project
|
description: declarative input configured for this project
|
||||||
type: object
|
type: object
|
||||||
@@ -689,6 +713,9 @@ components:
|
|||||||
enableemail:
|
enableemail:
|
||||||
description: when true the jobset sends emails when previously-successful builds fail
|
description: when true the jobset sends emails when previously-successful builds fail
|
||||||
type: boolean
|
type: boolean
|
||||||
|
enable_dynamic_run_command:
|
||||||
|
description: when true the jobset supports executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand.
|
||||||
|
type: boolean
|
||||||
visible:
|
visible:
|
||||||
description: when true the jobset is visible in the web frontend
|
description: when true the jobset is visible in the web frontend
|
||||||
type: boolean
|
type: boolean
|
||||||
@@ -787,6 +814,13 @@ components:
|
|||||||
additionalProperties:
|
additionalProperties:
|
||||||
$ref: '#/components/schemas/JobsetEvalInput'
|
$ref: '#/components/schemas/JobsetEvalInput'
|
||||||
|
|
||||||
|
JobsetEvalBuilds:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
additionalProperties:
|
||||||
|
$ref: '#/components/schemas/Build'
|
||||||
|
|
||||||
JobsetOverview:
|
JobsetOverview:
|
||||||
type: array
|
type: array
|
||||||
items:
|
items:
|
||||||
@@ -861,7 +895,7 @@ components:
|
|||||||
description: Size of the produced file
|
description: Size of the produced file
|
||||||
type: integer
|
type: integer
|
||||||
defaultpath:
|
defaultpath:
|
||||||
description: This is a Git/Mercurial commit hash or a Subversion revision number
|
description: if path is a directory, the default file relative to path to be served
|
||||||
type: string
|
type: string
|
||||||
'type':
|
'type':
|
||||||
description: Types of build product (user defined)
|
description: Types of build product (user defined)
|
||||||
|
26
meson.build
Normal file
26
meson.build
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
project('hydra', 'cpp',
|
||||||
|
version: files('version.txt'),
|
||||||
|
license: 'GPL-3.0',
|
||||||
|
default_options: [
|
||||||
|
'debug=true',
|
||||||
|
'optimization=2',
|
||||||
|
'cpp_std=c++20',
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
nix_util_dep = dependency('nix-util', required: true)
|
||||||
|
nix_store_dep = dependency('nix-store', required: true)
|
||||||
|
nix_main_dep = dependency('nix-main', required: true)
|
||||||
|
|
||||||
|
pqxx_dep = dependency('libpqxx', required: true)
|
||||||
|
|
||||||
|
prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
|
||||||
|
prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)
|
||||||
|
|
||||||
|
mdbook = find_program('mdbook', native: true)
|
||||||
|
perl = find_program('perl', native: true)
|
||||||
|
|
||||||
|
subdir('doc/manual')
|
||||||
|
subdir('nixos-modules')
|
||||||
|
subdir('src')
|
||||||
|
subdir('t')
|
47
nixos-modules/default.nix
Normal file
47
nixos-modules/default.nix
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
{ self }:
|
||||||
|
|
||||||
|
{
|
||||||
|
hydra = { pkgs, lib,... }: {
|
||||||
|
_file = ./default.nix;
|
||||||
|
imports = [ ./hydra.nix ];
|
||||||
|
services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
|
||||||
|
};
|
||||||
|
|
||||||
|
hydraTest = { pkgs, ... }: {
|
||||||
|
services.hydra-dev.enable = true;
|
||||||
|
services.hydra-dev.hydraURL = "http://hydra.example.org";
|
||||||
|
services.hydra-dev.notificationSender = "admin@hydra.example.org";
|
||||||
|
|
||||||
|
systemd.services.hydra-send-stats.enable = false;
|
||||||
|
|
||||||
|
services.postgresql.enable = true;
|
||||||
|
|
||||||
|
# The following is to work around the following error from hydra-server:
|
||||||
|
# [error] Caught exception in engine "Cannot determine local time zone"
|
||||||
|
time.timeZone = "UTC";
|
||||||
|
|
||||||
|
nix.extraOptions = ''
|
||||||
|
allowed-uris = https://github.com/
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
hydraProxy = {
|
||||||
|
services.httpd = {
|
||||||
|
enable = true;
|
||||||
|
adminAddr = "hydra-admin@example.org";
|
||||||
|
extraConfig = ''
|
||||||
|
<Proxy *>
|
||||||
|
Order deny,allow
|
||||||
|
Allow from all
|
||||||
|
</Proxy>
|
||||||
|
|
||||||
|
ProxyRequests Off
|
||||||
|
ProxyPreserveHost On
|
||||||
|
ProxyPass /apache-errors !
|
||||||
|
ErrorDocument 503 /apache-errors/503.html
|
||||||
|
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
|
||||||
|
ProxyPassReverse / http://127.0.0.1:3000/
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
@@ -68,7 +68,6 @@ in

     package = mkOption {
       type = types.path;
-      default = pkgs.hydra;
       description = "The Hydra package.";
     };

@@ -171,6 +170,7 @@ in
     buildMachinesFiles = mkOption {
       type = types.listOf types.path;
       default = optional (config.nix.buildMachines != []) "/etc/nix/machines";
+      defaultText = literalExpression ''optional (config.nix.buildMachines != []) "/etc/nix/machines"'';
       example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
       description = "List of files containing build machines.";
     };
@@ -226,7 +226,11 @@ in
         useDefaultShell = true;
       };

-    nix.trustedUsers = [ "hydra-queue-runner" ];
+    nix.settings = {
+      trusted-users = [ "hydra-queue-runner" ];
+      keep-outputs = true;
+      keep-derivations = true;
+    };

     services.hydra-dev.extraConfig =
       ''
@@ -254,11 +258,6 @@ in

     environment.variables = hydraEnv;

-    nix.extraOptions = ''
-      gc-keep-outputs = true
-      gc-keep-derivations = true
-    '';
-
     systemd.services.hydra-init =
       { wantedBy = [ "multi-user.target" ];
         requires = optional haveLocalDB "postgresql.service";
@@ -266,17 +265,17 @@ in
         environment = env // {
           HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
         };
-        path = [ pkgs.utillinux ];
+        path = [ pkgs.util-linux ];
         preStart = ''
           ln -sf ${hydraConf} ${baseDir}/hydra.conf

           mkdir -m 0700 -p ${baseDir}/www
-          chown hydra-www.hydra ${baseDir}/www
+          chown hydra-www:hydra ${baseDir}/www

           mkdir -m 0700 -p ${baseDir}/queue-runner
           mkdir -m 0750 -p ${baseDir}/build-logs
           mkdir -m 0750 -p ${baseDir}/runcommand-logs
-          chown hydra-queue-runner.hydra \
+          chown hydra-queue-runner:hydra \
             ${baseDir}/queue-runner \
             ${baseDir}/build-logs \
             ${baseDir}/runcommand-logs
@@ -307,7 +306,7 @@ in
             rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
           fi

-          chown hydra.hydra ${cfg.gcRootsDir}
+          chown hydra:hydra ${cfg.gcRootsDir}
           chmod 2775 ${cfg.gcRootsDir}
         '';
         serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init";
@@ -339,7 +338,8 @@ in
     systemd.services.hydra-queue-runner =
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
-        after = [ "hydra-init.service" "network.target" ];
+        wants = [ "network-online.target" ];
+        after = [ "hydra-init.service" "network.target" "network-online.target" ];
         path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
         restartTriggers = [ hydraConf ];
         environment = env // {
@@ -407,6 +407,7 @@ in
         requires = [ "hydra-init.service" ];
         after = [ "hydra-init.service" ];
         restartTriggers = [ hydraConf ];
+        path = [ pkgs.zstd ];
         environment = env // {
           PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
           HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
@@ -457,10 +458,17 @@ in
     # logs automatically after a step finishes, but this doesn't work
     # if the queue runner is stopped prematurely.
     systemd.services.hydra-compress-logs =
-      { path = [ pkgs.bzip2 ];
+      { path = [ pkgs.bzip2 pkgs.zstd ];
        script =
          ''
-            find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
+            set -eou pipefail
+            compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf)
+            if [[ $compression == "" ]]; then
+              compression="bzip2"
+            elif [[ $compression == zstd ]]; then
+              compression="zstd --rm"
+            fi
+            find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
          '';
        startAt = "Sun 01:45";
      };
4
nixos-modules/meson.build
Normal file
@@ -0,0 +1,4 @@
install_data('hydra.nix',
  install_dir: get_option('datadir') / 'nix',
  rename: ['hydra-module.nix'],
)
306
nixos-tests.nix
Normal file
@@ -0,0 +1,306 @@
|
|||||||
|
{ forEachSystem, nixpkgs, nixosModules }:
|
||||||
|
|
||||||
|
let
|
||||||
|
# NixOS configuration used for VM tests.
|
||||||
|
hydraServer =
|
||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
nixosModules.hydra
|
||||||
|
nixosModules.hydraTest
|
||||||
|
];
|
||||||
|
|
||||||
|
virtualisation.memorySize = 1024;
|
||||||
|
virtualisation.writableStore = true;
|
||||||
|
|
||||||
|
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
|
||||||
|
|
||||||
|
nix = {
|
||||||
|
# Without this nix tries to fetch packages from the default
|
||||||
|
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
|
||||||
|
settings.substituters = [ ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
in
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
install = forEachSystem (system:
|
||||||
|
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
|
||||||
|
name = "hydra-install";
|
||||||
|
nodes.machine = hydraServer;
|
||||||
|
testScript =
|
||||||
|
''
|
||||||
|
machine.wait_for_job("hydra-init")
|
||||||
|
machine.wait_for_job("hydra-server")
|
||||||
|
machine.wait_for_job("hydra-evaluator")
|
||||||
|
machine.wait_for_job("hydra-queue-runner")
|
||||||
|
machine.wait_for_open_port(3000)
|
||||||
|
machine.succeed("curl --fail http://localhost:3000/")
|
||||||
|
'';
|
||||||
|
});
|
||||||
|
|
||||||
|
notifications = forEachSystem (system:
|
||||||
|
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
|
||||||
|
name = "hydra-notifications";
|
||||||
|
nodes.machine = {
|
||||||
|
imports = [ hydraServer ];
|
||||||
|
services.hydra-dev.extraConfig = ''
|
||||||
|
<influxdb>
|
||||||
|
url = http://127.0.0.1:8086
|
||||||
|
db = hydra
|
||||||
|
</influxdb>
|
||||||
|
'';
|
||||||
|
services.influxdb.enable = true;
|
||||||
|
};
|
||||||
|
testScript = { nodes, ... }: ''
|
||||||
|
machine.wait_for_job("hydra-init")
|
||||||
|
|
||||||
|
# Create an admin account and some other state.
|
||||||
|
machine.succeed(
|
||||||
|
"""
|
||||||
|
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
|
||||||
|
mkdir /run/jobset
|
||||||
|
chmod 755 /run/jobset
|
||||||
|
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
|
||||||
|
chmod 644 /run/jobset/default.nix
|
||||||
|
chown -R hydra /run/jobset
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait until InfluxDB can receive web requests
|
||||||
|
machine.wait_for_job("influxdb")
|
||||||
|
machine.wait_for_open_port(8086)
|
||||||
|
|
||||||
|
# Create an InfluxDB database where hydra will write to
|
||||||
|
machine.succeed(
|
||||||
|
"curl -XPOST 'http://127.0.0.1:8086/query' "
|
||||||
|
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait until hydra-server can receive HTTP requests
|
||||||
|
machine.wait_for_job("hydra-server")
|
||||||
|
machine.wait_for_open_port(3000)
|
||||||
|
|
||||||
|
# Setup the project and jobset
|
||||||
|
machine.succeed(
|
||||||
|
"su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait until hydra has build the job and
|
||||||
|
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
|
||||||
|
machine.wait_until_succeeds(
|
||||||
|
"curl -s -H 'Accept: application/csv' "
|
||||||
|
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
|
||||||
|
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
|
||||||
|
)
|
||||||
|
'';
|
||||||
|
});
|
||||||
|
|
||||||
|
gitea = forEachSystem (system:
|
||||||
|
let
|
||||||
|
pkgs = nixpkgs.legacyPackages.${system};
|
||||||
|
in
|
||||||
|
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
|
||||||
|
name = "hydra-gitea";
|
||||||
|
nodes.machine = { pkgs, ... }: {
|
||||||
|
imports = [ hydraServer ];
|
||||||
|
services.hydra-dev.extraConfig = ''
|
||||||
|
<gitea_authorization>
|
||||||
|
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
|
||||||
|
</gitea_authorization>
|
||||||
|
'';
|
||||||
|
nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
|
||||||
|
nix = {
|
||||||
|
settings.substituters = [ ];
|
||||||
|
};
|
||||||
|
services.gitea = {
|
||||||
|
enable = true;
|
||||||
|
database.type = "postgres";
|
||||||
|
settings = {
|
||||||
|
service.DISABLE_REGISTRATION = true;
|
||||||
|
server.HTTP_PORT = 3001;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
services.openssh.enable = true;
|
||||||
|
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
|
||||||
|
networking.firewall.allowedTCPPorts = [ 3000 ];
|
||||||
|
};
|
||||||
|
skipLint = true;
|
||||||
|
testScript =
|
||||||
|
let
|
||||||
|
scripts.mktoken = pkgs.writeText "token.sql" ''
|
||||||
|
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
|
||||||
|
'';
|
||||||
|
|
||||||
|
scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
|
||||||
|
set -x
|
||||||
|
mkdir -p /tmp/repo $HOME/.ssh
|
||||||
|
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
|
||||||
|
chmod 0400 $HOME/.ssh/privk
|
||||||
|
git -C /tmp/repo init
|
||||||
|
cp ${smallDrv} /tmp/repo/jobset.nix
|
||||||
|
git -C /tmp/repo add .
|
||||||
|
git config --global user.email test@localhost
|
||||||
|
git config --global user.name test
|
||||||
|
git -C /tmp/repo commit -m 'Initial import'
|
||||||
|
git -C /tmp/repo remote add origin gitea@machine:root/repo
|
||||||
|
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
|
||||||
|
git -C /tmp/repo push origin master
|
||||||
|
git -C /tmp/repo log >&2
|
||||||
|
'';
|
||||||
|
|
||||||
|
scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
|
||||||
|
set -x
|
||||||
|
su -l hydra -c "hydra-create-user root --email-address \
|
||||||
|
'alice@example.org' --password foobar --role admin"
|
||||||
|
|
||||||
|
URL=http://localhost:3000
|
||||||
|
USERNAME="root"
|
||||||
|
PASSWORD="foobar"
|
||||||
|
PROJECT_NAME="trivial"
|
||||||
|
JOBSET_NAME="trivial"
|
||||||
|
mycurl() {
|
||||||
|
curl --referer $URL -H "Accept: application/json" \
|
||||||
|
-H "Content-Type: application/json" $@
|
||||||
|
}
|
||||||
|
|
||||||
|
cat >data.json <<EOF
|
||||||
|
{ "username": "$USERNAME", "password": "$PASSWORD" }
|
||||||
|
EOF
|
||||||
|
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
|
||||||
|
|
||||||
|
cat >data.json <<EOF
|
||||||
|
{
|
||||||
|
"displayname":"Trivial",
|
||||||
|
"enabled":"1",
|
||||||
|
"visible":"1"
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
|
||||||
|
-d @data.json -b hydra-cookie.txt
|
||||||
|
|
||||||
|
cat >data.json <<EOF
|
||||||
|
{
|
||||||
|
"description": "Trivial",
|
||||||
|
"checkinterval": "60",
|
||||||
|
"enabled": "1",
|
||||||
|
"visible": "1",
|
||||||
|
"keepnr": "1",
|
||||||
|
"enableemail": true,
|
||||||
|
"emailoverride": "hydra@localhost",
|
||||||
|
"type": 0,
|
||||||
|
"nixexprinput": "git",
|
||||||
|
"nixexprpath": "jobset.nix",
|
||||||
|
"inputs": {
|
||||||
|
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
|
||||||
|
"gitea_repo_name": {"value": "repo", "type": "string"},
|
||||||
|
"gitea_repo_owner": {"value": "root", "type": "string"},
|
||||||
|
"gitea_status_repo": {"value": "git", "type": "string"},
|
||||||
|
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
|
||||||
|
-d @data.json -b hydra-cookie.txt
|
||||||
|
'';
|
||||||
|
|
||||||
|
api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
|
||||||
|
|
||||||
|
snakeoilKeypair = {
|
||||||
|
privkey = pkgs.writeText "privkey.snakeoil" ''
|
||||||
|
-----BEGIN EC PRIVATE KEY-----
|
||||||
|
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
|
||||||
|
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
|
||||||
|
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
|
||||||
|
-----END EC PRIVATE KEY-----
|
||||||
|
'';
|
||||||
|
|
||||||
|
pubkey = pkgs.lib.concatStrings [
|
||||||
|
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
|
||||||
|
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
|
||||||
|
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
smallDrv = pkgs.writeText "jobset.nix" ''
|
||||||
|
{ trivial = builtins.derivation {
|
||||||
|
name = "trivial";
|
||||||
|
system = "${system}";
|
||||||
|
builder = "/bin/sh";
|
||||||
|
allowSubstitutes = false;
|
||||||
|
preferLocalBuild = true;
|
||||||
|
args = ["-c" "echo success > $out; exit 0"];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
''
|
||||||
|
import json
|
||||||
|
|
||||||
|
machine.start()
|
||||||
|
machine.wait_for_unit("multi-user.target")
|
||||||
|
machine.wait_for_open_port(3000)
|
||||||
|
machine.wait_for_open_port(3001)
|
||||||
|
|
||||||
|
machine.succeed(
|
||||||
|
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
|
||||||
|
+ "--username root --password root --email test@localhost'"
|
||||||
|
)
|
||||||
|
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
|
||||||
|
|
||||||
|
machine.succeed(
|
||||||
|
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
|
||||||
|
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
|
||||||
|
+ f"-H 'Authorization: token ${api_token}'"
|
||||||
|
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
|
||||||
|
)
|
||||||
|
|
||||||
|
machine.succeed(
|
||||||
|
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
|
||||||
|
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
|
||||||
|
+ f"-H 'Authorization: token ${api_token}'"
|
||||||
|
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
|
||||||
|
)
|
||||||
|
|
||||||
|
machine.succeed(
|
||||||
|
"${scripts.git-setup}"
|
||||||
|
)
|
||||||
|
|
||||||
|
machine.succeed(
|
||||||
|
"${scripts.hydra-setup}"
|
||||||
|
)
|
||||||
|
|
||||||
|
machine.wait_until_succeeds(
|
||||||
|
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
|
||||||
|
+ '| jq .buildstatus | xargs test 0 -eq'
|
||||||
|
)
|
||||||
|
|
||||||
|
data = machine.succeed(
|
||||||
|
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
|
||||||
|
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
|
||||||
|
+ f"-H 'Authorization: token ${api_token}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
response = json.loads(data)
|
||||||
|
|
||||||
|
assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!"
|
||||||
|
assert response[0]['status'] == "success", "Expected finished status to be success!"
|
||||||
|
assert response[1]['status'] == "pending", "Expected queued status to be pending!"
|
||||||
|
|
||||||
|
machine.shutdown()
|
||||||
|
'';
|
||||||
|
});
|
||||||
|
|
||||||
|
validate-openapi = forEachSystem (system:
|
||||||
|
let pkgs = nixpkgs.legacyPackages.${system}; in
|
||||||
|
pkgs.runCommand "validate-openapi"
|
||||||
|
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
|
||||||
|
''
|
||||||
|
openapi-generator-cli validate -i ${./hydra-api.yaml}
|
||||||
|
touch $out
|
||||||
|
'');
|
||||||
|
|
||||||
|
}
|
284
package.nix
Normal file
@@ -0,0 +1,284 @@
{ stdenv
, lib
, fileset

, rawSrc

, buildEnv

, perlPackages

, nixComponents
, git

, makeWrapper
, meson
, ninja
, nukeReferences
, pkg-config
, mdbook

, unzip
, libpqxx
, top-git
, mercurial
, darcs
, subversion
, breezy
, openssl
, bzip2
, libxslt
, perl
, pixz
, boost
, postgresql_13
, nlohmann_json
, prometheus-cpp

, cacert
, foreman
, glibcLocales
, libressl
, openldap
, python3

, openssh
, coreutils
, findutils
, gzip
, xz
, gnutar
, gnused
, nix-eval-jobs

, rpm
, dpkg
, cdrkit
}:

let
  perlDeps = buildEnv {
    name = "hydra-perl-deps";
    paths = lib.closePropagation
      ([
        nixComponents.nix-perl-bindings
        git
      ] ++ (with perlPackages; [
        AuthenSASL
        CatalystActionREST
        CatalystAuthenticationStoreDBIxClass
        CatalystAuthenticationStoreLDAP
        CatalystDevel
        CatalystPluginAccessLog
        CatalystPluginAuthorizationRoles
        CatalystPluginCaptcha
        CatalystPluginPrometheusTiny
        CatalystPluginSessionStateCookie
        CatalystPluginSessionStoreFastMmap
        CatalystPluginStackTrace
        CatalystTraitForRequestProxyBase
        CatalystViewDownload
        CatalystViewJSON
        CatalystViewTT
        CatalystXRoleApplicator
        CatalystXScriptServerStarman
        CryptPassphrase
        CryptPassphraseArgon2
        CryptRandPasswd
        DataDump
        DateTime
        DBDPg
        DBDSQLite
        DBIxClassHelpers
        DigestSHA1
        EmailMIME
        EmailSender
        FileCopyRecursive
        FileLibMagic
        FileSlurper
        FileWhich
        IOCompress
        IPCRun
        IPCRun3
        JSON
        JSONMaybeXS
        JSONXS
        ListSomeUtils
        LWP
        LWPProtocolHttps
        ModulePluggable
        NetAmazonS3
        NetPrometheus
        NetStatsd
        PadWalker
        ParallelForkManager
        PerlCriticCommunity
        PrometheusTinyShared
        ReadonlyX
        SetScalar
        SQLSplitStatement
        Starman
        StringCompareConstantTime
        SysHostnameLong
        TermSizeAny
        TermReadKey
        Test2Harness
        TestPostgreSQL
        TextDiff
        TextTable
        UUID4Tiny
        YAML
        XMLSimple
      ]));
  };

  version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
in
stdenv.mkDerivation (finalAttrs: {
  pname = "hydra";
  inherit version;

  src = fileset.toSource {
    root = ./.;
    fileset = fileset.unions ([
      ./doc
      ./meson.build
      ./nixos-modules
      ./src
      ./t
      ./version.txt
      ./.perlcriticrc
    ]);
  };

  outputs = [ "out" "doc" ];

  strictDeps = true;

  nativeBuildInputs = [
    makeWrapper
    meson
    ninja
    nukeReferences
    pkg-config
    mdbook
    nixComponents.nix-cli
    perlDeps
    perl
    unzip
  ];

  buildInputs = [
    libpqxx
    openssl
    libxslt
    nixComponents.nix-util
    nixComponents.nix-store
    nixComponents.nix-main
    perlDeps
    perl
    boost
    nlohmann_json
    prometheus-cpp
  ];

  nativeCheckInputs = [
    bzip2
    darcs
    foreman
    top-git
    mercurial
    subversion
    breezy
    openldap
    postgresql_13
    pixz
    nix-eval-jobs
  ];

  checkInputs = [
    cacert
    glibcLocales
    libressl.nc
    python3
    nixComponents.nix-cli
  ];

  hydraPath = lib.makeBinPath (
    [
      subversion
      openssh
      nixComponents.nix-cli
      coreutils
      findutils
      pixz
      gzip
      bzip2
      xz
      gnutar
      unzip
      git
      top-git
      mercurial
      darcs
      gnused
      breezy
      nix-eval-jobs
    ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
  );

  OPENLDAP_ROOT = openldap;

  mesonBuildType = "release";

  postPatch = ''
    patchShebangs .
  '';

  shellHook = ''
    pushd $(git rev-parse --show-toplevel) >/dev/null

    PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
    export HYDRA_HOME="$(pwd)/src/"
    mkdir -p .hydra-data
    export HYDRA_DATA="$(pwd)/.hydra-data"
    export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

    popd >/dev/null
  '';

  doCheck = true;

  mesonCheckFlags = [ "--verbose" ];

  preCheck = ''
    export LOGNAME=''${LOGNAME:-foo}
    # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
  '';

  postInstall = ''
    mkdir -p $out/nix-support

    for i in $out/bin/*; do
      read -n 4 chars < $i
      if [[ $chars =~ ELF ]]; then continue; fi
      wrapProgram $i \
        --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
        --prefix PATH ':' $out/bin:$hydraPath \
        --set HYDRA_RELEASE ${version} \
        --set HYDRA_HOME $out/libexec/hydra \
        --set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
        --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
    done
  '';

  dontStrip = true;

  meta.description = "Build of Hydra on ${stdenv.system}";
  passthru = {
    inherit perlDeps;
    nix = nixComponents.nix-cli;
  };
})
@@ -1,6 +1,6 @@
 # The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
 # returns an attribute set of the shape `{ defaultNix, shellNix }`

-(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
+(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
   src = ./.;
 }).shellNix
@@ -1,3 +0,0 @@
-SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-eval-jobs
-
-hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
-hydra_eval_jobs_LDADD = $(NIX_LIBS)
-hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra
@@ -1,518 +0,0 @@
|
|||||||
#include <iostream>
|
|
||||||
#include <thread>
|
|
||||||
#include <optional>
|
|
||||||
#include <unordered_map>
|
|
||||||
|
|
||||||
#include "shared.hh"
|
|
||||||
#include "store-api.hh"
|
|
||||||
#include "eval.hh"
|
|
||||||
#include "eval-inline.hh"
|
|
||||||
#include "util.hh"
|
|
||||||
#include "get-drvs.hh"
|
|
||||||
#include "globals.hh"
|
|
||||||
#include "common-eval-args.hh"
|
|
||||||
#include "flake/flakeref.hh"
|
|
||||||
#include "flake/flake.hh"
|
|
||||||
#include "attr-path.hh"
|
|
||||||
#include "derivations.hh"
|
|
||||||
#include "local-fs-store.hh"
|
|
||||||
|
|
||||||
#include "hydra-config.hh"
|
|
||||||
|
|
||||||
#include <sys/types.h>
|
|
||||||
#include <sys/wait.h>
|
|
||||||
#include <sys/resource.h>
|
|
||||||
|
|
||||||
#include <nlohmann/json.hpp>
|
|
||||||
|
|
||||||
using namespace nix;
|
|
||||||
|
|
||||||
static Path gcRootsDir;
|
|
||||||
static size_t maxMemorySize;
|
|
||||||
|
|
||||||
struct MyArgs : MixEvalArgs, MixCommonArgs
|
|
||||||
{
|
|
||||||
Path releaseExpr;
|
|
||||||
bool flake = false;
|
|
||||||
bool dryRun = false;
|
|
||||||
|
|
||||||
MyArgs() : MixCommonArgs("hydra-eval-jobs")
|
|
||||||
{
|
|
||||||
addFlag({
|
|
||||||
.longName = "gc-roots-dir",
|
|
||||||
.description = "garbage collector roots directory",
|
|
||||||
.labels = {"path"},
|
|
||||||
.handler = {&gcRootsDir}
|
|
||||||
});
|
|
||||||
|
|
||||||
addFlag({
|
|
||||||
.longName = "dry-run",
|
|
||||||
.description = "don't create store derivations",
|
|
||||||
.handler = {&dryRun, true}
|
|
||||||
});
|
|
||||||
|
|
||||||
addFlag({
|
|
||||||
.longName = "flake",
|
|
||||||
.description = "build a flake",
|
|
||||||
.handler = {&flake, true}
|
|
||||||
});
|
|
||||||
|
|
||||||
expectArg("expr", &releaseExpr);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
static MyArgs myArgs;
|
|
||||||
|
|
||||||
static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute)
|
|
||||||
{
|
|
||||||
Strings res;
|
|
||||||
std::function<void(Value & v)> rec;
|
|
||||||
|
|
||||||
rec = [&](Value & v) {
|
|
||||||
state.forceValue(v);
|
|
||||||
if (v.type() == nString)
|
|
||||||
res.push_back(v.string.s);
|
|
||||||
else if (v.isList())
|
|
||||||
for (unsigned int n = 0; n < v.listSize(); ++n)
|
|
||||||
rec(*v.listElems()[n]);
|
|
||||||
else if (v.type() == nAttrs) {
|
|
||||||
auto a = v.attrs->find(state.symbols.create(subAttribute));
|
|
||||||
if (a != v.attrs->end())
|
|
||||||
res.push_back(state.forceString(*a->value));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Value * v = drv.queryMeta(name);
|
|
||||||
if (v) rec(*v);
|
|
||||||
|
|
||||||
return concatStringsSep(", ", res);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void worker(
|
|
||||||
EvalState & state,
|
|
||||||
Bindings & autoArgs,
|
|
||||||
AutoCloseFD & to,
|
|
||||||
AutoCloseFD & from)
|
|
||||||
{
|
|
||||||
Value vTop;
|
|
||||||
|
|
||||||
if (myArgs.flake) {
|
|
||||||
using namespace flake;
|
|
||||||
|
|
||||||
auto flakeRef = parseFlakeRef(myArgs.releaseExpr);
|
|
||||||
|
|
||||||
auto vFlake = state.allocValue();
|
|
||||||
|
|
||||||
auto lockedFlake = lockFlake(state, flakeRef,
|
|
||||||
LockFlags {
|
|
||||||
.updateLockFile = false,
|
|
||||||
.useRegistries = false,
|
|
||||||
.allowMutable = false,
|
|
||||||
});
|
|
||||||
|
|
||||||
callFlake(state, lockedFlake, *vFlake);
|
|
||||||
|
|
||||||
auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
|
|
||||||
state.forceValue(*vOutputs);
|
|
||||||
|
|
||||||
auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
|
|
||||||
if (!aHydraJobs)
|
|
||||||
aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
|
|
||||||
if (!aHydraJobs)
|
|
||||||
throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
|
|
||||||
|
|
||||||
vTop = *aHydraJobs->value;
|
|
||||||
|
|
||||||
} else {
|
|
||||||
state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto vRoot = state.allocValue();
|
|
||||||
state.autoCallFunction(autoArgs, vTop, *vRoot);
|
|
||||||
|
|
||||||
while (true) {
|
|
||||||
/* Wait for the master to send us a job name. */
|
|
||||||
writeLine(to.get(), "next");
|
|
||||||
|
|
||||||
auto s = readLine(from.get());
|
|
||||||
if (s == "exit") break;
|
|
||||||
if (!hasPrefix(s, "do ")) abort();
|
|
||||||
std::string attrPath(s, 3);
|
|
||||||
|
|
||||||
debug("worker process %d at '%s'", getpid(), attrPath);
|
|
||||||
|
|
||||||
/* Evaluate it and send info back to the master. */
|
|
||||||
nlohmann::json reply;
|
|
||||||
|
|
||||||
try {
|
|
||||||
auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;
|
|
||||||
|
|
||||||
auto v = state.allocValue();
|
|
||||||
state.autoCallFunction(autoArgs, *vTmp, *v);
|
|
||||||
|
|
||||||
if (auto drv = getDerivation(state, *v, false)) {
|
|
||||||
|
|
||||||
DrvInfo::Outputs outputs = drv->queryOutputs();
|
|
||||||
|
|
||||||
if (drv->querySystem() == "unknown")
|
|
||||||
throw EvalError("derivation must have a 'system' attribute");
|
|
||||||
|
|
||||||
auto drvPath = drv->queryDrvPath();
|
|
||||||
|
|
||||||
nlohmann::json job;
|
|
||||||
|
|
||||||
job["nixName"] = drv->queryName();
|
|
||||||
job["system"] =drv->querySystem();
|
|
||||||
job["drvPath"] = drvPath;
|
|
||||||
job["description"] = drv->queryMetaString("description");
|
|
||||||
job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
|
|
||||||
job["homepage"] = drv->queryMetaString("homepage");
|
|
||||||
job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
|
|
||||||
job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
|
|
||||||
job["timeout"] = drv->queryMetaInt("timeout", 36000);
|
|
||||||
job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
|
|
||||||
job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
|
|
||||||
|
|
||||||
/* If this is an aggregate, then get its constituents. */
|
|
||||||
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
|
|
||||||
if (a && state.forceBool(*a->value, *a->pos)) {
|
|
||||||
auto a = v->attrs->get(state.symbols.create("constituents"));
|
|
||||||
if (!a)
|
|
||||||
throw EvalError("derivation must have a ‘constituents’ attribute");
|
|
||||||
|
|
||||||
|
|
||||||
PathSet context;
|
|
||||||
state.coerceToString(*a->pos, *a->value, context, true, false);
|
|
||||||
for (auto & i : context)
|
|
||||||
if (i.at(0) == '!') {
|
|
||||||
size_t index = i.find("!", 1);
|
|
||||||
job["constituents"].push_back(string(i, index + 1));
|
|
||||||
}
|
|
||||||
|
|
||||||
state.forceList(*a->value, *a->pos);
|
|
||||||
for (unsigned int n = 0; n < a->value->listSize(); ++n) {
|
|
||||||
auto v = a->value->listElems()[n];
|
|
||||||
state.forceValue(*v);
|
|
||||||
if (v->type() == nString)
|
|
||||||
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Register the derivation as a GC root. !!! This
|
|
||||||
registers roots for jobs that we may have already
|
|
||||||
done. */
|
|
||||||
auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
|
|
||||||
if (gcRootsDir != "" && localStore) {
|
|
||||||
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
|
|
||||||
if (!pathExists(root))
|
|
||||||
localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
|
|
||||||
}
|
|
||||||
|
|
||||||
nlohmann::json out;
|
|
||||||
for (auto & j : outputs)
|
|
||||||
out[j.first] = j.second;
|
|
||||||
job["outputs"] = std::move(out);
|
|
||||||
|
|
||||||
reply["job"] = std::move(job);
|
|
||||||
}
|
|
||||||
|
|
||||||
else if (v->type() == nAttrs) {
|
|
||||||
auto attrs = nlohmann::json::array();
|
|
||||||
StringSet ss;
|
|
||||||
for (auto & i : v->attrs->lexicographicOrder()) {
|
|
||||||
std::string name(i->name);
|
|
||||||
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
|
|
||||||
printError("skipping job with illegal name '%s'", name);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
attrs.push_back(name);
|
|
||||||
}
|
|
||||||
reply["attrs"] = std::move(attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
else if (v->type() == nNull)
|
|
||||||
;
|
|
||||||
|
|
||||||
else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
|
|
||||||
|
|
||||||
} catch (EvalError & e) {
|
|
||||||
auto msg = e.msg();
|
|
||||||
// Transmits the error we got from the previous evaluation
|
|
||||||
// in the JSON output.
|
|
||||||
reply["error"] = filterANSIEscapes(msg, true);
|
|
||||||
// Don't forget to print it into the STDERR log, this is
|
|
||||||
// what's shown in the Hydra UI.
|
|
||||||
printError(msg);
|
|
||||||
}
|
|
||||||
|
|
||||||
writeLine(to.get(), reply.dump());
|
|
||||||
|
|
||||||
/* If our RSS exceeds the maximum, exit. The master will
|
|
||||||
start a new process. */
|
|
||||||
struct rusage r;
|
|
||||||
getrusage(RUSAGE_SELF, &r);
|
|
||||||
if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
|
|
||||||
}
|
|
||||||
|
|
||||||
writeLine(to.get(), "restart");
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char * * argv)
|
|
||||||
{
|
|
||||||
/* Prevent undeclared dependencies in the evaluation via
|
|
||||||
$NIX_PATH. */
|
|
||||||
unsetenv("NIX_PATH");
|
|
||||||
|
|
||||||
return handleExceptions(argv[0], [&]() {
|
|
||||||
|
|
||||||
auto config = std::make_unique<HydraConfig>();
|
|
||||||
|
|
||||||
auto nrWorkers = config->getIntOption("evaluator_workers", 1);
|
|
||||||
maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
|
|
||||||
|
|
||||||
initNix();
|
|
||||||
initGC();
|
|
||||||
|
|
||||||
myArgs.parseCmdline(argvToStrings(argc, argv));
|
|
||||||
|
|
||||||
auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake);
|
|
||||||
|
|
||||||
/* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
|
|
||||||
settings.builders = "";
|
|
||||||
|
|
||||||
/* Prevent access to paths outside of the Nix search path and
|
|
||||||
to the environment. */
|
|
||||||
evalSettings.restrictEval = true;
|
|
||||||
|
|
||||||
/* When building a flake, use pure evaluation (no access to
|
|
||||||
'getEnv', 'currentSystem' etc. */
|
|
||||||
evalSettings.pureEval = pureEval;
|
|
||||||
|
|
||||||
if (myArgs.dryRun) settings.readOnlyMode = true;
|
|
||||||
|
|
||||||
if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
|
|
||||||
|
|
||||||
if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
|
|
||||||
|
|
||||||
struct State
|
|
||||||
{
|
|
||||||
std::set<std::string> todo{""};
|
|
||||||
std::set<std::string> active;
|
|
||||||
nlohmann::json jobs;
|
|
||||||
std::exception_ptr exc;
|
|
||||||
};
|
|
||||||
|
|
||||||
std::condition_variable wakeup;
|
|
||||||
|
|
||||||
Sync<State> state_;
|
|
||||||
|
|
||||||
/* Start a handler thread per worker process. */
|
|
||||||
auto handler = [&]()
|
|
||||||
{
|
|
||||||
try {
|
|
||||||
pid_t pid = -1;
|
|
||||||
AutoCloseFD from, to;
|
|
||||||
|
|
||||||
while (true) {
|
|
||||||
|
|
||||||
/* Start a new worker process if necessary. */
|
|
||||||
if (pid == -1) {
|
|
||||||
Pipe toPipe, fromPipe;
|
|
||||||
toPipe.create();
|
|
||||||
fromPipe.create();
|
|
||||||
pid = startProcess(
|
|
||||||
[&,
|
|
||||||
to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
|
|
||||||
from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
|
|
||||||
]()
|
|
||||||
{
|
|
||||||
try {
|
|
||||||
EvalState state(myArgs.searchPath, openStore());
|
|
||||||
Bindings & autoArgs = *myArgs.getAutoArgs(state);
|
|
||||||
worker(state, autoArgs, *to, *from);
|
|
||||||
} catch (Error & e) {
|
|
||||||
nlohmann::json err;
|
|
||||||
auto msg = e.msg();
|
|
||||||
err["error"] = filterANSIEscapes(msg, true);
|
|
||||||
printError(msg);
|
|
||||||
writeLine(to->get(), err.dump());
|
|
||||||
// Don't forget to print it into the STDERR log, this is
|
|
||||||
// what's shown in the Hydra UI.
|
|
||||||
writeLine(to->get(), "restart");
|
|
||||||
}
|
|
||||||
},
|
|
||||||
ProcessOptions { .allowVfork = false });
|
|
||||||
from = std::move(fromPipe.readSide);
|
|
||||||
to = std::move(toPipe.writeSide);
|
|
||||||
debug("created worker process %d", pid);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check whether the existing worker process is still there. */
|
|
||||||
auto s = readLine(from.get());
|
|
||||||
if (s == "restart") {
|
|
||||||
pid = -1;
|
|
||||||
continue;
|
|
||||||
} else if (s != "next") {
|
|
||||||
auto json = nlohmann::json::parse(s);
|
|
||||||
throw Error("worker error: %s", (std::string) json["error"]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wait for a job name to become available. */
|
|
||||||
std::string attrPath;
|
|
||||||
|
|
||||||
while (true) {
|
|
||||||
checkInterrupt();
|
|
||||||
auto state(state_.lock());
|
|
||||||
if ((state->todo.empty() && state->active.empty()) || state->exc) {
|
|
||||||
writeLine(to.get(), "exit");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (!state->todo.empty()) {
|
|
||||||
attrPath = *state->todo.begin();
|
|
||||||
state->todo.erase(state->todo.begin());
|
|
||||||
state->active.insert(attrPath);
|
|
||||||
break;
|
|
||||||
} else
|
|
||||||
state.wait(wakeup);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Tell the worker to evaluate it. */
|
|
||||||
writeLine(to.get(), "do " + attrPath);
|
|
||||||
|
|
||||||
/* Wait for the response. */
|
|
||||||
auto response = nlohmann::json::parse(readLine(from.get()));
|
|
||||||
|
|
||||||
/* Handle the response. */
|
|
||||||
StringSet newAttrs;
|
|
||||||
|
|
||||||
if (response.find("job") != response.end()) {
|
|
||||||
auto state(state_.lock());
|
|
||||||
state->jobs[attrPath] = response["job"];
|
|
||||||
}
|
|
||||||
|
|
||||||
if (response.find("attrs") != response.end()) {
|
|
||||||
for (auto & i : response["attrs"]) {
|
|
||||||
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
|
|
||||||
newAttrs.insert(s);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (response.find("error") != response.end()) {
|
|
||||||
auto state(state_.lock());
|
|
||||||
state->jobs[attrPath]["error"] = response["error"];
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Add newly discovered job names to the queue. */
|
|
||||||
{
|
|
||||||
auto state(state_.lock());
|
|
||||||
state->active.erase(attrPath);
|
|
||||||
for (auto & s : newAttrs)
|
|
||||||
state->todo.insert(s);
|
|
||||||
wakeup.notify_all();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (...) {
|
|
||||||
auto state(state_.lock());
|
|
||||||
state->exc = std::current_exception();
|
|
||||||
wakeup.notify_all();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
std::vector<std::thread> threads;
|
|
||||||
for (size_t i = 0; i < nrWorkers; i++)
|
|
||||||
threads.emplace_back(std::thread(handler));
|
|
||||||
|
|
||||||
for (auto & thread : threads)
|
|
||||||
thread.join();
|
|
||||||
|
|
||||||
auto state(state_.lock());
|
|
||||||
|
|
||||||
if (state->exc)
|
|
||||||
std::rethrow_exception(state->exc);
|
|
||||||
|
|
||||||
/* For aggregate jobs that have named consistuents
|
|
||||||
(i.e. constituents that are a job name rather than a
|
|
||||||
derivation), look up the referenced job and add it to the
|
|
||||||
dependencies of the aggregate derivation. */
|
|
||||||
auto store = openStore();
|
|
||||||
|
|
||||||
for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
|
|
||||||
auto jobName = i.key();
|
|
||||||
auto & job = i.value();
|
|
||||||
|
|
||||||
auto named = job.find("namedConstituents");
|
|
||||||
if (named == job.end()) continue;
|
|
||||||
|
|
||||||
std::unordered_map<std::string, std::string> brokenJobs;
|
|
||||||
auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state](
|
|
||||||
const std::string & childJobName) -> std::optional<nlohmann::json> {
|
|
||||||
auto childJob = state->jobs.find(childJobName);
|
|
||||||
if (childJob == state->jobs.end()) {
|
|
||||||
printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName);
|
|
||||||
brokenJobs[childJobName] = "does not exist";
|
|
||||||
return std::nullopt;
|
|
||||||
}
|
|
||||||
if (childJob->find("error") != childJob->end()) {
|
|
||||||
std::string error = (*childJob)["error"];
|
|
||||||
printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error);
|
|
||||||
brokenJobs[childJobName] = error;
|
|
||||||
return std::nullopt;
|
|
||||||
}
|
|
||||||
return *childJob;
|
|
||||||
};
|
|
||||||
|
|
||||||
if (myArgs.dryRun) {
|
|
||||||
for (std::string jobName2 : *named) {
|
|
||||||
auto job2 = getNonBrokenJobOrRecordError(jobName2);
|
|
||||||
if (!job2) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
std::string drvPath2 = (*job2)["drvPath"];
|
|
||||||
job["constituents"].push_back(drvPath2);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
|
|
||||||
auto drv = store->readDerivation(drvPath);
|
|
||||||
|
|
||||||
for (std::string jobName2 : *named) {
|
|
||||||
auto job2 = getNonBrokenJobOrRecordError(jobName2);
|
|
||||||
if (!job2) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
|
|
||||||
auto drv2 = store->readDerivation(drvPath2);
|
|
||||||
job["constituents"].push_back(store->printStorePath(drvPath2));
|
|
||||||
drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
|
|
||||||
}
|
|
||||||
|
|
||||||
if (brokenJobs.empty()) {
|
|
||||||
std::string drvName(drvPath.name());
|
|
||||||
assert(hasSuffix(drvName, drvExtension));
|
|
||||||
drvName.resize(drvName.size() - drvExtension.size());
|
|
||||||
auto h = std::get<Hash>(hashDerivationModulo(*store, drv, true));
|
|
||||||
auto outPath = store->makeOutputPath("out", h, drvName);
|
|
||||||
drv.env["out"] = store->printStorePath(outPath);
|
|
||||||
drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = outPath } });
|
|
||||||
auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));
|
|
||||||
|
|
||||||
debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);
|
|
||||||
|
|
||||||
job["drvPath"] = newDrvPath;
|
|
||||||
job["outputs"]["out"] = store->printStorePath(outPath);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
job.erase("namedConstituents");
|
|
||||||
|
|
||||||
if (!brokenJobs.empty()) {
|
|
||||||
std::stringstream ss;
|
|
||||||
for (const auto& [jobName, error] : brokenJobs) {
|
|
||||||
ss << jobName << ": " << error << "\n";
|
|
||||||
}
|
|
||||||
job["error"] = ss.str();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << state->jobs.dump(2) << "\n";
|
|
||||||
});
|
|
||||||
}
|
|
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-evaluator
-
-hydra_evaluator_SOURCES = hydra-evaluator.cc
-hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
-hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
@@ -1,7 +1,8 @@
 #include "db.hh"
 #include "hydra-config.hh"
-#include "pool.hh"
-#include "shared.hh"
+#include <nix/util/pool.hh>
+#include <nix/main/shared.hh>
+#include <nix/util/signals.hh>

 #include <algorithm>
 #include <thread>
@@ -37,7 +38,7 @@ class JobsetId {
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);

     std::string display() const {
-        return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
+        return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };
 bool operator==(const JobsetId & lhs, const JobsetId & rhs)
@@ -366,6 +367,9 @@ struct Evaluator
                     printInfo("received jobset event");
                 }

+            } catch (pqxx::broken_connection & e) {
+                printError("Database connection broken: %s", e.what());
+                std::_Exit(1);
             } catch (std::exception & e) {
                 printError("exception in database monitor thread: %s", e.what());
                 sleep(30);
@@ -473,6 +477,9 @@ struct Evaluator
         while (true) {
             try {
                 loop();
+            } catch (pqxx::broken_connection & e) {
+                printError("Database connection broken: %s", e.what());
+                std::_Exit(1);
             } catch (std::exception & e) {
                 printError("exception in main loop: %s", e.what());
                 sleep(30);
10
src/hydra-evaluator/meson.build
Normal file
@@ -0,0 +1,10 @@
hydra_evaluator = executable('hydra-evaluator',
  'hydra-evaluator.cc',
  dependencies: [
    libhydra_dep,
    nix_util_dep,
    nix_main_dep,
    pqxx_dep,
  ],
  install: true,
)
@@ -1,8 +0,0 @@
-bin_PROGRAMS = hydra-queue-runner
-
-hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
-  builder.cc build-result.cc build-remote.cc \
-  build-result.hh counter.hh state.hh db.hh \
-  nar-extractor.cc nar-extractor.hh
-hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
-hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
@@ -5,128 +5,94 @@
|
|||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
|
|
||||||
#include "serve-protocol.hh"
|
#include <nix/store/build-result.hh>
|
||||||
|
#include <nix/store/path.hh>
|
||||||
|
#include <nix/store/legacy-ssh-store.hh>
|
||||||
|
#include <nix/store/serve-protocol.hh>
|
||||||
|
#include <nix/store/serve-protocol-impl.hh>
|
||||||
#include "state.hh"
|
#include "state.hh"
|
||||||
#include "util.hh"
|
#include <nix/util/current-process.hh>
|
||||||
#include "worker-protocol.hh"
|
#include <nix/util/processes.hh>
|
||||||
#include "finally.hh"
|
#include <nix/util/util.hh>
|
||||||
#include "url.hh"
|
#include <nix/store/serve-protocol.hh>
|
||||||
|
#include <nix/store/serve-protocol-impl.hh>
|
||||||
|
#include <nix/store/ssh.hh>
|
||||||
|
#include <nix/util/finally.hh>
|
||||||
|
#include <nix/util/url.hh>
|
||||||
|
|
||||||
using namespace nix;
|
using namespace nix;
|
||||||
|
|
||||||
|
bool ::Machine::isLocalhost() const
|
||||||
struct Child
|
|
||||||
{
|
{
|
||||||
Pid pid;
|
return storeUri.params.empty() && std::visit(overloaded {
|
||||||
AutoCloseFD to, from;
|
[](const StoreReference::Auto &) {
|
||||||
};
|
return true;
|
||||||
|
},
|
||||||
|
[](const StoreReference::Specified & s) {
|
||||||
static void append(Strings & dst, const Strings & src)
|
return
|
||||||
{
|
(s.scheme == "local" || s.scheme == "unix") ||
|
||||||
dst.insert(dst.end(), src.begin(), src.end());
|
((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
|
||||||
|
s.authority == "localhost");
|
||||||
|
},
|
||||||
|
}, storeUri.variant);
|
||||||
}
|
}
|
||||||
|
|
||||||
static Strings extraStoreArgs(std::string & machine)
|
namespace nix::build_remote {
|
||||||
|
|
||||||
|
static std::unique_ptr<SSHMaster::Connection> openConnection(
|
||||||
|
::Machine::ptr machine, SSHMaster & master)
|
||||||
{
|
{
|
||||||
Strings result;
|
Strings command = {"nix-store", "--serve", "--write"};
|
||||||
try {
|
if (machine->isLocalhost()) {
|
||||||
auto parsed = parseURL(machine);
|
command.push_back("--builders");
|
||||||
if (parsed.scheme != "ssh") {
|
command.push_back("");
|
||||||
throw SysError("Currently, only (legacy-)ssh stores are supported!");
|
} else {
|
||||||
|
auto remoteStore = machine->storeUri.params.find("remote-store");
|
||||||
|
if (remoteStore != machine->storeUri.params.end()) {
|
||||||
|
command.push_back("--store");
|
||||||
|
command.push_back(escapeShellArgAlways(remoteStore->second));
|
||||||
}
|
}
|
||||||
machine = parsed.authority.value_or("");
|
|
||||||
auto remoteStore = parsed.query.find("remote-store");
|
|
||||||
if (remoteStore != parsed.query.end()) {
|
|
||||||
result = {"--store", shellEscape(remoteStore->second)};
|
|
||||||
}
|
|
||||||
} catch (BadURL &) {
|
|
||||||
// We just try to continue with `machine->sshName` here for backwards compat.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return result;
|
auto ret = master.startCommand(std::move(command), {
|
||||||
}
|
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||||
|
|
||||||
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
|
|
||||||
{
|
|
||||||
string pgmName;
|
|
||||||
Pipe to, from;
|
|
||||||
to.create();
|
|
||||||
from.create();
|
|
||||||
|
|
||||||
child.pid = startProcess([&]() {
|
|
||||||
|
|
||||||
restoreProcessContext();
|
|
||||||
|
|
||||||
if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
|
|
||||||
throw SysError("cannot dup input pipe to stdin");
|
|
||||||
|
|
||||||
if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
|
|
||||||
throw SysError("cannot dup output pipe to stdout");
|
|
||||||
|
|
||||||
if (dup2(stderrFD, STDERR_FILENO) == -1)
|
|
||||||
throw SysError("cannot dup stderr");
|
|
||||||
|
|
||||||
Strings argv;
|
|
||||||
if (machine->isLocalhost()) {
|
|
||||||
pgmName = "nix-store";
|
|
||||||
argv = {"nix-store", "--builders", "", "--serve", "--write"};
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
pgmName = "ssh";
|
|
||||||
auto sshName = machine->sshName;
|
|
||||||
Strings extraArgs = extraStoreArgs(sshName);
|
|
||||||
argv = {"ssh", sshName};
|
|
||||||
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
|
|
||||||
if (machine->sshPublicHostKey != "") {
|
|
||||||
Path fileName = tmpDir + "/host-key";
|
|
||||||
auto p = machine->sshName.find("@");
|
|
||||||
string host = p != string::npos ? string(machine->sshName, p + 1) : machine->sshName;
|
|
||||||
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
|
|
||||||
append(argv, {"-oUserKnownHostsFile=" + fileName});
|
|
||||||
}
|
|
||||||
append(argv,
|
|
||||||
{ "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
|
||||||
, "--", "nix-store", "--serve", "--write" });
|
|
||||||
append(argv, extraArgs);
|
|
||||||
}
|
|
||||||
|
|
||||||
execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
|
|
||||||
|
|
||||||
throw SysError("cannot start %s", pgmName);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
to.readSide = -1;
|
// XXX: determine the actual max value we can use from /proc.
|
||||||
from.writeSide = -1;
|
|
||||||
|
|
||||||
child.to = to.writeSide.release();
|
// FIXME: Should this be upstreamed into `startCommand` in Nix?
|
||||||
child.from = from.readSide.release();
|
|
||||||
|
int pipesize = 1024 * 1024;
|
||||||
|
|
||||||
|
fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
|
||||||
|
fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
-static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
-    FdSource & from, FdSink & to, const StorePathSet & paths,
-    bool useSubstitutes = false)
+static void copyClosureTo(
+    ::Machine::Connection & conn,
+    Store & destStore,
+    const StorePathSet & paths,
+    SubstituteFlag useSubstitutes = NoSubstitute)
 {
     StorePathSet closure;
-    destStore->computeFSClosure(paths, closure);
+    destStore.computeFSClosure(paths, closure);

     /* Send the "query valid paths" command with the "lock" option
        enabled. This prevents a race where the remote host
        garbage-collects paths that are already there. Optionally, ask
        the remote host to substitute missing paths. */
     // FIXME: substitute output pollutes our build log
-    to << cmdQueryValidPaths << 1 << useSubstitutes;
-    worker_proto::write(*destStore, to, closure);
-    to.flush();

     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = worker_proto::read(*destStore, from, Phantom<StorePathSet> {});
+    auto present = conn.queryValidPaths(
+        destStore, true, closure, useSubstitutes);

     if (present.size() == closure.size()) return;

-    auto sorted = destStore->topoSortPaths(closure);
+    auto sorted = destStore.topoSortPaths(closure);

     StorePathSet missing;
     for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)

@@ -134,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,

     printMsg(lvlDebug, "sending %d missing paths", missing.size());

-    std::unique_lock<std::timed_mutex> sendLock(sendMutex,
+    std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));

-    to << cmdImportPaths;
-    destStore->exportPaths(missing, to);
-    to.flush();
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();

-    if (readInt(from) != 1)
+    if (readInt(conn.from) != 1)
         throw Error("remote machine failed to import closure");
 }

 // FIXME: use Store::topoSortPaths().
-StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
+static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
 {
     StorePaths sorted;
     StorePathSet visited;

@@ -175,40 +141,304 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths

     return sorted;
 }

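A side note on the send lock above (an illustration, not part of the patch): a std::unique_lock constructed with a duration uses try_lock_for and does not throw on timeout, so a caller that needs to know whether the 600-second wait actually succeeded has to check owns_lock(). A minimal sketch of that pattern:

    #include <chrono>
    #include <mutex>
    #include <stdexcept>

    // Hypothetical helper illustrating the timed-lock pattern used by copyClosureTo().
    void withSendLock(std::timed_mutex & sendMutex)
    {
        std::unique_lock<std::timed_mutex> lock(sendMutex, std::chrono::seconds(600));
        if (!lock.owns_lock())
            throw std::runtime_error("timed out waiting for the send lock");
        // ... exclusive use of the connection goes here ...
    }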
+static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
+{
+    std::string base(drvPath.to_string());
+    auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
+
+    createDirs(dirOf(logFile));
+
+    AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
+    if (!logFD) throw SysError("creating log file ‘%s’", logFile);
+
+    return {std::move(logFile), std::move(logFD)};
+}
+
+static BasicDerivation sendInputs(
+    State & state,
+    Step & step,
+    Store & localStore,
+    Store & destStore,
+    ::Machine::Connection & conn,
+    unsigned int & overhead,
+    counter & nrStepsWaiting,
+    counter & nrStepsCopyingTo
+)
+{
+    /* Replace the input derivations by their output paths to send a
+       minimal closure to the builder.
+
+       `tryResolve` currently does *not* rewrite input addresses, so it
+       is safe to do this in all cases. (It should probably have a mode
+       to do that, but we would not use it here.)
+    */
+    BasicDerivation basicDrv = ({
+        auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
+        if (!maybeBasicDrv)
+            throw Error(
+                "the derivation '%s' can’t be resolved. It’s probably "
+                "missing some outputs",
+                localStore.printStorePath(step.drvPath));
+        *maybeBasicDrv;
+    });
+
+    /* Ensure that the inputs exist in the destination store. This is
+       a no-op for regular stores, but for the binary cache store,
+       this will copy the inputs to the binary cache from the local
+       store. */
+    if (&localStore != &destStore) {
+        copyClosure(localStore, destStore,
+            step.drv->inputSrcs,
+            NoRepair, NoCheckSigs, NoSubstitute);
+    }
+
+    {
+        auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
+        mc1.reset();
+        MaintainCount<counter> mc2(nrStepsCopyingTo);
+
+        printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
+            localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
+
+        auto now1 = std::chrono::steady_clock::now();
+
+        /* Copy the input closure. */
+        if (conn.machine->isLocalhost()) {
+            StorePathSet closure;
+            destStore.computeFSClosure(basicDrv.inputSrcs, closure);
+            copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
+        } else {
+            copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
+        }
+
+        auto now2 = std::chrono::steady_clock::now();
+
+        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+    }
+
+    return basicDrv;
+}

+static BuildResult performBuild(
+    ::Machine::Connection & conn,
+    Store & localStore,
+    StorePath drvPath,
+    const BasicDerivation & drv,
+    const ServeProto::BuildOptions & options,
+    counter & nrStepsBuilding
+)
+{
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
+
+    BuildResult result;
+
+    time_t startTime, stopTime;
+
+    startTime = time(0);
+    {
+        MaintainCount<counter> mc(nrStepsBuilding);
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
+    }
+    stopTime = time(0);
+
+    if (!result.startTime) {
+        // If the builder gave `startTime = 0`, use our measurements
+        // instead of the builder's.
+        //
+        // Note: this represents the duration of a single round, rather
+        // than all rounds.
+        result.startTime = startTime;
+        result.stopTime = stopTime;
+    }
+
+    // If the protocol was too old to give us `builtOutputs`, initialize
+    // it manually by introspecting the derivation.
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
+    {
+        // If the remote is too old to handle CA derivations, we can’t get this
+        // far anyways
+        assert(drv.type().hasKnownOutputPaths());
+        DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
+        // Since this is a `BasicDerivation`, `staticOutputHashes` will not
+        // do any real work.
+        auto outputHashes = staticOutputHashes(localStore, drv);
+        for (auto & [outputName, output] : drvOutputs) {
+            auto outputPath = output.second;
+            // We’ve just asserted that the output paths of the derivation
+            // were known
+            assert(outputPath);
+            auto outputHash = outputHashes.at(outputName);
+            auto drvOutput = DrvOutput { outputHash, outputName };
+            result.builtOutputs.insert_or_assign(
+                std::move(outputName),
+                Realisation { drvOutput, *outputPath });
+        }
+    }
+
+    return result;
+}

+static void copyPathFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const ValidPathInfo & info
+)
+{
+    /* Receive the NAR from the remote and add it to the
+       destination store. Meanwhile, extract all the info from the
+       NAR that getBuildOutput() needs. */
+    auto source2 = sinkToSource([&](Sink & sink)
+    {
+        /* Note: we should only send the command to dump the store
+           path to the remote if the NAR is actually going to get read
+           by the destination store, which won't happen if this path
+           is already valid on the destination store. Since this
+           lambda function only gets executed if someone tries to read
+           from source2, we will send the command from here rather
+           than outside the lambda. */
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();
+
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+}
+
+static void copyPathsFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const std::map<StorePath, UnkeyedValidPathInfo> & infos
+)
+{
+    auto pathsSorted = reverseTopoSortPaths(infos);
+
+    for (auto & path : pathsSorted) {
+        auto & info = infos.find(path)->second;
+        copyPathFromRemote(
+            conn, narMembers, localStore, destStore,
+            ValidPathInfo { path, info });
+    }
+
+}
+
+}
+
+/* using namespace nix::build_remote; */

+void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
+{
+    startTime = buildResult.startTime;
+    stopTime = buildResult.stopTime;
+    timesBuilt = buildResult.timesBuilt;
+    errorMsg = buildResult.errorMsg;
+    isNonDeterministic = buildResult.isNonDeterministic;
+
+    switch ((BuildResult::Status) buildResult.status) {
+        case BuildResult::Built:
+            stepStatus = bsSuccess;
+            break;
+        case BuildResult::Substituted:
+        case BuildResult::AlreadyValid:
+            stepStatus = bsSuccess;
+            isCached = true;
+            break;
+        case BuildResult::PermanentFailure:
+            stepStatus = bsFailed;
+            canCache = true;
+            errorMsg = "";
+            break;
+        case BuildResult::InputRejected:
+        case BuildResult::OutputRejected:
+            stepStatus = bsFailed;
+            canCache = true;
+            break;
+        case BuildResult::TransientFailure:
+            stepStatus = bsFailed;
+            canRetry = true;
+            errorMsg = "";
+            break;
+        case BuildResult::TimedOut:
+            stepStatus = bsTimedOut;
+            errorMsg = "";
+            break;
+        case BuildResult::MiscFailure:
+            stepStatus = bsAborted;
+            canRetry = true;
+            break;
+        case BuildResult::LogLimitExceeded:
+            stepStatus = bsLogLimitExceeded;
+            break;
+        case BuildResult::NotDeterministic:
+            stepStatus = bsNotDeterministic;
+            canRetry = false;
+            canCache = true;
+            break;
+        default:
+            stepStatus = bsAborted;
+            break;
+    }
+
+}

+/* Utility guard object to auto-release a semaphore on destruction. */
+template <typename T>
+class SemaphoreReleaser {
+public:
+    SemaphoreReleaser(T* s) : sem(s) {}
+    ~SemaphoreReleaser() { sem->release(); }
+
+private:
+    T* sem;
+};

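For illustration only (not part of the patch): the guard pairs naturally with a counting semaphore — acquire a slot before the CPU-bound work and let the releaser return it on every exit path, including exceptions. A minimal sketch, assuming a C++20 std::counting_semaphore in place of whatever semaphore type State actually uses:

    #include <semaphore>

    std::counting_semaphore<> localWorkThrottler{4}; // e.g. 4 concurrent slots (hypothetical)

    void doThrottledWork()
    {
        localWorkThrottler.acquire();                    // wait for a slot
        SemaphoreReleaser releaser(&localWorkThrottler); // slot released on scope exit, even on throw
        // ... CPU-bound work ...
    }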
 void State::buildRemote(ref<Store> destStore,
-    Machine::ptr machine, Step::ptr step,
-    unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
+    std::unique_ptr<MachineReservation> reservation,
+    ::Machine::ptr machine, Step::ptr step,
+    const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
     std::function<void(StepState)> updateStep,
     NarMemberDatas & narMembers)
 {
     assert(BuildResult::TimedOut == 8);

-    string base(step->drvPath.to_string());
-    result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
-    AutoDelete autoDelete(result.logFile, false);
-
-    createDirs(dirOf(result.logFile));
-
-    AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
-    if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
-
-    nix::Path tmpDir = createTempDir();
-    AutoDelete tmpDirDel(tmpDir, true);
+    auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
+    AutoDelete logFileDel(logFile, false);
+    result.logFile = logFile;

     try {

         updateStep(ssConnecting);

+        auto storeRef = machine->completeStoreReference();
+
+        auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
+        if (!pSpecified || pSpecified->scheme != "ssh") {
+            throw Error("Currently, only (legacy-)ssh stores are supported!");
+        }
+
+        LegacySSHStoreConfig storeConfig {
+            pSpecified->scheme,
+            pSpecified->authority,
+            storeRef.params
+        };
+
+        auto master = storeConfig.createSSHMaster(
+            false, // no SSH master yet
+            logFD.get());
+
         // FIXME: rewrite to use Store.
-        Child child;
-        openConnection(machine, tmpDir, logFD.get(), child);
+        auto child = build_remote::openConnection(machine, master);

         {
             auto activeStepState(activeStep->state_.lock());
             if (activeStepState->cancelled) throw Error("step cancelled");
-            activeStepState->pid = child.pid;
+            activeStepState->pid = child->sshPid;
         }

         Finally clearPid([&]() {

@@ -223,34 +453,33 @@ void State::buildRemote(ref<Store> destStore,

            process. Meh. */
         });

-        FdSource from(child.from.get());
-        FdSink to(child.to.get());
+        ::Machine::Connection conn {
+            {
+                .to = child->in.get(),
+                .from = child->out.get(),
+                /* Handshake. */
+                .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+            },
+            /*.machine =*/ machine,
+        };

         Finally updateStats([&]() {
-            bytesReceived += from.read;
-            bytesSent += to.written;
+            bytesReceived += conn.from.read;
+            bytesSent += conn.to.written;
         });

-        /* Handshake. */
-        unsigned int remoteVersion;
+        constexpr ServeProto::Version our_version = 0x206;

         try {
-            to << SERVE_MAGIC_1 << 0x204;
-            to.flush();
-
-            unsigned int magic = readInt(from);
-            if (magic != SERVE_MAGIC_2)
-                throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
-            remoteVersion = readInt(from);
-            if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
-                throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
-            if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
-                throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
+            conn.remoteVersion = decltype(conn)::handshake(
+                conn.to,
+                conn.from,
+                our_version,
+                machine->storeUri.render());
         } catch (EndOfFile & e) {
-            child.pid.wait();
-            string s = chomp(readFile(result.logFile));
-            throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
+            child->sshPid.wait();
+            std::string s = chomp(readFile(result.logFile));
+            throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
         }

         {
@@ -264,62 +493,12 @@ void State::buildRemote(ref<Store> destStore,

            copy the immediate sources of the derivation and the required
            outputs of the input derivations. */
         updateStep(ssSendingInputs);

-        StorePathSet inputs;
-        BasicDerivation basicDrv(*step->drv);
-
-        for (auto & p : step->drv->inputSrcs)
-            inputs.insert(p);
-
-        for (auto & input : step->drv->inputDrvs) {
-            auto drv2 = localStore->readDerivation(input.first);
-            for (auto & name : input.second) {
-                if (auto i = get(drv2.outputs, name)) {
-                    auto outPath = i->path(*localStore, drv2.name, name);
-                    inputs.insert(*outPath);
-                    basicDrv.inputSrcs.insert(*outPath);
-                }
-            }
-        }
-
-        /* Ensure that the inputs exist in the destination store. This is
-           a no-op for regular stores, but for the binary cache store,
-           this will copy the inputs to the binary cache from the local
-           store. */
-        if (localStore != std::shared_ptr<Store>(destStore)) {
-            StorePathSet closure;
-            localStore->computeFSClosure(step->drv->inputSrcs, closure);
-            copyPaths(*localStore, *destStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
-        }
-
-        {
-            auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
-            mc1.reset();
-            MaintainCount<counter> mc2(nrStepsCopyingTo);
-
-            printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
-                localStore->printStorePath(step->drvPath), machine->sshName);
-
-            auto now1 = std::chrono::steady_clock::now();
-
-            /* Copy the input closure. */
-            if (machine->isLocalhost()) {
-                StorePathSet closure;
-                destStore->computeFSClosure(inputs, closure);
-                copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
-            } else {
-                copyClosureTo(machine->state->sendLock, destStore, from, to, inputs, true);
-            }
-
-            auto now2 = std::chrono::steady_clock::now();
-
-            result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
-        }
-
-        autoDelete.cancel();
+        BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
+
+        logFileDel.cancel();

         /* Truncate the log to get rid of messages about substitutions
            etc. on the remote system. */
         if (lseek(logFD.get(), SEEK_SET, 0) != 0)
             throw SysError("seeking to the start of log file ‘%s’", result.logFile);
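One observation about the unchanged seek line above (illustrative, not something the patch changes): POSIX lseek takes the offset before the whence flag. The call happens to behave correctly because SEEK_SET is 0, but the conventional spelling of "rewind to the start" would be:

    // Equivalent rewind with the arguments in their documented order.
    if (lseek(logFD.get(), 0, SEEK_SET) != 0)
        throw SysError("seeking to the start of log file ‘%s’", result.logFile);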
@@ -331,89 +510,21 @@ void State::buildRemote(ref<Store> destStore,

         /* Do the build. */
         printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
             localStore->printStorePath(step->drvPath),
-            machine->sshName);
+            machine->storeUri.render());

         updateStep(ssBuilding);

-        to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
-        writeDerivation(to, *localStore, basicDrv);
-        to << maxSilentTime << buildTimeout;
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
-            to << maxLogSize;
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
-            to << repeats // == build-repeat
-               << step->isDeterministic; // == enforce-determinism
-        }
-        to.flush();
+        BuildResult buildResult = build_remote::performBuild(
+            conn,
+            *localStore,
+            step->drvPath,
+            resolvedDrv,
+            buildOptions,
+            nrStepsBuilding
+        );

-        result.startTime = time(0);
-        int res;
-        {
-            MaintainCount<counter> mc(nrStepsBuilding);
-            res = readInt(from);
-        }
-        result.stopTime = time(0);
+        result.updateWithBuildResult(buildResult);

-        result.errorMsg = readString(from);
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
-            result.timesBuilt = readInt(from);
-            result.isNonDeterministic = readInt(from);
-            auto start = readInt(from);
-            auto stop = readInt(from);
-            if (start && start) {
-                /* Note: this represents the duration of a single
-                   round, rather than all rounds. */
-                result.startTime = start;
-                result.stopTime = stop;
-            }
-        }
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
-            worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
-        }
-        switch ((BuildResult::Status) res) {
-            case BuildResult::Built:
-                result.stepStatus = bsSuccess;
-                break;
-            case BuildResult::Substituted:
-            case BuildResult::AlreadyValid:
-                result.stepStatus = bsSuccess;
-                result.isCached = true;
-                break;
-            case BuildResult::PermanentFailure:
-                result.stepStatus = bsFailed;
-                result.canCache = true;
-                result.errorMsg = "";
-                break;
-            case BuildResult::InputRejected:
-            case BuildResult::OutputRejected:
-                result.stepStatus = bsFailed;
-                result.canCache = true;
-                break;
-            case BuildResult::TransientFailure:
-                result.stepStatus = bsFailed;
-                result.canRetry = true;
-                result.errorMsg = "";
-                break;
-            case BuildResult::TimedOut:
-                result.stepStatus = bsTimedOut;
-                result.errorMsg = "";
-                break;
-            case BuildResult::MiscFailure:
-                result.stepStatus = bsAborted;
-                result.canRetry = true;
-                break;
-            case BuildResult::LogLimitExceeded:
-                result.stepStatus = bsLogLimitExceeded;
-                break;
-            case BuildResult::NotDeterministic:
-                result.stepStatus = bsNotDeterministic;
-                result.canRetry = false;
-                result.canCache = true;
-                break;
-            default:
-                result.stepStatus = bsAborted;
-                break;
-        }
-
         if (result.stepStatus != bsSuccess) return;

         result.errorMsg = "";

@@ -422,11 +533,32 @@ void State::buildRemote(ref<Store> destStore,

            get a build log. */
         if (result.isCached) {
             printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
-                localStore->printStorePath(step->drvPath), machine->sshName);
+                localStore->printStorePath(step->drvPath), machine->storeUri.render());
             unlink(result.logFile.c_str());
             result.logFile = "";
         }

+        /* Throttle CPU-bound work. Opportunistically skip updating the current
+         * step, since this requires a DB roundtrip. */
+        if (!localWorkThrottler.try_acquire()) {
+            MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
+            updateStep(ssWaitingForLocalSlot);
+            localWorkThrottler.acquire();
+        }
+        SemaphoreReleaser releaser(&localWorkThrottler);
+
+        /* Once we've started copying outputs, release the machine reservation
+         * so further builds can happen. We do not release the machine earlier
+         * to avoid situations where the queue runner is bottlenecked on
+         * copying outputs and we end up building too many things that we
+         * haven't been able to allow copy slots for. */
+        reservation.reset();
+        wakeDispatcher();
+
+        StorePathSet outputs;
+        for (auto & [_, realisation] : buildResult.builtOutputs)
+            outputs.insert(realisation.outPath);

         /* Copy the output paths. */
         if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
             updateStep(ssReceivingOutputs);

@@ -435,39 +567,10 @@ void State::buildRemote(ref<Store> destStore,

             auto now1 = std::chrono::steady_clock::now();

-            StorePathSet outputs;
-            for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
-                if (i.second.second)
-                    outputs.insert(*i.second.second);
-            }
-
-            /* Get info about each output path. */
-            std::map<StorePath, ValidPathInfo> infos;
+            auto infos = conn.queryPathInfos(*localStore, outputs);
             size_t totalNarSize = 0;
-            to << cmdQueryPathInfos;
-            worker_proto::write(*localStore, to, outputs);
-            to.flush();
-            while (true) {
-                auto storePathS = readString(from);
-                if (storePathS == "") break;
-                auto deriver = readString(from); // deriver
-                auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
-                readLongLong(from); // download size
-                auto narSize = readLongLong(from);
-                auto narHash = Hash::parseAny(readString(from), htSHA256);
-                auto ca = parseContentAddressOpt(readString(from));
-                readStrings<StringSet>(from); // sigs
-                ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
-                assert(outputs.count(info.path));
-                info.references = references;
-                info.narSize = narSize;
-                totalNarSize += info.narSize;
-                info.narHash = narHash;
-                info.ca = ca;
-                if (deriver != "")
-                    info.deriver = localStore->parseStorePath(deriver);
-                infos.insert_or_assign(info.path, info);
-            }
+            for (auto & [_, info] : infos) totalNarSize += info.narSize;

             if (totalNarSize > maxOutputSize) {
                 result.stepStatus = bsNarSizeLimitExceeded;

@@ -476,43 +579,32 @@ void State::buildRemote(ref<Store> destStore,

             /* Copy each path. */
             printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
-                localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
+                localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);

-            auto pathsSorted = reverseTopoSortPaths(infos);
-
-            for (auto & path : pathsSorted) {
-                auto & info = infos.find(path)->second;
-
-                /* Receive the NAR from the remote and add it to the
-                   destination store. Meanwhile, extract all the info from the
-                   NAR that getBuildOutput() needs. */
-                auto source2 = sinkToSource([&](Sink & sink)
-                {
-                    /* Note: we should only send the command to dump the store
-                       path to the remote if the NAR is actually going to get read
-                       by the destination store, which won't happen if this path
-                       is already valid on the destination store. Since this
-                       lambda function only gets executed if someone tries to read
-                       from source2, we will send the command from here rather
-                       than outside the lambda. */
-                    to << cmdDumpStorePath << localStore->printStorePath(path);
-                    to.flush();
-
-                    TeeSource tee(from, sink);
-                    extractNarData(tee, localStore->printStorePath(path), narMembers);
-                });
-
-                destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
-            }
-
+            build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
             auto now2 = std::chrono::steady_clock::now();

             result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
         }

+        /* Register the outputs of the newly built drv */
+        if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
+            auto outputHashes = staticOutputHashes(*localStore, *step->drv);
+            for (auto & [outputName, realisation] : buildResult.builtOutputs) {
+                // Register the resolved drv output
+                destStore->registerDrvOutput(realisation);
+
+                // Also register the unresolved one
+                auto unresolvedRealisation = realisation;
+                unresolvedRealisation.signatures.clear();
+                unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
+                destStore->registerDrvOutput(unresolvedRealisation);
+            }
+        }
+
         /* Shut down the connection. */
-        child.to = -1;
-        child.pid.wait();
+        child->in = -1;
+        child->sshPid.wait();

     } catch (Error & e) {
         /* Disable this machine until a certain period of time has

@@ -526,7 +618,7 @@ void State::buildRemote(ref<Store> destStore,

             info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
             info->lastFailure = now;
             int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
-            printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
+            printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
             info->disabledUntil = now + std::chrono::seconds(delta);
         }
         throw;
@@ -1,7 +1,7 @@
-#include "build-result.hh"
-#include "store-api.hh"
-#include "util.hh"
-#include "fs-accessor.hh"
+#include "hydra-build-result.hh"
+#include <nix/store/store-api.hh>
+#include <nix/util/util.hh>
+#include <nix/util/source-accessor.hh>

 #include <regex>

@@ -11,18 +11,18 @@ using namespace nix;

 BuildOutput getBuildOutput(
     nix::ref<Store> store,
     NarMemberDatas & narMembers,
-    const Derivation & drv)
+    const OutputPathMap derivationOutputs)
 {
     BuildOutput res;

     /* Compute the closure size. */
     StorePathSet outputs;
     StorePathSet closure;
-    for (auto & i : drv.outputsAndOptPaths(*store))
-        if (i.second.second) {
-            store->computeFSClosure(*i.second.second, closure);
-            outputs.insert(*i.second.second);
+    for (auto& [outputName, outputPath] : derivationOutputs) {
+        store->computeFSClosure(outputPath, closure);
+        outputs.insert(outputPath);
+        res.outputs.insert({outputName, outputPath});
     }
     for (auto & path : closure) {
         auto info = store->queryPathInfo(path);
         res.closureSize += info->narSize;

@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(

         auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
         if (productsFile == narMembers.end() ||
-            productsFile->second.type != FSAccessor::Type::tRegular)
+            productsFile->second.type != SourceAccessor::Type::tRegular)
             continue;
         assert(productsFile->second.contents);

@@ -78,7 +78,7 @@ BuildOutput getBuildOutput(

             product.type = match[1];
             product.subtype = match[2];
             std::string s(match[3]);
-            product.path = s[0] == '"' ? string(s, 1, s.size() - 2) : s;
+            product.path = s[0] == '"' ? std::string(s, 1, s.size() - 2) : s;
             product.defaultPath = match[5];

             /* Ensure that the path exists and points into the Nix

@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(

             product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);

-            if (file->second.type == FSAccessor::Type::tRegular) {
+            if (file->second.type == SourceAccessor::Type::tRegular) {
                 product.isRegular = true;
                 product.fileSize = file->second.fileSize.value();
                 product.sha256hash = file->second.sha256.value();

@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(

     /* If no build products were explicitly declared, then add all
        outputs as a product of type "nix-build". */
     if (!explicitProducts) {
-        for (auto & [name, output] : drv.outputs) {
+        for (auto & [name, output] : derivationOutputs) {
             BuildProduct product;
-            auto outPath = output.path(*store, drv.name, name);
-            product.path = store->printStorePath(*outPath);
+            product.path = store->printStorePath(output);
             product.type = "nix-build";
             product.subtype = name == "out" ? "" : name;
-            product.name = outPath->name();
+            product.name = output.name();

             auto file = narMembers.find(product.path);
             assert(file != narMembers.end());
-            if (file->second.type == FSAccessor::Type::tDirectory)
+            if (file->second.type == SourceAccessor::Type::tDirectory)
                 res.products.push_back(product);
         }
     }

@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(

     for (auto & output : outputs) {
         auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
         if (file == narMembers.end() ||
-            file->second.type != FSAccessor::Type::tRegular)
+            file->second.type != SourceAccessor::Type::tRegular)
             continue;
         res.releaseName = trim(file->second.contents.value());
         // FIXME: validate release name

@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(

     for (auto & output : outputs) {
         auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
         if (file == narMembers.end() ||
-            file->second.type != FSAccessor::Type::tRegular)
+            file->second.type != SourceAccessor::Type::tRegular)
             continue;
         for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
             auto fields = tokenizeString<std::vector<std::string>>(line);
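For context (an illustration, not text from the patch): the regex-driven loop above parses $out/nix-support/hydra-build-products, which jobs write to declare downloadable products. Each line is roughly "type subtype path", with the path optionally quoted; hypothetical example lines:

    // Illustrative contents of a nix-support/hydra-build-products file
    // (store paths elided); fields map onto product.type, product.subtype, product.path.
    const char * exampleProductLines =
        "file binary-dist /nix/store/…-hello-2.12/tarballs/hello-2.12.tar.gz\n"
        "doc manual /nix/store/…-manual/share/doc/manual/index.html\n";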
@@ -1,9 +1,9 @@
 #include <cmath>

 #include "state.hh"
-#include "build-result.hh"
-#include "finally.hh"
-#include "binary-cache-store.hh"
+#include "hydra-build-result.hh"
+#include <nix/util/finally.hh>
+#include <nix/store/binary-cache-store.hh>

 using namespace nix;

@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)

 }

-void State::builder(MachineReservation::ptr reservation)
+void State::builder(std::unique_ptr<MachineReservation> reservation)
 {
     setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));

@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)

         activeSteps_.lock()->erase(activeStep);
     });

+    std::string machine = reservation->machine->storeUri.render();
+
     try {
         auto destStore = getDestStore();
-        res = doBuildStep(destStore, reservation, activeStep);
+        // Might release the reservation.
+        res = doBuildStep(destStore, std::move(reservation), activeStep);
     } catch (std::exception & e) {
         printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
-            localStore->printStorePath(reservation->step->drvPath),
-            reservation->machine->sshName,
+            localStore->printStorePath(activeStep->step->drvPath),
+            machine,
             e.what());
         }
     }

-    /* Release the machine and wake up the dispatcher. */
-    assert(reservation.unique());
-    reservation = 0;
-    wakeDispatcher();
-
     /* If there was a temporary failure, retry the step after an
        exponentially increasing interval. */
     Step::ptr step = wstep.lock();

@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)

 State::StepResult State::doBuildStep(nix::ref<Store> destStore,
-    MachineReservation::ptr reservation,
+    std::unique_ptr<MachineReservation> reservation,
     std::shared_ptr<ActiveStep> activeStep)
 {
-    auto & step(reservation->step);
-    auto & machine(reservation->machine);
+    auto step(reservation->step);
+    auto machine(reservation->machine);

     {
         auto step_(step->state.lock());

@@ -98,8 +96,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

        it). */
     BuildID buildId;
     std::optional<StorePath> buildDrvPath;
-    unsigned int maxSilentTime, buildTimeout;
-    unsigned int repeats = step->isDeterministic ? 1 : 0;
+    // Other fields set below
+    nix::ServeProto::BuildOptions buildOptions {
+        .maxLogSize = maxLogSize,
+        .nrRepeats = step->isDeterministic ? 1u : 0u,
+        .enforceDeterminism = step->isDeterministic,
+        .keepFailed = false,
+    };

     auto conn(dbPool.get());

@@ -134,18 +137,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

             {
                 auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
                 if (i != jobsetRepeats.end())
-                    repeats = std::max(repeats, i->second);
+                    buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
             }
         }
         if (!build) build = *dependents.begin();

         buildId = build->id;
         buildDrvPath = build->drvPath;
-        maxSilentTime = build->maxSilentTime;
-        buildTimeout = build->buildTimeout;
+        buildOptions.maxSilentTime = build->maxSilentTime;
+        buildOptions.buildTimeout = build->buildTimeout;

         printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
-            localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
+            localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
     }

     if (!buildOneDone)

@@ -173,7 +176,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

                 unlink(result.logFile.c_str());
             }
         } catch (...) {
-            ignoreException();
+            ignoreExceptionInDestructor();
         }
     }
 });

@@ -191,7 +194,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

     {
         auto mc = startDbUpdate();
         pqxx::work txn(*conn);
-        stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
+        stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
         txn.commit();
     }

@@ -206,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

     try {
         /* FIXME: referring builds may have conflicting timeouts. */
-        buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
+        buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
     } catch (Error & e) {
         if (activeStep->state_.lock()->cancelled) {
             printInfo("marking step %d of build %d as cancelled", stepNr, buildId);

@@ -221,7 +224,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

     if (result.stepStatus == bsSuccess) {
         updateStep(ssPostProcessing);
-        res = getBuildOutput(destStore, narMembers, *step->drv);
+        res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
     }
 }

@@ -248,7 +251,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

     /* Finish the step in the database. */
     if (stepNr) {
         pqxx::work txn(*conn);
-        finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
+        finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
         txn.commit();
     }

@@ -256,7 +259,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

        issue). Retry a number of times. */
     if (result.canRetry) {
         printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
-            localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
+            localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
         assert(stepNr);
         bool retry;
         {

@@ -275,9 +278,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

     assert(stepNr);

-    for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
-        if (i.second.second)
-            addRoot(*i.second.second);
+    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
+        if (!optOutputPath)
+            throw Error(
+                "Missing output %s for derivation %d which was supposed to have succeeded",
+                outputName, localStore->printStorePath(step->drvPath));
+        addRoot(*optOutputPath);
     }

     /* Register success in the database for all Build objects that

@@ -323,7 +329,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

         pqxx::work txn(*conn);

         for (auto & b : direct) {
-            printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
+            printInfo("marking build %1% as succeeded", b->id);
             markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
                 result.startTime, result.stopTime);
         }

@@ -398,7 +404,7 @@ void State::failStep(

     Step::ptr step,
     BuildID buildId,
     const RemoteResult & result,
-    Machine::ptr machine,
+    ::Machine::ptr machine,
     bool & stepFinished)
 {
     /* Register failure in the database for all Build objects that

@@ -444,14 +450,14 @@ void State::failStep(

             build->finishedInDB)
             continue;
         createBuildStep(txn,
-            0, build->id, step, machine ? machine->sshName : "",
+            0, build->id, step, machine ? machine->storeUri.render() : "",
             result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
     }

     /* Mark all builds that depend on this derivation as failed. */
     for (auto & build : indirect) {
         if (build->finishedInDB) continue;
-        printMsg(lvlError, format("marking build %1% as failed") % build->id);
+        printError("marking build %1% as failed", build->id);
         txn.exec_params0
             ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
             build->id,
@@ -2,6 +2,7 @@
 #include <cmath>
 #include <thread>
 #include <unordered_map>
+#include <unordered_set>

 #include "state.hh"

@@ -39,28 +40,34 @@ void State::dispatcher()

             printMsg(lvlDebug, "dispatcher woken up");
             nrDispatcherWakeups++;

-            auto now1 = std::chrono::steady_clock::now();
+            auto t_before_work = std::chrono::steady_clock::now();

             auto sleepUntil = doDispatch();

-            auto now2 = std::chrono::steady_clock::now();
+            auto t_after_work = std::chrono::steady_clock::now();

-            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+            prom.dispatcher_time_spent_running.Increment(
+                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();

             /* Sleep until we're woken up (either because a runnable build
                is added, or because a build finishes). */
             {
                 auto dispatcherWakeup_(dispatcherWakeup.lock());
                 if (!*dispatcherWakeup_) {
-                    printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
+                    debug("dispatcher sleeping for %1%s",
                         std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
                     dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
                 }
                 *dispatcherWakeup_ = false;
             }

+            auto t_after_sleep = std::chrono::steady_clock::now();
+            prom.dispatcher_time_spent_waiting.Increment(
+                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+
         } catch (std::exception & e) {
-            printMsg(lvlError, format("dispatcher: %1%") % e.what());
+            printError("dispatcher: %s", e.what());
             sleep(1);
         }

@@ -80,17 +87,124 @@ system_time State::doDispatch()

         jobset.second->pruneSteps();
         auto s2 = jobset.second->shareUsed();
         if (s1 != s2)
-            printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
-                % jobset.first.first % jobset.first.second % s1 % s2);
+            debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
+                jobset.first.first, jobset.first.second, s1, s2);
     }
 }

||||||
|
system_time now = std::chrono::system_clock::now();
|
||||||
|
|
||||||
/* Start steps until we're out of steps or slots. */
|
/* Start steps until we're out of steps or slots. */
|
||||||
auto sleepUntil = system_time::max();
|
auto sleepUntil = system_time::max();
|
||||||
bool keepGoing;
|
bool keepGoing;
|
||||||
|
|
||||||
|
/* Sort the runnable steps by priority. Priority is establised
|
||||||
|
as follows (in order of precedence):
|
||||||
|
|
||||||
|
- The global priority of the builds that depend on the
|
||||||
|
step. This allows admins to bump a build to the front of
|
||||||
|
the queue.
|
||||||
|
|
||||||
|
- The lowest used scheduling share of the jobsets depending
|
||||||
|
on the step.
|
||||||
|
|
||||||
|
- The local priority of the build, as set via the build's
|
||||||
|
meta.schedulingPriority field. Note that this is not
|
||||||
|
quite correct: the local priority should only be used to
|
||||||
|
establish priority between builds in the same jobset, but
|
||||||
|
here it's used between steps in different jobsets if they
|
||||||
|
happen to have the same lowest used scheduling share. But
|
||||||
|
that's not very likely.
|
||||||
|
|
||||||
|
- The lowest ID of the builds depending on the step;
|
||||||
|
i.e. older builds take priority over new ones.
|
||||||
|
|
||||||
|
FIXME: O(n lg n); obviously, it would be better to keep a
|
||||||
|
runnable queue sorted by priority. */
|
||||||
|
struct StepInfo
|
||||||
|
{
|
||||||
|
Step::ptr step;
|
||||||
|
bool alreadyScheduled = false;
|
||||||
|
|
||||||
|
/* The lowest share used of any jobset depending on this
|
||||||
|
step. */
|
||||||
|
double lowestShareUsed = 1e9;
|
||||||
|
|
||||||
|
/* Info copied from step->state to ensure that the
|
||||||
|
comparator is a partial ordering (see MachineInfo). */
|
||||||
|
int highestGlobalPriority;
|
||||||
|
int highestLocalPriority;
|
||||||
|
size_t numRequiredSystemFeatures;
|
||||||
|
size_t numRevDeps;
|
||||||
|
BuildID lowestBuildID;
|
||||||
|
|
||||||
|
StepInfo(Step::ptr step, Step::State & step_) : step(step)
|
||||||
|
{
|
||||||
|
for (auto & jobset : step_.jobsets)
|
||||||
|
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
|
||||||
|
highestGlobalPriority = step_.highestGlobalPriority;
|
||||||
|
highestLocalPriority = step_.highestLocalPriority;
|
||||||
|
numRequiredSystemFeatures = step->requiredSystemFeatures.size();
|
||||||
|
numRevDeps = step_.rdeps.size();
|
||||||
|
lowestBuildID = step_.lowestBuildID;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
std::vector<StepInfo> runnableSorted;
|
||||||
|
|
||||||
|
struct RunnablePerType
|
||||||
|
{
|
||||||
|
unsigned int count{0};
|
||||||
|
std::chrono::seconds waitTime{0};
|
||||||
|
};
|
||||||
|
|
||||||
|
std::unordered_map<std::string, RunnablePerType> runnablePerType;
|
||||||
|
|
||||||
|
{
|
||||||
|
auto runnable_(runnable.lock());
|
||||||
|
runnableSorted.reserve(runnable_->size());
|
||||||
|
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
|
||||||
|
auto step = i->lock();
|
||||||
|
|
||||||
|
/* Remove dead steps. */
|
||||||
|
if (!step) {
|
||||||
|
i = runnable_->erase(i);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
++i;
|
||||||
|
|
||||||
|
auto & r = runnablePerType[step->systemType];
|
||||||
|
r.count++;
|
||||||
|
|
||||||
|
/* Skip previously failed steps that aren't ready
|
||||||
|
to be retried. */
|
||||||
|
auto step_(step->state.lock());
|
||||||
|
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
|
||||||
|
if (step_->tries > 0 && step_->after > now) {
|
||||||
|
if (step_->after < sleepUntil)
|
||||||
|
sleepUntil = step_->after;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
runnableSorted.emplace_back(step, *step_);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort(runnableSorted.begin(), runnableSorted.end(),
|
||||||
|
[](const StepInfo & a, const StepInfo & b)
|
||||||
|
{
|
||||||
|
return
|
||||||
|
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
|
||||||
|
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
|
||||||
|
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
|
||||||
|
a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
|
||||||
|
a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
|
||||||
|
a.lowestBuildID < b.lowestBuildID;
|
||||||
|
});
|
||||||
|
|
     do {
-        system_time now = std::chrono::system_clock::now();
+        now = std::chrono::system_clock::now();
 
         /* Copy the currentJobs field of each machine. This is
            necessary to ensure that the sort comparator below is
@@ -98,7 +212,7 @@ system_time State::doDispatch()
            filter out temporarily disabled machines. */
         struct MachineInfo
         {
-            Machine::ptr machine;
+            ::Machine::ptr machine;
             unsigned long currentJobs;
         };
         std::vector<MachineInfo> machinesSorted;
@@ -138,104 +252,6 @@ system_time State::doDispatch()
                     a.currentJobs > b.currentJobs;
             });
-
-        /* Sort the runnable steps by priority. Priority is establised
-           as follows (in order of precedence):
-
-           - The global priority of the builds that depend on the
-             step. This allows admins to bump a build to the front of
-             the queue.
-
-           - The lowest used scheduling share of the jobsets depending
-             on the step.
-
-           - The local priority of the build, as set via the build's
-             meta.schedulingPriority field. Note that this is not
-             quite correct: the local priority should only be used to
-             establish priority between builds in the same jobset, but
-             here it's used between steps in different jobsets if they
-             happen to have the same lowest used scheduling share. But
-             that's not very likely.
-
-           - The lowest ID of the builds depending on the step;
-             i.e. older builds take priority over new ones.
-
-           FIXME: O(n lg n); obviously, it would be better to keep a
-           runnable queue sorted by priority. */
-        struct StepInfo
-        {
-            Step::ptr step;
-
-            /* The lowest share used of any jobset depending on this
-               step. */
-            double lowestShareUsed = 1e9;
-
-            /* Info copied from step->state to ensure that the
-               comparator is a partial ordering (see MachineInfo). */
-            int highestGlobalPriority;
-            int highestLocalPriority;
-            BuildID lowestBuildID;
-
-            StepInfo(Step::ptr step, Step::State & step_) : step(step)
-            {
-                for (auto & jobset : step_.jobsets)
-                    lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
-                highestGlobalPriority = step_.highestGlobalPriority;
-                highestLocalPriority = step_.highestLocalPriority;
-                lowestBuildID = step_.lowestBuildID;
-            }
-        };
-
-        std::vector<StepInfo> runnableSorted;
-
-        struct RunnablePerType
-        {
-            unsigned int count{0};
-            std::chrono::seconds waitTime{0};
-        };
-
-        std::unordered_map<std::string, RunnablePerType> runnablePerType;
-
-        {
-            auto runnable_(runnable.lock());
-            runnableSorted.reserve(runnable_->size());
-            for (auto i = runnable_->begin(); i != runnable_->end(); ) {
-                auto step = i->lock();
-
-                /* Remove dead steps. */
-                if (!step) {
-                    i = runnable_->erase(i);
-                    continue;
-                }
-
-                ++i;
-
-                auto & r = runnablePerType[step->systemType];
-                r.count++;
-
-                /* Skip previously failed steps that aren't ready
-                   to be retried. */
-                auto step_(step->state.lock());
-                r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
-                if (step_->tries > 0 && step_->after > now) {
-                    if (step_->after < sleepUntil)
-                        sleepUntil = step_->after;
-                    continue;
-                }
-
-                runnableSorted.emplace_back(step, *step_);
-            }
-        }
-
-        sort(runnableSorted.begin(), runnableSorted.end(),
-            [](const StepInfo & a, const StepInfo & b)
-            {
-                return
-                    a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
-                    a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
-                    a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
-                    a.lowestBuildID < b.lowestBuildID;
-            });
-
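Note: the removed comment above spells out the precedence rules that the new comparator (kept further up, now with numRequiredSystemFeatures and numRevDeps as extra tie-breakers) still follows. As a minimal standalone sketch of the chained tie-breaking pattern, with made-up field values rather than Hydra's actual types:

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for Hydra's StepInfo: all sort keys are plain
// copies, so the comparator stays a consistent strict weak ordering even
// if the underlying step state changes while sorting.
struct Item {
    int globalPriority;     // higher first
    double shareUsed;       // lower first
    int localPriority;      // higher first
    unsigned buildId;       // lower (older) first
};

int main() {
    std::vector<Item> items{{0, 0.5, 10, 7}, {1, 0.9, 0, 9}, {0, 0.5, 10, 3}};
    std::sort(items.begin(), items.end(), [](const Item & a, const Item & b) {
        return
            a.globalPriority != b.globalPriority ? a.globalPriority > b.globalPriority :
            a.shareUsed != b.shareUsed ? a.shareUsed < b.shareUsed :
            a.localPriority != b.localPriority ? a.localPriority > b.localPriority :
            a.buildId < b.buildId;
    });
    for (auto & i : items) std::printf("%u\n", i.buildId); // prints 9, 3, 7
    return 0;
}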
         /* Find a machine with a free slot and find a step to run
            on it. Once we find such a pair, we restart the outer
            loop because the machine sorting will have changed. */
@@ -245,12 +261,14 @@ system_time State::doDispatch()
             if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
 
             for (auto & stepInfo : runnableSorted) {
+                if (stepInfo.alreadyScheduled) continue;
+
                 auto & step(stepInfo.step);
 
                 /* Can this machine do this step? */
                 if (!mi.machine->supportsStep(step)) {
                     debug("machine '%s' does not support step '%s' (system type '%s')",
-                        mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
+                        mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
                     continue;
                 }
 
@@ -271,10 +289,12 @@ system_time State::doDispatch()
                     r.count--;
                 }
 
+                stepInfo.alreadyScheduled = true;
+
                 /* Make a slot reservation and start a thread to
                    do the build. */
                 auto builderThread = std::thread(&State::builder, this,
-                    std::make_shared<MachineReservation>(*this, step, mi.machine));
+                    std::make_unique<MachineReservation>(*this, step, mi.machine));
                 builderThread.detach(); // FIXME?
 
                 keepGoing = true;
@@ -428,7 +448,7 @@ void Jobset::pruneSteps()
 }
 
 
-State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
+State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
     : state(state), step(step), machine(machine)
 {
     machine->state->currentJobs++;
@@ -2,9 +2,9 @@
 
 #include <memory>
 
-#include "hash.hh"
-#include "derivations.hh"
-#include "store-api.hh"
+#include <nix/util/hash.hh>
+#include <nix/store/derivations.hh>
+#include <nix/store/store-api.hh>
 #include "nar-extractor.hh"
 
 struct BuildProduct
@@ -36,10 +36,12 @@ struct BuildOutput
 
     std::list<BuildProduct> products;
 
+    std::map<std::string, nix::StorePath> outputs;
+
     std::map<std::string, BuildMetric> metrics;
 };
 
 BuildOutput getBuildOutput(
     nix::ref<nix::Store> store,
     NarMemberDatas & narMembers,
-    const nix::Derivation & drv);
+    const nix::OutputPathMap derivationOutputs);
@@ -1,32 +1,29 @@
 #include <iostream>
 #include <thread>
 #include <optional>
+#include <type_traits>
 
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 
-#include "state.hh"
-#include "build-result.hh"
-#include "store-api.hh"
-#include "remote-store.hh"
+#include <prometheus/exposer.h>
 
-#include "globals.hh"
+#include <nlohmann/json.hpp>
+
+#include <nix/util/signals.hh>
+#include "state.hh"
+#include "hydra-build-result.hh"
+#include <nix/store/store-open.hh>
+#include <nix/store/remote-store.hh>
+
+#include <nix/store/globals.hh>
 #include "hydra-config.hh"
-#include "json.hh"
-#include "s3-binary-cache-store.hh"
-#include "shared.hh"
+#include <nix/store/s3-binary-cache-store.hh>
+#include <nix/main/shared.hh>
 
 using namespace nix;
+using nlohmann::json;
 
-namespace nix {
-
-template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
-template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
-template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
-
-}
-
 
 std::string getEnvOrDie(const std::string & key)
@@ -36,20 +33,94 @@ std::string getEnvOrDie(const std::string & key)
     return *value;
 }
 
+State::PromMetrics::PromMetrics()
+    : registry(std::make_shared<prometheus::Registry>())
+    , queue_checks_started(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_checks_started_total")
+            .Help("Number of times State::getQueuedBuilds() was started")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_build_loads(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_build_loads_total")
+            .Help("Number of builds loaded")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_steps_created(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_steps_created_total")
+            .Help("Number of steps created")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_checks_early_exits(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_checks_early_exits_total")
+            .Help("Number of times State::getQueuedBuilds() yielded to potential bumps")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_checks_finished(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_checks_finished_total")
+            .Help("Number of times State::getQueuedBuilds() was completed")
+            .Register(*registry)
+            .Add({})
+    )
+    , dispatcher_time_spent_running(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_dispatcher_time_spent_running")
+            .Help("Time (in micros) spent running the dispatcher")
+            .Register(*registry)
+            .Add({})
+    )
+    , dispatcher_time_spent_waiting(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_dispatcher_time_spent_waiting")
+            .Help("Time (in micros) spent waiting for the dispatcher to obtain work")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_monitor_time_spent_running(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_monitor_time_spent_running")
+            .Help("Time (in micros) spent running the queue monitor")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_monitor_time_spent_waiting(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
+            .Help("Time (in micros) spent waiting for the queue monitor to obtain work")
+            .Register(*registry)
+            .Add({})
+    )
+{
+}
+
-State::State()
+State::State(std::optional<std::string> metricsAddrOpt)
     : config(std::make_unique<HydraConfig>())
     , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
     , dbPool(config->getIntOption("max_db_connections", 128))
+    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
     , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
     , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
     , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
     , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME"))))
+    , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"}))
 {
     hydraData = getEnvOrDie("HYDRA_DATA");
 
     logDir = canonPath(hydraData + "/build-logs");
 
+    if (metricsAddrOpt.has_value()) {
+        metricsAddr = metricsAddrOpt.value();
+    }
+
     /* handle deprecated store specification */
     if (config->getStrOption("store_mode") != "")
         throw Error("store_mode in hydra.conf is deprecated, please use store_uri");
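Note: the constructor above follows the usual prometheus-cpp pattern: each counter family is built with Name/Help, registered on a shared Registry, and Add({}) yields the single unlabelled counter that the runner later increments. A minimal self-contained sketch of the same pattern, with a made-up metric name and listen address (not Hydra's), assuming prometheus-cpp is available:

#include <chrono>
#include <memory>
#include <thread>

#include <prometheus/counter.h>
#include <prometheus/exposer.h>
#include <prometheus/registry.h>

int main() {
    // Serve /metrics on a hypothetical local address.
    prometheus::Exposer exposer{"127.0.0.1:9999"};

    auto registry = std::make_shared<prometheus::Registry>();

    // Same builder chain as above: the family describes the metric, the
    // registry owns it, Add({}) returns the counter instance to increment.
    auto & checks = prometheus::BuildCounter()
        .Name("example_checks_total")
        .Help("Number of example checks performed")
        .Register(*registry)
        .Add({});

    // Make everything in the registry visible on the exposer's endpoint.
    exposer.RegisterCollectable(registry);

    for (int i = 0; i < 5; i++) {
        checks.Increment();
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }
    return 0;
}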
@@ -86,50 +157,29 @@ void State::parseMachines(const std::string & contents)
         oldMachines = *machines_;
     }
 
-    for (auto line : tokenizeString<Strings>(contents, "\n")) {
-        line = trim(string(line, 0, line.find('#')));
-        auto tokens = tokenizeString<std::vector<std::string>>(line);
-        if (tokens.size() < 3) continue;
-        tokens.resize(8);
-
-        auto machine = std::make_shared<Machine>();
-        machine->sshName = tokens[0];
-        machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
-        machine->sshKey = tokens[2] == "-" ? string("") : tokens[2];
-        if (tokens[3] != "")
-            machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
-        else
-            machine->maxJobs = 1;
-        machine->speedFactor = atof(tokens[4].c_str());
-        if (tokens[5] == "-") tokens[5] = "";
-        machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
-        if (tokens[6] == "-") tokens[6] = "";
-        machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
-        for (auto & f : machine->mandatoryFeatures)
-            machine->supportedFeatures.insert(f);
-        if (tokens[7] != "" && tokens[7] != "-")
-            machine->sshPublicHostKey = base64Decode(tokens[7]);
+    for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
+        auto machine = std::make_shared<::Machine>(std::move(machine_));
 
         /* Re-use the State object of the previous machine with the
            same name. */
-        auto i = oldMachines.find(machine->sshName);
+        auto i = oldMachines.find(machine->storeUri.variant);
         if (i == oldMachines.end())
-            printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
+            printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
         else
-            printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
+            printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
         machine->state = i == oldMachines.end()
-            ? std::make_shared<Machine::State>()
+            ? std::make_shared<::Machine::State>()
             : i->second->state;
-        newMachines[machine->sshName] = machine;
+        newMachines[machine->storeUri.variant] = machine;
     }
 
     for (auto & m : oldMachines)
         if (newMachines.find(m.first) == newMachines.end()) {
             if (m.second->enabled)
-                printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
+                printInfo("removing machine ‘%1%’", m.second->storeUri.render());
-            /* Add a disabled Machine object to make sure stats are
+            /* Add a disabled ::Machine object to make sure stats are
                maintained. */
-            auto machine = std::make_shared<Machine>(*(m.second));
+            auto machine = std::make_shared<::Machine>(*(m.second));
             machine->enabled = false;
             newMachines[m.first] = machine;
         }
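Note: the removed hand-rolled parser above documents the whitespace-separated machines-file format that nix::Machine::parseConfig now handles: store URI, system types, SSH key, max jobs, speed factor, supported features, mandatory features, and a base64 SSH host key, with "-" meaning "not set". A small illustration-only sketch of that field mapping (the example line is made up):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
    // Hypothetical /etc/nix/machines line; fields mirror tokens[0..7] above.
    std::string line =
        "ssh://builder@example.org x86_64-linux /var/lib/hydra/id_rsa 4 1 kvm,big-parallel - -";
    std::istringstream in(line);
    std::vector<std::string> tokens;
    for (std::string t; in >> t; ) tokens.push_back(t);
    tokens.resize(8);
    const char * names[8] = {
        "storeUri", "systemTypes", "sshKey", "maxJobs",
        "speedFactor", "supportedFeatures", "mandatoryFeatures", "sshPublicHostKey"};
    for (int i = 0; i < 8; i++)
        std::cout << names[i] << " = " << tokens[i] << "\n";
    return 0;
}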
@@ -149,7 +199,7 @@ void State::parseMachines(const std::string & contents)
 
 void State::monitorMachinesFile()
 {
-    string defaultMachinesFile = "/etc/nix/machines";
+    std::string defaultMachinesFile = "/etc/nix/machines";
     auto machinesFiles = tokenizeString<std::vector<Path>>(
         getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");
 
@@ -157,7 +207,7 @@ void State::monitorMachinesFile()
         parseMachines("localhost " +
             (settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
             + " - " + std::to_string(settings.maxBuildJobs) + " 1 "
-            + concatStringsSep(",", settings.systemFeatures.get()));
+            + concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
         machinesReadyLock.unlock();
         return;
     }
@@ -191,7 +241,7 @@ void State::monitorMachinesFile()
 
         debug("reloading machines files");
 
-        string contents;
+        std::string contents;
         for (auto & machinesFile : machinesFiles) {
             try {
                 contents += readFile(machinesFile);
@@ -264,10 +314,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
 
     if (r.affected_rows() == 0) goto restart;
 
-    for (auto & [name, output] : step->drv->outputs)
+    for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
         txn.exec_params0
             ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
-             buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
+             buildId, stepNr, name,
+             output
+                 ? std::optional { localStore->printStorePath(*output)}
+                 : std::nullopt);
 
     if (status == bsBusy)
         txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
@@ -304,11 +357,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
     assert(result.logFile.find('\t') == std::string::npos);
     txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
         buildId, stepNr, result.logFile));
+
+    if (result.stepStatus == bsSuccess) {
+        // Update the corresponding `BuildStepOutputs` row to add the output path
+        auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
+        assert(res.size());
+        StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
+        // If we've finished building, all the paths should be known
+        for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
+            txn.exec_params0
+                ("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
+                 buildId, stepNr, name, localStore->printStorePath(output));
+    }
 }
 
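Note: the hunk above switches BuildStepOutputs to a two-phase scheme: rows are inserted with a NULL path when the output path is not yet known (content-addressed builds), then filled in once the step succeeds. A minimal libpqxx sketch of that insert-then-update pattern, against a hypothetical database and assuming a libpqxx version that maps std::optional to SQL NULL:

#include <optional>
#include <string>
#include <pqxx/pqxx>

int main() {
    pqxx::connection conn{"dbname=hydra_example"};   // hypothetical DSN
    pqxx::work txn{conn};

    // Phase 1: record the output name, leaving the path column NULL
    // because it is not known at scheduling time.
    std::optional<std::string> path; // empty -> NULL
    txn.exec_params0(
        "insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
        1, 1, "out", path);

    // Phase 2: once the build succeeds the path is known, so fill it in.
    txn.exec_params0(
        "update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
        1, 1, "out", "/nix/store/example-path");

    txn.commit();
    return 0;
}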
 int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
-    Build::ptr build, const StorePath & drvPath, const string & outputName, const StorePath & storePath)
+    Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
 {
 restart:
     auto stepNr = allocBuildStep(txn, build->id);
@@ -409,6 +474,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
         res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
         isCachedBuild ? 1 : 0);
 
+    for (auto & [outputName, outputPath] : res.outputs) {
+        txn.exec_params0
+            ("update BuildOutputs set path = $3 where build = $1 and name = $2",
+             build->id,
+             outputName,
+             localStore->printStorePath(outputPath)
+            );
+    }
+
     txn.exec_params0("delete from BuildProducts where build = $1", build->id);
 
     unsigned int productNr = 1;
@@ -420,7 +494,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
             product.type,
             product.subtype,
             product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
-            product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
+            product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
             product.path,
             product.name,
             product.defaultPath);
@@ -488,182 +562,174 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
|
|||||||
|
|
||||||
void State::dumpStatus(Connection & conn)
|
void State::dumpStatus(Connection & conn)
|
||||||
{
|
{
|
||||||
std::ostringstream out;
|
time_t now = time(0);
|
||||||
|
json statusJson = {
|
||||||
|
{"status", "up"},
|
||||||
|
{"time", time(0)},
|
||||||
|
{"uptime", now - startedAt},
|
||||||
|
{"pid", getpid()},
|
||||||
|
|
||||||
|
{"nrQueuedBuilds", builds.lock()->size()},
|
||||||
|
{"nrActiveSteps", activeSteps_.lock()->size()},
|
||||||
|
{"nrStepsBuilding", nrStepsBuilding.load()},
|
||||||
|
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
|
||||||
|
{"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
|
||||||
|
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
|
||||||
|
{"nrStepsWaiting", nrStepsWaiting.load()},
|
||||||
|
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
|
||||||
|
{"bytesSent", bytesSent.load()},
|
||||||
|
{"bytesReceived", bytesReceived.load()},
|
||||||
|
{"nrBuildsRead", nrBuildsRead.load()},
|
||||||
|
{"buildReadTimeMs", buildReadTimeMs.load()},
|
||||||
|
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
|
||||||
|
{"nrBuildsDone", nrBuildsDone.load()},
|
||||||
|
{"nrStepsStarted", nrStepsStarted.load()},
|
||||||
|
{"nrStepsDone", nrStepsDone.load()},
|
||||||
|
{"nrRetries", nrRetries.load()},
|
||||||
|
{"maxNrRetries", maxNrRetries.load()},
|
||||||
|
{"nrQueueWakeups", nrQueueWakeups.load()},
|
||||||
|
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
|
||||||
|
{"dispatchTimeMs", dispatchTimeMs.load()},
|
||||||
|
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
|
||||||
|
{"nrDbConnections", dbPool.count()},
|
||||||
|
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
|
||||||
|
};
|
||||||
{
|
{
|
||||||
JSONObject root(out);
|
|
||||||
time_t now = time(0);
|
|
||||||
root.attr("status", "up");
|
|
||||||
root.attr("time", time(0));
|
|
||||||
root.attr("uptime", now - startedAt);
|
|
||||||
root.attr("pid", getpid());
|
|
||||||
{
|
|
||||||
auto builds_(builds.lock());
|
|
||||||
root.attr("nrQueuedBuilds", builds_->size());
|
|
||||||
}
|
|
||||||
{
|
{
|
||||||
auto steps_(steps.lock());
|
auto steps_(steps.lock());
|
||||||
for (auto i = steps_->begin(); i != steps_->end(); )
|
for (auto i = steps_->begin(); i != steps_->end(); )
|
||||||
if (i->second.lock()) ++i; else i = steps_->erase(i);
|
if (i->second.lock()) ++i; else i = steps_->erase(i);
|
||||||
root.attr("nrUnfinishedSteps", steps_->size());
|
statusJson["nrUnfinishedSteps"] = steps_->size();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
auto runnable_(runnable.lock());
|
auto runnable_(runnable.lock());
|
||||||
for (auto i = runnable_->begin(); i != runnable_->end(); )
|
for (auto i = runnable_->begin(); i != runnable_->end(); )
|
||||||
if (i->lock()) ++i; else i = runnable_->erase(i);
|
if (i->lock()) ++i; else i = runnable_->erase(i);
|
||||||
root.attr("nrRunnableSteps", runnable_->size());
|
statusJson["nrRunnableSteps"] = runnable_->size();
|
||||||
}
|
}
|
||||||
root.attr("nrActiveSteps", activeSteps_.lock()->size());
|
|
||||||
root.attr("nrStepsBuilding", nrStepsBuilding);
|
|
||||||
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
|
|
||||||
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
|
|
||||||
root.attr("nrStepsWaiting", nrStepsWaiting);
|
|
||||||
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
|
|
||||||
root.attr("bytesSent", bytesSent);
|
|
||||||
root.attr("bytesReceived", bytesReceived);
|
|
||||||
root.attr("nrBuildsRead", nrBuildsRead);
|
|
||||||
root.attr("buildReadTimeMs", buildReadTimeMs);
|
|
||||||
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
|
|
||||||
root.attr("nrBuildsDone", nrBuildsDone);
|
|
||||||
root.attr("nrStepsStarted", nrStepsStarted);
|
|
||||||
root.attr("nrStepsDone", nrStepsDone);
|
|
||||||
root.attr("nrRetries", nrRetries);
|
|
||||||
root.attr("maxNrRetries", maxNrRetries);
|
|
||||||
if (nrStepsDone) {
|
if (nrStepsDone) {
|
||||||
root.attr("totalStepTime", totalStepTime);
|
statusJson["totalStepTime"] = totalStepTime.load();
|
||||||
root.attr("totalStepBuildTime", totalStepBuildTime);
|
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
|
||||||
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
|
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
|
||||||
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
|
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
|
||||||
}
|
}
|
||||||
root.attr("nrQueueWakeups", nrQueueWakeups);
|
|
||||||
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
|
|
||||||
root.attr("dispatchTimeMs", dispatchTimeMs);
|
|
||||||
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
|
|
||||||
root.attr("nrDbConnections", dbPool.count());
|
|
||||||
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
|
|
||||||
|
|
||||||
{
|
{
|
||||||
auto nested = root.object("machines");
|
auto machines_json = json::object();
|
||||||
auto machines_(machines.lock());
|
auto machines_(machines.lock());
|
||||||
for (auto & i : *machines_) {
|
for (auto & i : *machines_) {
|
||||||
auto & m(i.second);
|
auto & m(i.second);
|
||||||
auto & s(m->state);
|
auto & s(m->state);
|
||||||
auto nested2 = nested.object(m->sshName);
|
|
||||||
nested2.attr("enabled", m->enabled);
|
|
||||||
|
|
||||||
{
|
|
||||||
auto list = nested2.list("systemTypes");
|
|
||||||
for (auto & s : m->systemTypes)
|
|
||||||
list.elem(s);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
auto list = nested2.list("supportedFeatures");
|
|
||||||
for (auto & s : m->supportedFeatures)
|
|
||||||
list.elem(s);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
auto list = nested2.list("mandatoryFeatures");
|
|
||||||
for (auto & s : m->mandatoryFeatures)
|
|
||||||
list.elem(s);
|
|
||||||
}
|
|
||||||
|
|
||||||
nested2.attr("currentJobs", s->currentJobs);
|
|
||||||
if (s->currentJobs == 0)
|
|
||||||
nested2.attr("idleSince", s->idleSince);
|
|
||||||
nested2.attr("nrStepsDone", s->nrStepsDone);
|
|
||||||
if (m->state->nrStepsDone) {
|
|
||||||
nested2.attr("totalStepTime", s->totalStepTime);
|
|
||||||
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
|
|
||||||
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
|
|
||||||
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto info(m->state->connectInfo.lock());
|
auto info(m->state->connectInfo.lock());
|
||||||
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
|
|
||||||
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
|
|
||||||
nested2.attr("consecutiveFailures", info->consecutiveFailures);
|
|
||||||
|
|
||||||
|
json machine = {
|
||||||
|
{"enabled", m->enabled},
|
||||||
|
{"systemTypes", m->systemTypes},
|
||||||
|
{"supportedFeatures", m->supportedFeatures},
|
||||||
|
{"mandatoryFeatures", m->mandatoryFeatures},
|
||||||
|
{"nrStepsDone", s->nrStepsDone.load()},
|
||||||
|
{"currentJobs", s->currentJobs.load()},
|
||||||
|
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
|
||||||
|
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
|
||||||
|
{"consecutiveFailures", info->consecutiveFailures},
|
||||||
|
};
|
||||||
|
|
||||||
|
if (s->currentJobs == 0)
|
||||||
|
machine["idleSince"] = s->idleSince.load();
|
||||||
|
if (m->state->nrStepsDone) {
|
||||||
|
machine["totalStepTime"] = s->totalStepTime.load();
|
||||||
|
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
|
||||||
|
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
|
||||||
|
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
|
||||||
|
}
|
||||||
|
machines_json[m->storeUri.render()] = machine;
|
||||||
}
|
}
|
||||||
|
statusJson["machines"] = machines_json;
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
auto nested = root.object("jobsets");
|
auto jobsets_json = json::object();
|
||||||
auto jobsets_(jobsets.lock());
|
auto jobsets_(jobsets.lock());
|
||||||
for (auto & jobset : *jobsets_) {
|
for (auto & jobset : *jobsets_) {
|
||||||
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
|
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
|
||||||
nested2.attr("shareUsed", jobset.second->shareUsed());
|
{"shareUsed", jobset.second->shareUsed()},
|
||||||
nested2.attr("seconds", jobset.second->getSeconds());
|
{"seconds", jobset.second->getSeconds()},
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
statusJson["jobsets"] = jobsets_json;
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
auto nested = root.object("machineTypes");
|
auto machineTypesJson = json::object();
|
||||||
auto machineTypes_(machineTypes.lock());
|
auto machineTypes_(machineTypes.lock());
|
||||||
for (auto & i : *machineTypes_) {
|
for (auto & i : *machineTypes_) {
|
||||||
auto nested2 = nested.object(i.first);
|
auto machineTypeJson = machineTypesJson[i.first] = {
|
||||||
nested2.attr("runnable", i.second.runnable);
|
{"runnable", i.second.runnable},
|
||||||
nested2.attr("running", i.second.running);
|
{"running", i.second.running},
|
||||||
|
};
|
||||||
if (i.second.runnable > 0)
|
if (i.second.runnable > 0)
|
||||||
nested2.attr("waitTime", i.second.waitTime.count() +
|
machineTypeJson["waitTime"] = i.second.waitTime.count() +
|
||||||
i.second.runnable * (time(0) - lastDispatcherCheck));
|
i.second.runnable * (time(0) - lastDispatcherCheck);
|
||||||
if (i.second.running == 0)
|
if (i.second.running == 0)
|
||||||
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
|
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
|
||||||
}
|
}
|
||||||
|
statusJson["machineTypes"] = machineTypesJson;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto store = getDestStore();
|
auto store = getDestStore();
|
||||||
|
|
||||||
auto nested = root.object("store");
|
|
||||||
|
|
||||||
auto & stats = store->getStats();
|
auto & stats = store->getStats();
|
||||||
nested.attr("narInfoRead", stats.narInfoRead);
|
statusJson["store"] = {
|
||||||
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
|
{"narInfoRead", stats.narInfoRead.load()},
|
||||||
nested.attr("narInfoMissing", stats.narInfoMissing);
|
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
|
||||||
nested.attr("narInfoWrite", stats.narInfoWrite);
|
{"narInfoMissing", stats.narInfoMissing.load()},
|
||||||
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
|
{"narInfoWrite", stats.narInfoWrite.load()},
|
||||||
nested.attr("narRead", stats.narRead);
|
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
|
||||||
nested.attr("narReadBytes", stats.narReadBytes);
|
{"narRead", stats.narRead.load()},
|
||||||
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
|
{"narReadBytes", stats.narReadBytes.load()},
|
||||||
nested.attr("narWrite", stats.narWrite);
|
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
|
||||||
nested.attr("narWriteAverted", stats.narWriteAverted);
|
{"narWrite", stats.narWrite.load()},
|
||||||
nested.attr("narWriteBytes", stats.narWriteBytes);
|
{"narWriteAverted", stats.narWriteAverted.load()},
|
||||||
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
|
{"narWriteBytes", stats.narWriteBytes.load()},
|
||||||
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
|
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
|
||||||
nested.attr("narCompressionSavings",
|
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
|
||||||
stats.narWriteBytes
|
{"narCompressionSavings",
|
||||||
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
|
stats.narWriteBytes
|
||||||
: 0.0);
|
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
|
||||||
nested.attr("narCompressionSpeed", // MiB/s
|
: 0.0},
|
||||||
|
{"narCompressionSpeed", // MiB/s
|
||||||
stats.narWriteCompressionTimeMs
|
stats.narWriteCompressionTimeMs
|
||||||
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
|
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||||
: 0.0);
|
: 0.0},
|
||||||
|
};
|
||||||
|
|
||||||
|
#if NIX_WITH_S3_SUPPORT
|
||||||
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
|
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
|
||||||
if (s3Store) {
|
if (s3Store) {
|
||||||
auto nested2 = nested.object("s3");
|
|
||||||
auto & s3Stats = s3Store->getS3Stats();
|
auto & s3Stats = s3Store->getS3Stats();
|
||||||
nested2.attr("put", s3Stats.put);
|
auto jsonS3 = statusJson["s3"] = {
|
||||||
nested2.attr("putBytes", s3Stats.putBytes);
|
{"put", s3Stats.put.load()},
|
||||||
nested2.attr("putTimeMs", s3Stats.putTimeMs);
|
{"putBytes", s3Stats.putBytes.load()},
|
||||||
nested2.attr("putSpeed",
|
{"putTimeMs", s3Stats.putTimeMs.load()},
|
||||||
s3Stats.putTimeMs
|
{"putSpeed",
|
||||||
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
|
s3Stats.putTimeMs
|
||||||
: 0.0);
|
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||||
nested2.attr("get", s3Stats.get);
|
: 0.0},
|
||||||
nested2.attr("getBytes", s3Stats.getBytes);
|
{"get", s3Stats.get.load()},
|
||||||
nested2.attr("getTimeMs", s3Stats.getTimeMs);
|
{"getBytes", s3Stats.getBytes.load()},
|
||||||
nested2.attr("getSpeed",
|
{"getTimeMs", s3Stats.getTimeMs.load()},
|
||||||
s3Stats.getTimeMs
|
{"getSpeed",
|
||||||
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
|
s3Stats.getTimeMs
|
||||||
: 0.0);
|
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||||
nested2.attr("head", s3Stats.head);
|
: 0.0},
|
||||||
nested2.attr("costDollarApprox",
|
{"head", s3Stats.head.load()},
|
||||||
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
|
{"costDollarApprox",
|
||||||
+ s3Stats.put / 1000.0 * 0.005 +
|
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
|
||||||
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
|
+ s3Stats.put / 1000.0 * 0.005 +
|
||||||
|
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -671,7 +737,7 @@ void State::dumpStatus(Connection & conn)
         pqxx::work txn(conn);
         // FIXME: use PostgreSQL 9.5 upsert.
         txn.exec("delete from SystemStatus where what = 'queue-runner'");
-        txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
+        txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
         txn.exec("notify status_dumped");
         txn.commit();
     }
@@ -683,14 +749,14 @@ void State::showStatus()
     auto conn(dbPool.get());
     receiver statusDumped(*conn, "status_dumped");
 
-    string status;
+    std::string status;
     bool barf = false;
 
     /* Get the last JSON status dump from the database. */
     {
         pqxx::work txn(*conn);
         auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
-        if (res.size()) status = res[0][0].as<string>();
+        if (res.size()) status = res[0][0].as<std::string>();
     }
 
     if (status != "") {
@@ -710,7 +776,7 @@ void State::showStatus()
         {
             pqxx::work txn(*conn);
             auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
-            if (res.size()) status = res[0][0].as<string>();
+            if (res.size()) status = res[0][0].as<std::string>();
         }
 
     }
@@ -754,7 +820,19 @@ void State::run(BuildID buildOne)
     if (!lock)
         throw Error("hydra-queue-runner is already running");
 
-    Store::Params localParams;
+    std::cout << "Starting the Prometheus exporter on " << metricsAddr << std::endl;
+
+    /* Set up simple exporter, to show that we're still alive. */
+    prometheus::Exposer promExposer{metricsAddr};
+    auto exposerPort = promExposer.GetListeningPorts().front();
+
+    promExposer.RegisterCollectable(prom.registry);
+
+    std::cout << "Started the Prometheus exporter, listening on "
+              << metricsAddr << "/metrics (port " << exposerPort << ")"
+              << std::endl;
+
+    Store::Config::Params localParams;
     localParams["max-connections"] = "16";
     localParams["max-connection-age"] = "600";
     localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
@@ -836,10 +914,17 @@ void State::run(BuildID buildOne)
     while (true) {
         try {
             auto conn(dbPool.get());
-            receiver dumpStatus_(*conn, "dump_status");
-            while (true) {
-                conn->await_notification();
-                dumpStatus(*conn);
+            try {
+                receiver dumpStatus_(*conn, "dump_status");
+                while (true) {
+                    conn->await_notification();
+                    dumpStatus(*conn);
+                }
+            } catch (pqxx::broken_connection & connEx) {
+                printMsg(lvlError, "main thread: %s", connEx.what());
+                printMsg(lvlError, "main thread: Reconnecting in 10s");
+                conn.markBad();
+                sleep(10);
             }
         } catch (std::exception & e) {
             printMsg(lvlError, "main thread: %s", e.what());
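Note: the new inner try/catch treats a dropped database connection as recoverable: the pooled connection is marked bad, the thread backs off, and the outer loop grabs a fresh connection. A standalone sketch of the same reconnect-on-broken_connection loop, with a hypothetical connection string and a trivial query standing in for await_notification()/dumpStatus():

#include <iostream>
#include <unistd.h>
#include <pqxx/pqxx>

int main() {
    while (true) {
        try {
            pqxx::connection conn{"dbname=hydra_example"};   // hypothetical DSN
            while (true) {
                pqxx::work txn{conn};
                txn.exec("select 1");   // stand-in for the real work
                txn.commit();
                sleep(5);
            }
        } catch (pqxx::broken_connection & e) {
            // Connection dropped (server restart, network blip): log,
            // back off, then loop around and open a fresh connection.
            std::cerr << "lost connection: " << e.what() << ", retrying in 10s\n";
            sleep(10);
        }
    }
}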
@@ -864,6 +949,7 @@ int main(int argc, char * * argv)
     bool unlock = false;
     bool status = false;
     BuildID buildOne = 0;
+    std::optional<std::string> metricsAddrOpt = std::nullopt;
 
     parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
         if (*arg == "--unlock")
@@ -875,15 +961,16 @@ int main(int argc, char * * argv)
                 buildOne = *b;
             else
                 throw Error("‘--build-one’ requires a build ID");
+        } else if (*arg == "--prometheus-address") {
+            metricsAddrOpt = getArg(*arg, arg, end);
         } else
             return false;
         return true;
     });
 
     settings.verboseBuild = true;
-    settings.lockCPU = false;
 
-    State state;
+    State state{metricsAddrOpt};
     if (status)
         state.showStatus();
     else if (unlock)
24
src/hydra-queue-runner/meson.build
Normal file
@@ -0,0 +1,24 @@
+srcs = files(
+  'builder.cc',
+  'build-remote.cc',
+  'build-result.cc',
+  'dispatcher.cc',
+  'hydra-queue-runner.cc',
+  'nar-extractor.cc',
+  'queue-monitor.cc',
+)
+
+hydra_queue_runner = executable('hydra-queue-runner',
+  'hydra-queue-runner.cc',
+  srcs,
+  dependencies: [
+    libhydra_dep,
+    nix_util_dep,
+    nix_store_dep,
+    nix_main_dep,
+    pqxx_dep,
+    prom_cpp_core_dep,
+    prom_cpp_pull_dep,
+  ],
+  install: true,
+)
@@ -1,12 +1,51 @@
|
|||||||
#include "nar-extractor.hh"
|
#include "nar-extractor.hh"
|
||||||
|
|
||||||
#include "archive.hh"
|
#include <nix/util/archive.hh>
|
||||||
|
|
||||||
#include <unordered_set>
|
#include <unordered_set>
|
||||||
|
|
||||||
using namespace nix;
|
using namespace nix;
|
||||||
|
|
||||||
struct Extractor : ParseSink
|
|
||||||
|
struct NarMemberConstructor : CreateRegularFileSink
|
||||||
|
{
|
||||||
|
NarMemberData & curMember;
|
||||||
|
|
||||||
|
HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
|
||||||
|
|
||||||
|
std::optional<uint64_t> expectedSize;
|
||||||
|
|
||||||
|
NarMemberConstructor(NarMemberData & curMember)
|
||||||
|
: curMember(curMember)
|
||||||
|
{ }
|
||||||
|
|
||||||
|
void isExecutable() override
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void preallocateContents(uint64_t size) override
|
||||||
|
{
|
||||||
|
expectedSize = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
void operator () (std::string_view data) override
|
||||||
|
{
|
||||||
|
assert(expectedSize);
|
||||||
|
*curMember.fileSize += data.size();
|
||||||
|
hashSink(data);
|
||||||
|
if (curMember.contents) {
|
||||||
|
curMember.contents->append(data);
|
||||||
|
}
|
||||||
|
assert(curMember.fileSize <= expectedSize);
|
||||||
|
if (curMember.fileSize == expectedSize) {
|
||||||
|
auto [hash, len] = hashSink.finish();
|
||||||
|
assert(curMember.fileSize == len);
|
||||||
|
curMember.sha256 = hash;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Extractor : FileSystemObjectSink
|
||||||
{
|
{
|
||||||
std::unordered_set<Path> filesToKeep {
|
std::unordered_set<Path> filesToKeep {
|
||||||
"/nix-support/hydra-build-products",
|
"/nix-support/hydra-build-products",
|
||||||
@@ -15,58 +54,40 @@ struct Extractor : ParseSink
|
|||||||
};
|
};
|
||||||
|
|
||||||
NarMemberDatas & members;
|
NarMemberDatas & members;
|
||||||
NarMemberData * curMember = nullptr;
|
std::filesystem::path prefix;
|
||||||
Path prefix;
|
|
||||||
|
Path toKey(const CanonPath & path)
|
||||||
|
{
|
||||||
|
std::filesystem::path p = prefix;
|
||||||
|
// Conditional to avoid trailing slash
|
||||||
|
if (!path.isRoot()) p /= path.rel();
|
||||||
|
return p;
|
||||||
|
}
|
||||||
|
|
||||||
Extractor(NarMemberDatas & members, const Path & prefix)
|
Extractor(NarMemberDatas & members, const Path & prefix)
|
||||||
: members(members), prefix(prefix)
|
: members(members), prefix(prefix)
|
||||||
{ }
|
{ }
|
||||||
|
|
||||||
void createDirectory(const Path & path) override
|
void createDirectory(const CanonPath & path) override
|
||||||
{
|
{
|
||||||
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
|
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
|
||||||
}
|
}
|
||||||
|
|
||||||
void createRegularFile(const Path & path) override
|
void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
|
||||||
{
|
{
|
||||||
curMember = &members.insert_or_assign(prefix + path, NarMemberData {
|
NarMemberConstructor nmc {
|
||||||
.type = FSAccessor::Type::tRegular,
|
members.insert_or_assign(toKey(path), NarMemberData {
|
||||||
.fileSize = 0,
|
.type = SourceAccessor::Type::tRegular,
|
||||||
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
|
.fileSize = 0,
|
||||||
}).first->second;
|
.contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
|
||||||
|
}).first->second,
|
||||||
|
};
|
||||||
|
func(nmc);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<uint64_t> expectedSize;
|
void createSymlink(const CanonPath & path, const std::string & target) override
|
||||||
std::unique_ptr<HashSink> hashSink;
|
|
||||||
|
|
||||||
void preallocateContents(uint64_t size) override
|
|
||||||
{
|
{
|
||||||
expectedSize = size;
|
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
|
||||||
hashSink = std::make_unique<HashSink>(htSHA256);
|
|
||||||
}
|
|
||||||
|
|
||||||
void receiveContents(std::string_view data) override
|
|
||||||
{
|
|
||||||
assert(expectedSize);
|
|
||||||
assert(curMember);
|
|
||||||
assert(hashSink);
|
|
||||||
*curMember->fileSize += data.size();
|
|
||||||
(*hashSink)(data);
|
|
||||||
if (curMember->contents) {
|
|
||||||
curMember->contents->append(data);
|
|
||||||
}
|
|
||||||
assert(curMember->fileSize <= expectedSize);
|
|
||||||
if (curMember->fileSize == expectedSize) {
|
|
||||||
auto [hash, len] = hashSink->finish();
|
|
||||||
assert(curMember->fileSize == len);
|
|
||||||
curMember->sha256 = hash;
|
|
||||||
hashSink.reset();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void createSymlink(const Path & path, const string & target) override
|
|
||||||
{
|
|
||||||
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -1,13 +1,13 @@
 #pragma once
 
-#include "fs-accessor.hh"
-#include "types.hh"
-#include "serialise.hh"
-#include "hash.hh"
+#include <nix/util/source-accessor.hh>
+#include <nix/util/types.hh>
+#include <nix/util/serialise.hh>
+#include <nix/util/hash.hh>
 
 struct NarMemberData
 {
-    nix::FSAccessor::Type type;
+    nix::SourceAccessor::Type type;
     std::optional<uint64_t> fileSize;
     std::optional<std::string> contents;
     std::optional<nix::Hash> sha256;
@@ -1,6 +1,8 @@
 #include "state.hh"
-#include "build-result.hh"
-#include "globals.hh"
+#include "hydra-build-result.hh"
+#include <nix/store/globals.hh>
+#include <nix/store/parsed-derivations.hh>
+#include <nix/util/thread-pool.hh>
 
 #include <cstring>
 
@@ -10,63 +12,74 @@ using namespace nix;
|
|||||||
void State::queueMonitor()
|
void State::queueMonitor()
|
||||||
{
|
{
|
||||||
while (true) {
|
while (true) {
|
||||||
|
auto conn(dbPool.get());
|
||||||
try {
|
try {
|
||||||
queueMonitorLoop();
|
queueMonitorLoop(*conn);
|
||||||
|
} catch (pqxx::broken_connection & e) {
|
||||||
|
printMsg(lvlError, "queue monitor: %s", e.what());
|
||||||
|
printMsg(lvlError, "queue monitor: Reconnecting in 10s");
|
||||||
|
conn.markBad();
|
||||||
|
sleep(10);
|
||||||
} catch (std::exception & e) {
|
} catch (std::exception & e) {
|
||||||
printMsg(lvlError, format("queue monitor: %1%") % e.what());
|
printError("queue monitor: %s", e.what());
|
||||||
sleep(10); // probably a DB problem, so don't retry right away
|
sleep(10); // probably a DB problem, so don't retry right away
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void State::queueMonitorLoop()
|
void State::queueMonitorLoop(Connection & conn)
|
||||||
{
|
{
|
||||||
auto conn(dbPool.get());
|
receiver buildsAdded(conn, "builds_added");
|
||||||
|
receiver buildsRestarted(conn, "builds_restarted");
|
||||||
receiver buildsAdded(*conn, "builds_added");
|
receiver buildsCancelled(conn, "builds_cancelled");
|
||||||
receiver buildsRestarted(*conn, "builds_restarted");
|
receiver buildsDeleted(conn, "builds_deleted");
|
||||||
receiver buildsCancelled(*conn, "builds_cancelled");
|
receiver buildsBumped(conn, "builds_bumped");
|
||||||
receiver buildsDeleted(*conn, "builds_deleted");
|
receiver jobsetSharesChanged(conn, "jobset_shares_changed");
|
||||||
receiver buildsBumped(*conn, "builds_bumped");
|
|
||||||
receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
|
|
||||||
|
|
||||||
auto destStore = getDestStore();
|
auto destStore = getDestStore();
|
||||||
|
|
||||||
unsigned int lastBuildId = 0;
|
|
||||||
|
|
||||||
bool quit = false;
|
bool quit = false;
|
||||||
while (!quit) {
|
while (!quit) {
|
||||||
|
auto t_before_work = std::chrono::steady_clock::now();
|
||||||
|
|
||||||
localStore->clearPathInfoCache();
|
localStore->clearPathInfoCache();
|
||||||
|
|
||||||
bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
|
bool done = getQueuedBuilds(conn, destStore);
|
||||||
|
|
||||||
if (buildOne && buildOneDone) quit = true;
|
if (buildOne && buildOneDone) quit = true;
|
||||||
|
|
||||||
|
auto t_after_work = std::chrono::steady_clock::now();
|
||||||
|
|
||||||
|
prom.queue_monitor_time_spent_running.Increment(
|
||||||
|
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
|
||||||
|
|
||||||
/* Sleep until we get notification from the database about an
|
/* Sleep until we get notification from the database about an
|
||||||
event. */
|
event. */
|
||||||
if (done && !quit) {
|
if (done && !quit) {
|
||||||
conn->await_notification();
|
conn.await_notification();
|
||||||
nrQueueWakeups++;
|
nrQueueWakeups++;
|
||||||
} else
|
} else
|
||||||
conn->get_notifs();
|
conn.get_notifs();
|
||||||
|
|
||||||
if (auto lowestId = buildsAdded.get()) {
|
if (auto lowestId = buildsAdded.get()) {
|
||||||
lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
|
|
||||||
printMsg(lvlTalkative, "got notification: new builds added to the queue");
|
printMsg(lvlTalkative, "got notification: new builds added to the queue");
|
||||||
}
|
}
|
||||||
if (buildsRestarted.get()) {
|
if (buildsRestarted.get()) {
|
||||||
printMsg(lvlTalkative, "got notification: builds restarted");
|
printMsg(lvlTalkative, "got notification: builds restarted");
|
||||||
lastBuildId = 0; // check all builds
|
|
||||||
}
|
}
|
||||||
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
|
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
|
||||||
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
|
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
|
||||||
processQueueChange(*conn);
|
processQueueChange(conn);
|
||||||
}
|
}
|
||||||
if (jobsetSharesChanged.get()) {
|
if (jobsetSharesChanged.get()) {
|
||||||
printMsg(lvlTalkative, "got notification: jobset shares changed");
|
printMsg(lvlTalkative, "got notification: jobset shares changed");
|
||||||
processJobsetSharesChange(*conn);
|
processJobsetSharesChange(conn);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto t_after_sleep = std::chrono::steady_clock::now();
|
||||||
|
prom.queue_monitor_time_spent_waiting.Increment(
|
||||||
|
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
|
||||||
}
|
}
|
||||||
|
|
||||||
exit(0);
|
exit(0);
|
||||||
@@ -80,18 +93,18 @@ struct PreviousFailure : public std::exception {
|
|||||||
|
|
||||||
|
|
||||||
bool State::getQueuedBuilds(Connection & conn,
|
bool State::getQueuedBuilds(Connection & conn,
|
||||||
ref<Store> destStore, unsigned int & lastBuildId)
|
ref<Store> destStore)
|
||||||
{
|
{
|
||||||
printInfo("checking the queue for builds > %d...", lastBuildId);
|
prom.queue_checks_started.Increment();
|
||||||
|
|
||||||
|
printInfo("checking the queue for builds...");
|
||||||
|
|
||||||
/* Grab the queued builds from the database, but don't process
|
/* Grab the queued builds from the database, but don't process
|
||||||
them yet (since we don't want a long-running transaction). */
|
them yet (since we don't want a long-running transaction). */
|
||||||
std::vector<BuildID> newIDs;
|
std::vector<BuildID> newIDs;
|
||||||
std::map<BuildID, Build::ptr> newBuildsByID;
|
std::unordered_map<BuildID, Build::ptr> newBuildsByID;
|
||||||
std::multimap<StorePath, BuildID> newBuildsByPath;
|
std::multimap<StorePath, BuildID> newBuildsByPath;
|
||||||
|
|
||||||
unsigned int newLastBuildId = lastBuildId;
|
|
||||||
|
|
||||||
{
|
{
|
||||||
pqxx::work txn(conn);
|
pqxx::work txn(conn);
|
||||||
|
|
||||||
@@ -100,23 +113,21 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
|
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
|
||||||
"globalPriority, priority from Builds "
|
"globalPriority, priority from Builds "
|
||||||
"inner join jobsets on builds.jobset_id = jobsets.id "
|
"inner join jobsets on builds.jobset_id = jobsets.id "
|
||||||
"where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
|
"where finished = 0 order by globalPriority desc, random()");
|
||||||
lastBuildId);
|
|
||||||
|
|
||||||
for (auto const & row : res) {
|
for (auto const & row : res) {
|
||||||
auto builds_(builds.lock());
|
auto builds_(builds.lock());
|
||||||
BuildID id = row["id"].as<BuildID>();
|
BuildID id = row["id"].as<BuildID>();
|
||||||
if (buildOne && id != buildOne) continue;
|
if (buildOne && id != buildOne) continue;
|
||||||
if (id > newLastBuildId) newLastBuildId = id;
|
|
||||||
if (builds_->count(id)) continue;
|
if (builds_->count(id)) continue;
|
||||||
|
|
||||||
auto build = std::make_shared<Build>(
|
auto build = std::make_shared<Build>(
|
||||||
localStore->parseStorePath(row["drvPath"].as<string>()));
|
localStore->parseStorePath(row["drvPath"].as<std::string>()));
|
||||||
build->id = id;
|
build->id = id;
|
||||||
build->jobsetId = row["jobset_id"].as<JobsetID>();
|
build->jobsetId = row["jobset_id"].as<JobsetID>();
|
||||||
build->projectName = row["project"].as<string>();
|
build->projectName = row["project"].as<std::string>();
|
||||||
build->jobsetName = row["jobset"].as<string>();
|
build->jobsetName = row["jobset"].as<std::string>();
|
||||||
build->jobName = row["job"].as<string>();
|
build->jobName = row["job"].as<std::string>();
|
||||||
build->maxSilentTime = row["maxsilent"].as<int>();
|
build->maxSilentTime = row["maxsilent"].as<int>();
|
||||||
build->buildTimeout = row["timeout"].as<int>();
|
build->buildTimeout = row["timeout"].as<int>();
|
||||||
build->timestamp = row["timestamp"].as<time_t>();
|
build->timestamp = row["timestamp"].as<time_t>();
|
||||||
@@ -136,13 +147,14 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
std::set<StorePath> finishedDrvs;
|
std::set<StorePath> finishedDrvs;
|
||||||
|
|
||||||
createBuild = [&](Build::ptr build) {
|
createBuild = [&](Build::ptr build) {
|
||||||
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
|
prom.queue_build_loads.Increment();
|
||||||
|
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
|
||||||
nrAdded++;
|
nrAdded++;
|
||||||
newBuildsByID.erase(build->id);
|
newBuildsByID.erase(build->id);
|
||||||
|
|
||||||
if (!localStore->isValidPath(build->drvPath)) {
|
if (!localStore->isValidPath(build->drvPath)) {
|
||||||
/* Derivation has been GC'ed prematurely. */
|
/* Derivation has been GC'ed prematurely. */
|
||||||
printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
|
printError("aborting GC'ed build %1%", build->id);
|
||||||
if (!build->finishedInDB) {
|
if (!build->finishedInDB) {
|
||||||
auto mc = startDbUpdate();
|
auto mc = startDbUpdate();
|
||||||
pqxx::work txn(conn);
|
pqxx::work txn(conn);
|
||||||
@@ -186,15 +198,19 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
|
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
|
||||||
|
|
||||||
if (!propagatedFrom) {
|
if (!propagatedFrom) {
|
||||||
for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
|
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
|
||||||
if (i.second.second) {
|
constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
|
||||||
auto res = txn.exec_params
|
auto res = optOutputPath
|
||||||
("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
|
? txn.exec_params(
|
||||||
localStore->printStorePath(*i.second.second));
|
std::string { common } + " and path = $1",
|
||||||
if (!res[0][0].is_null()) {
|
localStore->printStorePath(*optOutputPath))
|
||||||
propagatedFrom = res[0][0].as<BuildID>();
|
: txn.exec_params(
|
||||||
break;
|
std::string { common } + " and drvPath = $1 and name = $2",
|
||||||
}
|
localStore->printStorePath(ex.step->drvPath),
|
||||||
|
outputName);
|
||||||
|
if (!res[0][0].is_null()) {
|
||||||
|
propagatedFrom = res[0][0].as<BuildID>();
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -230,12 +246,10 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
/* If we didn't get a step, it means the step's outputs are
|
/* If we didn't get a step, it means the step's outputs are
|
||||||
all valid. So we mark this as a finished, cached build. */
|
all valid. So we mark this as a finished, cached build. */
|
||||||
if (!step) {
|
if (!step) {
|
||||||
auto drv = localStore->readDerivation(build->drvPath);
|
BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
|
||||||
BuildOutput res = getBuildOutputCached(conn, destStore, drv);
|
|
||||||
|
|
||||||
for (auto & i : drv.outputsAndOptPaths(*localStore))
|
for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
|
||||||
if (i.second.second)
|
addRoot(i.second);
|
||||||
addRoot(*i.second.second);
|
|
||||||
|
|
||||||
{
|
{
|
||||||
auto mc = startDbUpdate();
|
auto mc = startDbUpdate();
|
||||||
@@ -286,7 +300,7 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
try {
|
try {
|
||||||
createBuild(build);
|
createBuild(build);
|
||||||
} catch (Error & e) {
|
} catch (Error & e) {
|
||||||
e.addTrace({}, hintfmt("while loading build %d: ", build->id));
|
e.addTrace({}, HintFmt("while loading build %d: ", build->id));
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -296,7 +310,7 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
|
|
||||||
/* Add the new runnable build steps to ‘runnable’ and wake up
|
/* Add the new runnable build steps to ‘runnable’ and wake up
|
||||||
the builder threads. */
|
the builder threads. */
|
||||||
printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
|
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
|
||||||
for (auto & r : newRunnable)
|
for (auto & r : newRunnable)
|
||||||
makeRunnable(r);
|
makeRunnable(r);
|
||||||
|
|
||||||
@@ -306,10 +320,13 @@ bool State::getQueuedBuilds(Connection & conn,
|
|||||||
|
|
||||||
/* Stop after a certain time to allow priority bumps to be
|
/* Stop after a certain time to allow priority bumps to be
|
||||||
processed. */
|
processed. */
|
||||||
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) break;
|
if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
|
||||||
|
prom.queue_checks_early_exits.Increment();
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
|
prom.queue_checks_finished.Increment();
|
||||||
return newBuildsByID.empty();
|
return newBuildsByID.empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -347,13 +364,13 @@ void State::processQueueChange(Connection & conn)
|
|||||||
for (auto i = builds_->begin(); i != builds_->end(); ) {
|
for (auto i = builds_->begin(); i != builds_->end(); ) {
|
||||||
auto b = currentIds.find(i->first);
|
auto b = currentIds.find(i->first);
|
||||||
if (b == currentIds.end()) {
|
if (b == currentIds.end()) {
|
||||||
printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
|
printInfo("discarding cancelled build %1%", i->first);
|
||||||
i = builds_->erase(i);
|
i = builds_->erase(i);
|
||||||
// FIXME: ideally we would interrupt active build steps here.
|
// FIXME: ideally we would interrupt active build steps here.
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (i->second->globalPriority < b->second) {
|
if (i->second->globalPriority < b->second) {
|
||||||
printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
|
printInfo("priority of build %1% increased", i->first);
|
||||||
i->second->globalPriority = b->second;
|
i->second->globalPriority = b->second;
|
||||||
i->second->propagatePriorities();
|
i->second->propagatePriorities();
|
||||||
}
|
}
|
||||||
@@ -388,6 +405,34 @@ void State::processQueueChange(Connection & conn)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
|
||||||
|
ref<Store> destStore,
|
||||||
|
const std::map<DrvOutput, std::optional<StorePath>> & paths)
|
||||||
|
{
|
||||||
|
Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
|
||||||
|
ThreadPool tp;
|
||||||
|
|
||||||
|
for (auto & [output, maybeOutputPath] : paths) {
|
||||||
|
if (!maybeOutputPath) {
|
||||||
|
auto missing(missing_.lock());
|
||||||
|
missing->insert({output, maybeOutputPath});
|
||||||
|
} else {
|
||||||
|
tp.enqueue([&] {
|
||||||
|
if (!destStore->isValidPath(*maybeOutputPath)) {
|
||||||
|
auto missing(missing_.lock());
|
||||||
|
missing->insert({output, maybeOutputPath});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tp.process();
|
||||||
|
|
||||||
|
auto missing(missing_.lock());
|
||||||
|
return *missing;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
Step::ptr State::createStep(ref<Store> destStore,
|
Step::ptr State::createStep(ref<Store> destStore,
|
||||||
Connection & conn, Build::ptr build, const StorePath & drvPath,
|
Connection & conn, Build::ptr build, const StorePath & drvPath,
|
||||||
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
|
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
|
||||||
@@ -437,6 +482,8 @@ Step::ptr State::createStep(ref<Store> destStore,
|
|||||||
|
|
||||||
if (!isNew) return step;
|
if (!isNew) return step;
|
||||||
|
|
||||||
|
prom.queue_steps_created.Increment();
|
||||||
|
|
||||||
printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath));
|
printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath));
|
||||||
|
|
||||||
/* Initialize the step. Note that the step may be visible in
|
/* Initialize the step. Note that the step may be visible in
|
||||||
@@ -444,17 +491,23 @@ Step::ptr State::createStep(ref<Store> destStore,
|
|||||||
it's not runnable yet, and other threads won't make it
|
it's not runnable yet, and other threads won't make it
|
||||||
runnable while step->created == false. */
|
runnable while step->created == false. */
|
||||||
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
|
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
|
||||||
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
|
{
|
||||||
|
auto parsedOpt = StructuredAttrs::tryParse(step->drv->env);
|
||||||
|
try {
|
||||||
|
step->drvOptions = std::make_unique<DerivationOptions>(
|
||||||
|
DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr));
|
||||||
|
} catch (Error & e) {
|
||||||
|
e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
|
step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
|
||||||
step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1";
|
step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";
|
||||||
|
|
||||||
step->systemType = step->drv->platform;
|
step->systemType = step->drv->platform;
|
||||||
{
|
{
|
||||||
auto i = step->drv->env.find("requiredSystemFeatures");
|
StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
|
||||||
StringSet features;
|
|
||||||
if (i != step->drv->env.end())
|
|
||||||
features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
|
|
||||||
if (step->preferLocalBuild)
|
if (step->preferLocalBuild)
|
||||||
features.insert("local");
|
features.insert("local");
|
||||||
if (!features.empty()) {
|
if (!features.empty()) {
|
||||||
@@ -468,26 +521,40 @@ Step::ptr State::createStep(ref<Store> destStore,
|
|||||||
throw PreviousFailure{step};
|
throw PreviousFailure{step};
|
||||||
|
|
||||||
/* Are all outputs valid? */
|
/* Are all outputs valid? */
|
||||||
bool valid = true;
|
auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
|
||||||
DerivationOutputs missing;
|
std::map<DrvOutput, std::optional<StorePath>> paths;
|
||||||
for (auto & i : step->drv->outputs)
|
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
|
||||||
if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
|
auto outputHash = outputHashes.at(outputName);
|
||||||
valid = false;
|
paths.insert({{outputHash, outputName}, maybeOutputPath});
|
||||||
missing.insert_or_assign(i.first, i.second);
|
}
|
||||||
}
|
|
||||||
|
auto missing = getMissingRemotePaths(destStore, paths);
|
||||||
|
bool valid = missing.empty();
|
||||||
|
|
||||||
/* Try to copy the missing paths from the local store or from
|
/* Try to copy the missing paths from the local store or from
|
||||||
substitutes. */
|
substitutes. */
|
||||||
if (!missing.empty()) {
|
if (!missing.empty()) {
|
||||||
|
|
||||||
size_t avail = 0;
|
size_t avail = 0;
|
||||||
for (auto & i : missing) {
|
for (auto & [i, pathOpt] : missing) {
|
||||||
auto path = i.second.path(*localStore, step->drv->name, i.first);
|
// If we don't know the output path from the destination
|
||||||
if (/* localStore != destStore && */ localStore->isValidPath(*path))
|
// store, see if the local store can tell us.
|
||||||
|
if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
|
||||||
|
if (auto maybeRealisation = localStore->queryRealisation(i))
|
||||||
|
pathOpt = maybeRealisation->outPath;
|
||||||
|
|
||||||
|
if (!pathOpt) {
|
||||||
|
// No hope of getting the store object if we don't know
|
||||||
|
// the path.
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
auto & path = *pathOpt;
|
||||||
|
|
||||||
|
if (/* localStore != destStore && */ localStore->isValidPath(path))
|
||||||
avail++;
|
avail++;
|
||||||
else if (useSubstitutes) {
|
else if (useSubstitutes) {
|
||||||
SubstitutablePathInfos infos;
|
SubstitutablePathInfos infos;
|
||||||
localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
|
localStore->querySubstitutablePathInfos({{path, {}}}, infos);
|
||||||
if (infos.size() == 1)
|
if (infos.size() == 1)
|
||||||
avail++;
|
avail++;
|
||||||
}
|
}
|
||||||
@@ -495,40 +562,43 @@ Step::ptr State::createStep(ref<Store> destStore,
|
|||||||
|
|
||||||
if (missing.size() == avail) {
|
if (missing.size() == avail) {
|
||||||
valid = true;
|
valid = true;
|
||||||
for (auto & i : missing) {
|
for (auto & [i, pathOpt] : missing) {
|
||||||
auto path = i.second.path(*localStore, step->drv->name, i.first);
|
// If we found everything, then we should know the path
|
||||||
|
// to every missing store object now.
|
||||||
|
assert(pathOpt);
|
||||||
|
auto & path = *pathOpt;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
time_t startTime = time(0);
|
time_t startTime = time(0);
|
||||||
|
|
||||||
if (localStore->isValidPath(*path))
|
if (localStore->isValidPath(path))
|
||||||
printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
|
printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
|
||||||
localStore->printStorePath(*path),
|
localStore->printStorePath(path),
|
||||||
localStore->printStorePath(drvPath));
|
localStore->printStorePath(drvPath));
|
||||||
else {
|
else {
|
||||||
printInfo("substituting output ‘%1%’ of ‘%2%’",
|
printInfo("substituting output ‘%1%’ of ‘%2%’",
|
||||||
localStore->printStorePath(*path),
|
localStore->printStorePath(path),
|
||||||
localStore->printStorePath(drvPath));
|
localStore->printStorePath(drvPath));
|
||||||
localStore->ensurePath(*path);
|
localStore->ensurePath(path);
|
||||||
// FIXME: should copy directly from substituter to destStore.
|
// FIXME: should copy directly from substituter to destStore.
|
||||||
}
|
}
|
||||||
|
|
||||||
StorePathSet closure;
|
copyClosure(*localStore, *destStore,
|
||||||
localStore->computeFSClosure({*path}, closure);
|
StorePathSet { path },
|
||||||
copyPaths(*localStore, *destStore, closure, NoRepair, CheckSigs, NoSubstitute);
|
NoRepair, CheckSigs, NoSubstitute);
|
||||||
|
|
||||||
time_t stopTime = time(0);
|
time_t stopTime = time(0);
|
||||||
|
|
||||||
{
|
{
|
||||||
auto mc = startDbUpdate();
|
auto mc = startDbUpdate();
|
||||||
pqxx::work txn(conn);
|
pqxx::work txn(conn);
|
||||||
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
|
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
|
||||||
txn.commit();
|
txn.commit();
|
||||||
}
|
}
|
||||||
|
|
||||||
} catch (Error & e) {
|
} catch (Error & e) {
|
||||||
printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
|
printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
|
||||||
localStore->printStorePath(*path),
|
localStore->printStorePath(path),
|
||||||
localStore->printStorePath(drvPath),
|
localStore->printStorePath(drvPath),
|
||||||
e.what());
|
e.what());
|
||||||
valid = false;
|
valid = false;
|
||||||
@@ -548,7 +618,7 @@ Step::ptr State::createStep(ref<Store> destStore,
|
|||||||
printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));
|
printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));
|
||||||
|
|
||||||
/* Create steps for the dependencies. */
|
/* Create steps for the dependencies. */
|
||||||
for (auto & i : step->drv->inputDrvs) {
|
for (auto & i : step->drv->inputDrvs.map) {
|
||||||
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
|
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
|
||||||
if (dep) {
|
if (dep) {
|
||||||
auto step_(step->state.lock());
|
auto step_(step->state.lock());
|
||||||
@@ -620,28 +690,30 @@ void State::processJobsetSharesChange(Connection & conn)
|
|||||||
auto res = txn.exec("select project, name, schedulingShares from Jobsets");
|
auto res = txn.exec("select project, name, schedulingShares from Jobsets");
|
||||||
for (auto const & row : res) {
|
for (auto const & row : res) {
|
||||||
auto jobsets_(jobsets.lock());
|
auto jobsets_(jobsets.lock());
|
||||||
auto i = jobsets_->find(std::make_pair(row["project"].as<string>(), row["name"].as<string>()));
|
auto i = jobsets_->find(std::make_pair(row["project"].as<std::string>(), row["name"].as<std::string>()));
|
||||||
if (i == jobsets_->end()) continue;
|
if (i == jobsets_->end()) continue;
|
||||||
i->second->setShares(row["schedulingShares"].as<unsigned int>());
|
i->second->setShares(row["schedulingShares"].as<unsigned int>());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
|
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
|
||||||
{
|
{
|
||||||
|
auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
|
||||||
|
|
||||||
{
|
{
|
||||||
pqxx::work txn(conn);
|
pqxx::work txn(conn);
|
||||||
|
|
||||||
for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
|
for (auto & [name, output] : derivationOutputs) {
|
||||||
auto r = txn.exec_params
|
auto r = txn.exec_params
|
||||||
("select id, buildStatus, releaseName, closureSize, size from Builds b "
|
("select id, buildStatus, releaseName, closureSize, size from Builds b "
|
||||||
"join BuildOutputs o on b.id = o.build "
|
"join BuildOutputs o on b.id = o.build "
|
||||||
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
|
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
|
||||||
localStore->printStorePath(*output.second));
|
localStore->printStorePath(output));
|
||||||
if (r.empty()) continue;
|
if (r.empty()) continue;
|
||||||
BuildID id = r[0][0].as<BuildID>();
|
BuildID id = r[0][0].as<BuildID>();
|
||||||
|
|
||||||
printMsg(lvlInfo, format("reusing build %d") % id);
|
printInfo("reusing build %d", id);
|
||||||
|
|
||||||
BuildOutput res;
|
BuildOutput res;
|
||||||
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
|
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
|
||||||
@@ -664,7 +736,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
|
|||||||
product.fileSize = row[2].as<off_t>();
|
product.fileSize = row[2].as<off_t>();
|
||||||
}
|
}
|
||||||
if (!row[3].is_null())
|
if (!row[3].is_null())
|
||||||
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
|
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
|
||||||
if (!row[4].is_null())
|
if (!row[4].is_null())
|
||||||
product.path = row[4].as<std::string>();
|
product.path = row[4].as<std::string>();
|
||||||
product.name = row[5].as<std::string>();
|
product.name = row[5].as<std::string>();
|
||||||
@@ -691,5 +763,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
|
|||||||
}
|
}
|
||||||
|
|
||||||
NarMemberDatas narMembers;
|
NarMemberDatas narMembers;
|
||||||
return getBuildOutput(destStore, narMembers, drv);
|
return getBuildOutput(destStore, narMembers, derivationOutputs);
|
||||||
}
|
}
|
||||||
|
@@ -6,15 +6,27 @@
|
|||||||
#include <map>
|
#include <map>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <queue>
|
#include <queue>
|
||||||
|
#include <regex>
|
||||||
|
#include <semaphore>
|
||||||
|
|
||||||
|
#include <prometheus/counter.h>
|
||||||
|
#include <prometheus/gauge.h>
|
||||||
|
#include <prometheus/registry.h>
|
||||||
|
|
||||||
#include "db.hh"
|
#include "db.hh"
|
||||||
|
|
||||||
#include "parsed-derivations.hh"
|
#include <nix/store/derivations.hh>
|
||||||
#include "pathlocks.hh"
|
#include <nix/store/derivation-options.hh>
|
||||||
#include "pool.hh"
|
#include <nix/store/pathlocks.hh>
|
||||||
#include "store-api.hh"
|
#include <nix/util/pool.hh>
|
||||||
#include "sync.hh"
|
#include <nix/store/build-result.hh>
|
||||||
|
#include <nix/store/store-api.hh>
|
||||||
|
#include <nix/util/sync.hh>
|
||||||
#include "nar-extractor.hh"
|
#include "nar-extractor.hh"
|
||||||
|
#include <nix/store/serve-protocol.hh>
|
||||||
|
#include <nix/store/serve-protocol-impl.hh>
|
||||||
|
#include <nix/store/serve-protocol-connection.hh>
|
||||||
|
#include <nix/store/machines.hh>
|
||||||
|
|
||||||
|
|
||||||
typedef unsigned int BuildID;
|
typedef unsigned int BuildID;
|
||||||
@@ -48,6 +60,7 @@ typedef enum {
|
|||||||
ssConnecting = 10,
|
ssConnecting = 10,
|
||||||
ssSendingInputs = 20,
|
ssSendingInputs = 20,
|
||||||
ssBuilding = 30,
|
ssBuilding = 30,
|
||||||
|
ssWaitingForLocalSlot = 35,
|
||||||
ssReceivingOutputs = 40,
|
ssReceivingOutputs = 40,
|
||||||
ssPostProcessing = 50,
|
ssPostProcessing = 50,
|
||||||
} StepState;
|
} StepState;
|
||||||
@@ -72,6 +85,8 @@ struct RemoteResult
|
|||||||
{
|
{
|
||||||
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
|
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void updateWithBuildResult(const nix::BuildResult &);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@@ -156,8 +171,8 @@ struct Step
|
|||||||
|
|
||||||
nix::StorePath drvPath;
|
nix::StorePath drvPath;
|
||||||
std::unique_ptr<nix::Derivation> drv;
|
std::unique_ptr<nix::Derivation> drv;
|
||||||
std::unique_ptr<nix::ParsedDerivation> parsedDrv;
|
std::unique_ptr<nix::DerivationOptions> drvOptions;
|
||||||
std::set<std::string> requiredSystemFeatures;
|
nix::StringSet requiredSystemFeatures;
|
||||||
bool preferLocalBuild;
|
bool preferLocalBuild;
|
||||||
bool isDeterministic;
|
bool isDeterministic;
|
||||||
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
|
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
|
||||||
@@ -225,18 +240,10 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
|
|||||||
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
|
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
|
||||||
|
|
||||||
|
|
||||||
struct Machine
|
struct Machine : nix::Machine
|
||||||
{
|
{
|
||||||
typedef std::shared_ptr<Machine> ptr;
|
typedef std::shared_ptr<Machine> ptr;
|
||||||
|
|
||||||
bool enabled{true};
|
|
||||||
|
|
||||||
std::string sshName, sshKey;
|
|
||||||
std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
|
|
||||||
unsigned int maxJobs = 1;
|
|
||||||
float speedFactor = 1.0;
|
|
||||||
std::string sshPublicHostKey;
|
|
||||||
|
|
||||||
struct State {
|
struct State {
|
||||||
typedef std::shared_ptr<State> ptr;
|
typedef std::shared_ptr<State> ptr;
|
||||||
counter currentJobs{0};
|
counter currentJobs{0};
|
||||||
@@ -286,10 +293,13 @@ struct Machine
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isLocalhost()
|
bool isLocalhost() const;
|
||||||
{
|
|
||||||
return sshName == "localhost";
|
// A connection to a machine
|
||||||
}
|
struct Connection : nix::ServeProto::BasicClientConnection {
|
||||||
|
// Backpointer to the machine
|
||||||
|
ptr machine;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@@ -343,9 +353,13 @@ private:
|
|||||||
|
|
||||||
/* The build machines. */
|
/* The build machines. */
|
||||||
std::mutex machinesReadyLock;
|
std::mutex machinesReadyLock;
|
||||||
typedef std::map<std::string, Machine::ptr> Machines;
|
typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
|
||||||
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr
|
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr
|
||||||
|
|
||||||
|
/* Throttler for CPU-bound local work. */
|
||||||
|
static constexpr unsigned int maxSupportedLocalWorkers = 1024;
|
||||||
|
std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;
|
||||||
|
|
||||||
/* Various stats. */
|
/* Various stats. */
|
||||||
time_t startedAt;
|
time_t startedAt;
|
||||||
counter nrBuildsRead{0};
|
counter nrBuildsRead{0};
|
||||||
@@ -355,6 +369,7 @@ private:
|
|||||||
counter nrStepsDone{0};
|
counter nrStepsDone{0};
|
||||||
counter nrStepsBuilding{0};
|
counter nrStepsBuilding{0};
|
||||||
counter nrStepsCopyingTo{0};
|
counter nrStepsCopyingTo{0};
|
||||||
|
counter nrStepsWaitingForDownloadSlot{0};
|
||||||
counter nrStepsCopyingFrom{0};
|
counter nrStepsCopyingFrom{0};
|
||||||
counter nrStepsWaiting{0};
|
counter nrStepsWaiting{0};
|
||||||
counter nrUnsupportedSteps{0};
|
counter nrUnsupportedSteps{0};
|
||||||
@@ -385,7 +400,6 @@ private:
|
|||||||
|
|
||||||
struct MachineReservation
|
struct MachineReservation
|
||||||
{
|
{
|
||||||
typedef std::shared_ptr<MachineReservation> ptr;
|
|
||||||
State & state;
|
State & state;
|
||||||
Step::ptr step;
|
Step::ptr step;
|
||||||
Machine::ptr machine;
|
Machine::ptr machine;
|
||||||
@@ -423,7 +437,7 @@ private:
|
|||||||
|
|
||||||
/* How often the build steps of a jobset should be repeated in
|
/* How often the build steps of a jobset should be repeated in
|
||||||
order to detect non-determinism. */
|
order to detect non-determinism. */
|
||||||
std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
|
std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;
|
||||||
|
|
||||||
bool uploadLogsToBinaryCache;
|
bool uploadLogsToBinaryCache;
|
||||||
|
|
||||||
@@ -432,8 +446,30 @@ private:
|
|||||||
via gc_roots_dir. */
|
via gc_roots_dir. */
|
||||||
nix::Path rootsDir;
|
nix::Path rootsDir;
|
||||||
|
|
||||||
|
std::string metricsAddr;
|
||||||
|
|
||||||
|
struct PromMetrics
|
||||||
|
{
|
||||||
|
std::shared_ptr<prometheus::Registry> registry;
|
||||||
|
|
||||||
|
prometheus::Counter& queue_checks_started;
|
||||||
|
prometheus::Counter& queue_build_loads;
|
||||||
|
prometheus::Counter& queue_steps_created;
|
||||||
|
prometheus::Counter& queue_checks_early_exits;
|
||||||
|
prometheus::Counter& queue_checks_finished;
|
||||||
|
|
||||||
|
prometheus::Counter& dispatcher_time_spent_running;
|
||||||
|
prometheus::Counter& dispatcher_time_spent_waiting;
|
||||||
|
|
||||||
|
prometheus::Counter& queue_monitor_time_spent_running;
|
||||||
|
prometheus::Counter& queue_monitor_time_spent_waiting;
|
||||||
|
|
||||||
|
PromMetrics();
|
||||||
|
};
|
||||||
|
PromMetrics prom;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
State();
|
State(std::optional<std::string> metricsAddrOpt);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
@@ -461,23 +497,28 @@ private:
|
|||||||
const std::string & machine);
|
const std::string & machine);
|
||||||
|
|
||||||
int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
|
int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
|
||||||
Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
|
Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);
|
||||||
|
|
||||||
void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
|
void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
|
||||||
|
|
||||||
void queueMonitor();
|
void queueMonitor();
|
||||||
|
|
||||||
void queueMonitorLoop();
|
void queueMonitorLoop(Connection & conn);
|
||||||
|
|
||||||
/* Check the queue for new builds. */
|
/* Check the queue for new builds. */
|
||||||
bool getQueuedBuilds(Connection & conn,
|
bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);
|
||||||
nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
|
|
||||||
|
|
||||||
/* Handle cancellation, deletion and priority bumps. */
|
/* Handle cancellation, deletion and priority bumps. */
|
||||||
void processQueueChange(Connection & conn);
|
void processQueueChange(Connection & conn);
|
||||||
|
|
||||||
BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
|
BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
|
||||||
const nix::Derivation & drv);
|
const nix::StorePath & drvPath);
|
||||||
|
|
||||||
|
/* Returns paths missing from the remote store. Paths are processed in
|
||||||
|
* parallel to work around the possible latency of remote stores. */
|
||||||
|
std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
|
||||||
|
nix::ref<nix::Store> destStore,
|
||||||
|
const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);
|
||||||
|
|
||||||
Step::ptr createStep(nix::ref<nix::Store> store,
|
Step::ptr createStep(nix::ref<nix::Store> store,
|
||||||
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
|
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
|
||||||
@@ -508,19 +549,19 @@ private:
|
|||||||
|
|
||||||
void abortUnsupported();
|
void abortUnsupported();
|
||||||
|
|
||||||
void builder(MachineReservation::ptr reservation);
|
void builder(std::unique_ptr<MachineReservation> reservation);
|
||||||
|
|
||||||
/* Perform the given build step. Return true if the step is to be
|
/* Perform the given build step. Return true if the step is to be
|
||||||
retried. */
|
retried. */
|
||||||
enum StepResult { sDone, sRetry, sMaybeCancelled };
|
enum StepResult { sDone, sRetry, sMaybeCancelled };
|
||||||
StepResult doBuildStep(nix::ref<nix::Store> destStore,
|
StepResult doBuildStep(nix::ref<nix::Store> destStore,
|
||||||
MachineReservation::ptr reservation,
|
std::unique_ptr<MachineReservation> reservation,
|
||||||
std::shared_ptr<ActiveStep> activeStep);
|
std::shared_ptr<ActiveStep> activeStep);
|
||||||
|
|
||||||
void buildRemote(nix::ref<nix::Store> destStore,
|
void buildRemote(nix::ref<nix::Store> destStore,
|
||||||
|
std::unique_ptr<MachineReservation> reservation,
|
||||||
Machine::ptr machine, Step::ptr step,
|
Machine::ptr machine, Step::ptr step,
|
||||||
unsigned int maxSilentTime, unsigned int buildTimeout,
|
const nix::ServeProto::BuildOptions & buildOptions,
|
||||||
unsigned int repeats,
|
|
||||||
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
||||||
std::function<void(StepState)> updateStep,
|
std::function<void(StepState)> updateStep,
|
||||||
NarMemberDatas & narMembers);
|
NarMemberDatas & narMembers);
|
||||||
@@ -543,6 +584,8 @@ private:
|
|||||||
|
|
||||||
void addRoot(const nix::StorePath & storePath);
|
void addRoot(const nix::StorePath & storePath);
|
||||||
|
|
||||||
|
void runMetricsExporter();
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
void showStatus();
|
void showStatus();
|
||||||
|
@@ -6,6 +6,7 @@ use parent 'Catalyst';
|
|||||||
use Moose;
|
use Moose;
|
||||||
use Hydra::Plugin;
|
use Hydra::Plugin;
|
||||||
use Hydra::Model::DB;
|
use Hydra::Model::DB;
|
||||||
|
use Hydra::Config qw(getLDAPConfigAmbient);
|
||||||
use Catalyst::Runtime '5.70';
|
use Catalyst::Runtime '5.70';
|
||||||
use Catalyst qw/ConfigLoader
|
use Catalyst qw/ConfigLoader
|
||||||
Static::Simple
|
Static::Simple
|
||||||
@@ -19,7 +20,6 @@ use Catalyst qw/ConfigLoader
|
|||||||
PrometheusTiny/,
|
PrometheusTiny/,
|
||||||
'-Log=warn,fatal,error';
|
'-Log=warn,fatal,error';
|
||||||
use CatalystX::RoleApplicator;
|
use CatalystX::RoleApplicator;
|
||||||
use YAML qw(LoadFile);
|
|
||||||
use Path::Class 'file';
|
use Path::Class 'file';
|
||||||
|
|
||||||
our $VERSION = '0.01';
|
our $VERSION = '0.01';
|
||||||
@@ -43,9 +43,7 @@ __PACKAGE__->config(
|
|||||||
role_field => "role",
|
role_field => "role",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ldap => $ENV{'HYDRA_LDAP_CONFIG'} ? LoadFile(
|
ldap => getLDAPConfigAmbient()->{'config'}
|
||||||
file($ENV{'HYDRA_LDAP_CONFIG'})
|
|
||||||
) : undef
|
|
||||||
},
|
},
|
||||||
'Plugin::ConfigLoader' => {
|
'Plugin::ConfigLoader' => {
|
||||||
driver => {
|
driver => {
|
||||||
|
@@ -4,7 +4,6 @@ use strict;
|
|||||||
use warnings;
|
use warnings;
|
||||||
use base 'Hydra::Base::Controller::REST';
|
use base 'Hydra::Base::Controller::REST';
|
||||||
use List::SomeUtils qw(any);
|
use List::SomeUtils qw(any);
|
||||||
use Nix::Store;
|
|
||||||
use Hydra::Helper::Nix;
|
use Hydra::Helper::Nix;
|
||||||
use Hydra::Helper::CatalystUtils;
|
use Hydra::Helper::CatalystUtils;
|
||||||
|
|
||||||
@@ -30,7 +29,7 @@ sub getChannelData {
|
|||||||
my $outputs = {};
|
my $outputs = {};
|
||||||
foreach my $output (@outputs) {
|
foreach my $output (@outputs) {
|
||||||
my $outPath = $output->get_column("outpath");
|
my $outPath = $output->get_column("outpath");
|
||||||
next if $checkValidity && !isValidPath($outPath);
|
next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
|
||||||
$outputs->{$output->get_column("outname")} = $outPath;
|
$outputs->{$output->get_column("outname")} = $outPath;
|
||||||
push @storePaths, $outPath;
|
push @storePaths, $outPath;
|
||||||
# Put the system type in the manifest (for top-level
|
# Put the system type in the manifest (for top-level
|
||||||
|
@@ -2,7 +2,167 @@ package Hydra::Config;
|
|||||||
|
|
||||||
use strict;
|
use strict;
|
||||||
use warnings;
|
use warnings;
|
||||||
|
use Config::General;
|
||||||
|
use List::SomeUtils qw(none);
|
||||||
|
use YAML qw(LoadFile);
|
||||||
|
|
||||||
|
our @ISA = qw(Exporter);
|
||||||
|
our @EXPORT = qw(
|
||||||
|
getHydraConfig
|
||||||
|
getLDAPConfig
|
||||||
|
getLDAPConfigAmbient
|
||||||
|
);
|
||||||
|
|
||||||
our %configGeneralOpts = (-UseApacheInclude => 1, -IncludeAgain => 1, -IncludeRelative => 1);
|
our %configGeneralOpts = (-UseApacheInclude => 1, -IncludeAgain => 1, -IncludeRelative => 1);
|
||||||
|
|
||||||
|
my $hydraConfigCache;
|
||||||
|
|
||||||
|
sub getHydraConfig {
|
||||||
|
return $hydraConfigCache if defined $hydraConfigCache;
|
||||||
|
|
||||||
|
my $conf;
|
||||||
|
|
||||||
|
if ($ENV{"HYDRA_CONFIG"}) {
|
||||||
|
$conf = $ENV{"HYDRA_CONFIG"};
|
||||||
|
} else {
|
||||||
|
require Hydra::Model::DB;
|
||||||
|
$conf = Hydra::Model::DB::getHydraPath() . "/hydra.conf"
|
||||||
|
};
|
||||||
|
|
||||||
|
if (-f $conf) {
|
||||||
|
$hydraConfigCache = loadConfig($conf);
|
||||||
|
} else {
|
||||||
|
$hydraConfigCache = {};
|
||||||
|
}
|
||||||
|
|
||||||
|
return $hydraConfigCache;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub loadConfig {
|
||||||
|
my ($sourceFile) = @_;
|
||||||
|
|
||||||
|
my %opts = (%configGeneralOpts, -ConfigFile => $sourceFile);
|
||||||
|
|
||||||
|
return { Config::General->new(%opts)->getall };
|
||||||
|
}
|
||||||
|
|
||||||
|
sub is_ldap_in_legacy_mode {
|
||||||
|
my ($config, %env) = @_;
|
||||||
|
|
||||||
|
my $legacy_defined = defined $env{"HYDRA_LDAP_CONFIG"};
|
||||||
|
|
||||||
|
if (defined $config->{"ldap"}) {
|
||||||
|
if ($legacy_defined) {
|
||||||
|
die "The legacy environment variable HYDRA_LDAP_CONFIG is set, but config is also specified in hydra.conf. Please unset the environment variable.";
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
} elsif ($legacy_defined) {
|
||||||
|
warn "Hydra is configured to use LDAP via the HYDRA_LDAP_CONFIG, a deprecated method. Please see the docs about configuring LDAP in the hydra.conf.";
|
||||||
|
return 1;
|
||||||
|
} else {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sub getLDAPConfigAmbient {
|
||||||
|
return getLDAPConfig(getHydraConfig(), %ENV);
|
||||||
|
}
|
||||||
|
|
||||||
|
sub getLDAPConfig {
|
||||||
|
my ($config, %env) = @_;
|
||||||
|
|
||||||
|
my $ldap_config;
|
||||||
|
|
||||||
|
if (is_ldap_in_legacy_mode($config, %env)) {
|
||||||
|
$ldap_config = get_legacy_ldap_config($env{"HYDRA_LDAP_CONFIG"});
|
||||||
|
} else {
|
||||||
|
$ldap_config = $config->{"ldap"};
|
||||||
|
}
|
||||||
|
|
||||||
|
$ldap_config->{"role_mapping"} = normalize_ldap_role_mappings($ldap_config->{"role_mapping"});
|
||||||
|
|
||||||
|
return $ldap_config;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub get_legacy_ldap_config {
|
||||||
|
my ($ldap_yaml_file) = @_;
|
||||||
|
|
||||||
|
return {
|
||||||
|
config => LoadFile($ldap_yaml_file),
|
||||||
|
role_mapping => {
|
||||||
|
"hydra_admin" => [ "admin" ],
|
||||||
|
"hydra_bump-to-front" => [ "bump-to-front" ],
|
||||||
|
"hydra_cancel-build" => [ "cancel-build" ],
|
||||||
|
"hydra_create-projects" => [ "create-projects" ],
|
||||||
|
"hydra_eval-jobset" => [ "eval-jobset" ],
|
||||||
|
"hydra_restart-jobs" => [ "restart-jobs" ],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
sub normalize_ldap_role_mappings {
|
||||||
|
my ($input_map) = @_;
|
||||||
|
|
||||||
|
my $mapping = {};
|
||||||
|
|
||||||
|
my @errors;
|
||||||
|
|
||||||
|
for my $group (keys %{$input_map}) {
|
||||||
|
my $input = $input_map->{$group};
|
||||||
|
|
||||||
|
if (ref $input eq "ARRAY") {
|
||||||
|
$mapping->{$group} = $input;
|
||||||
|
} elsif (ref $input eq "") {
|
||||||
|
$mapping->{$group} = [ $input ];
|
||||||
|
} else {
|
||||||
|
push @errors, "On group '$group': the value is of type ${\ref $input}. Only strings and lists are acceptable.";
|
||||||
|
$mapping->{$group} = [ ];
|
||||||
|
}
|
||||||
|
|
||||||
|
eval {
|
||||||
|
validate_roles($mapping->{$group});
|
||||||
|
};
|
||||||
|
if ($@) {
|
||||||
|
push @errors, "On group '$group': $@";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (@errors) {
|
||||||
|
die "Failed to normalize LDAP role mappings:\n" . (join "\n", @errors);
|
||||||
|
}
|
||||||
|
|
||||||
|
return $mapping;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub validate_roles {
|
||||||
|
my ($roles) = @_;
|
||||||
|
|
||||||
|
my @invalid;
|
||||||
|
my $valid = valid_roles();
|
||||||
|
|
||||||
|
for my $role (@$roles) {
|
||||||
|
if (none { $_ eq $role } @$valid) {
|
||||||
|
push @invalid, "'$role'";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (@invalid) {
|
||||||
|
die "Invalid roles: ${\join ', ', @invalid}. Valid roles are: ${\join ', ', @$valid}.";
|
||||||
|
}
|
||||||
|
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub valid_roles {
|
||||||
|
return [
|
||||||
|
"admin",
|
||||||
|
"bump-to-front",
|
||||||
|
"cancel-build",
|
||||||
|
"create-projects",
|
||||||
|
"eval-jobset",
|
||||||
|
"restart-jobs",
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
1;
|
1;
|
||||||
|
@@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
|
|||||||
} elsif ($type eq "git") {
|
} elsif ($type eq "git") {
|
||||||
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
|
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
|
||||||
die if ! -d $clonePath;
|
die if ! -d $clonePath;
|
||||||
$diff .= `(cd $clonePath; git log $rev1..$rev2)`;
|
$diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
|
||||||
$diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
|
$diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
|
||||||
}
|
}
|
||||||
|
|
||||||
$c->stash->{'plain'} = { data => (scalar $diff) || " " };
|
$c->stash->{'plain'} = { data => (scalar $diff) || " " };
|
||||||
@@ -239,6 +239,8 @@ sub triggerJobset {
|
|||||||
sub push : Chained('api') PathPart('push') Args(0) {
|
sub push : Chained('api') PathPart('push') Args(0) {
|
||||||
my ($self, $c) = @_;
|
my ($self, $c) = @_;
|
||||||
|
|
||||||
|
requirePost($c);
|
||||||
|
|
||||||
$c->{stash}->{json}->{jobsetsTriggered} = [];
|
$c->{stash}->{json}->{jobsetsTriggered} = [];
|
||||||
|
|
||||||
my $force = exists $c->request->query_params->{force};
|
my $force = exists $c->request->query_params->{force};
|
||||||
@@ -246,19 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) {
|
|||||||
foreach my $s (@jobsets) {
|
foreach my $s (@jobsets) {
|
||||||
my ($p, $j) = parseJobsetName($s);
|
my ($p, $j) = parseJobsetName($s);
|
||||||
my $jobset = $c->model('DB::Jobsets')->find($p, $j);
|
my $jobset = $c->model('DB::Jobsets')->find($p, $j);
|
||||||
|
requireEvalJobsetPrivileges($c, $jobset->project);
|
||||||
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
|
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
|
||||||
triggerJobset($self, $c, $jobset, $force);
|
triggerJobset($self, $c, $jobset, $force);
|
||||||
}
|
}
|
||||||
|
|
||||||
my @repos = split /,/, ($c->request->query_params->{repos} // "");
|
my @repos = split /,/, ($c->request->query_params->{repos} // "");
|
||||||
foreach my $r (@repos) {
|
foreach my $r (@repos) {
|
||||||
triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
|
my @jobsets = $c->model('DB::Jobsets')->search(
|
||||||
{ 'project.enabled' => 1, 'me.enabled' => 1 },
|
{ 'project.enabled' => 1, 'me.enabled' => 1 },
|
||||||
{
|
{
|
||||||
join => 'project',
|
join => 'project',
|
||||||
where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
|
where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
|
||||||
order_by => 'me.id DESC'
|
order_by => 'me.id DESC'
|
||||||
});
|
});
|
||||||
|
foreach my $jobset (@jobsets) {
|
||||||
|
requireEvalJobsetPrivileges($c, $jobset->project);
|
||||||
|
triggerJobset($self, $c, $jobset, $force)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
$self->status_ok(
|
$self->status_ok(
|
||||||
@@ -285,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
|
|||||||
$c->response->body("");
|
$c->response->body("");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
|
||||||
|
my ($self, $c) = @_;
|
||||||
|
|
||||||
|
$c->{stash}->{json}->{jobsetsTriggered} = [];
|
||||||
|
|
||||||
|
my $in = $c->request->{data};
|
||||||
|
my $url = $in->{repository}->{clone_url} or die;
|
||||||
|
$url =~ s/.git$//;
|
||||||
|
print STDERR "got push from Gitea repository $url\n";
|
||||||
|
|
||||||
|
triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
|
||||||
|
{ 'project.enabled' => 1, 'me.enabled' => 1 },
|
||||||
|
{ join => 'project'
|
||||||
|
, where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
|
||||||
|
});
|
||||||
|
$c->response->body("");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
1;
|
1;
|
||||||
|
@@ -7,15 +7,15 @@ use base 'Hydra::Base::Controller::NixChannel';
|
|||||||
use Hydra::Helper::Nix;
|
use Hydra::Helper::Nix;
|
||||||
use Hydra::Helper::CatalystUtils;
|
use Hydra::Helper::CatalystUtils;
|
||||||
use File::Basename;
|
use File::Basename;
|
||||||
|
use File::LibMagic;
|
||||||
use File::stat;
|
use File::stat;
|
||||||
use Data::Dump qw(dump);
|
use Data::Dump qw(dump);
|
||||||
use Nix::Store;
|
|
||||||
use Nix::Config;
|
|
||||||
use List::SomeUtils qw(all);
|
use List::SomeUtils qw(all);
|
||||||
use Encode;
|
use Encode;
|
||||||
use MIME::Types;
|
|
||||||
use JSON::PP;
|
use JSON::PP;
|
||||||
|
use WWW::Form::UrlEncoded::PP qw();
|
||||||
|
|
||||||
|
use feature 'state';
|
||||||
|
|
||||||
sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
||||||
my ($self, $c, $id) = @_;
|
my ($self, $c, $id) = @_;
|
||||||
@@ -38,6 +38,17 @@ sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
|||||||
$c->stash->{jobset} = $c->stash->{build}->jobset;
|
$c->stash->{jobset} = $c->stash->{build}->jobset;
|
||||||
$c->stash->{job} = $c->stash->{build}->job;
|
$c->stash->{job} = $c->stash->{build}->job;
|
||||||
$c->stash->{runcommandlogs} = [$c->stash->{build}->runcommandlogs->search({}, {order_by => ["id DESC"]})];
|
$c->stash->{runcommandlogs} = [$c->stash->{build}->runcommandlogs->search({}, {order_by => ["id DESC"]})];
|
||||||
|
|
||||||
|
$c->stash->{runcommandlogProblem} = undef;
|
||||||
|
if ($c->stash->{job} =~ qr/^runCommandHook\..*/) {
|
||||||
|
if (!$c->config->{dynamicruncommand}->{enable}) {
|
||||||
|
$c->stash->{runcommandlogProblem} = "disabled-server";
|
||||||
|
} elsif (!$c->stash->{project}->enable_dynamic_run_command) {
|
||||||
|
$c->stash->{runcommandlogProblem} = "disabled-project";
|
||||||
|
} elsif (!$c->stash->{jobset}->enable_dynamic_run_command) {
|
||||||
|
$c->stash->{runcommandlogProblem} = "disabled-jobset";
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -66,14 +77,16 @@ sub build_GET {
|
|||||||
|
|
||||||
$c->stash->{template} = 'build.tt';
|
$c->stash->{template} = 'build.tt';
|
||||||
$c->stash->{isLocalStore} = isLocalStore();
|
$c->stash->{isLocalStore} = isLocalStore();
|
||||||
|
# XXX: If the derivation is content-addressed then this will always return
|
||||||
|
# false because `$_->path` will be empty
|
||||||
$c->stash->{available} =
|
$c->stash->{available} =
|
||||||
$c->stash->{isLocalStore}
|
$c->stash->{isLocalStore}
|
||||||
? all { isValidPath($_->path) } $build->buildoutputs->all
|
? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
|
||||||
: 1;
|
: 1;
|
||||||
$c->stash->{drvAvailable} = isValidPath $build->drvpath;
|
$c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
|
||||||
|
|
||||||
if ($build->finished && $build->iscachedbuild) {
|
if ($build->finished && $build->iscachedbuild) {
|
||||||
my $path = ($build->buildoutputs)[0]->path or die;
|
my $path = ($build->buildoutputs)[0]->path or undef;
|
||||||
my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
|
my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
|
||||||
if (defined $cachedBuildStep) {
|
if (defined $cachedBuildStep) {
|
||||||
$c->stash->{cachedBuild} = $cachedBuildStep->build;
|
$c->stash->{cachedBuild} = $cachedBuildStep->build;
|
||||||
@@ -127,7 +140,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
|
|||||||
$c->stash->{step} = $step;
|
$c->stash->{step} = $step;
|
||||||
|
|
||||||
my $drvPath = $step->drvpath;
|
my $drvPath = $step->drvpath;
|
||||||
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
|
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
|
||||||
showLog($c, $mode, $log_uri);
|
showLog($c, $mode, $log_uri);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -136,7 +149,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
|
|||||||
my ($self, $c, $mode) = @_;
|
my ($self, $c, $mode) = @_;
|
||||||
|
|
||||||
my $drvPath = $c->stash->{build}->drvpath;
|
my $drvPath = $c->stash->{build}->drvpath;
|
||||||
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
|
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
|
||||||
showLog($c, $mode, $log_uri);
|
showLog($c, $mode, $log_uri);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -221,17 +234,24 @@ sub serveFile {
|
|||||||
}
|
}
|
||||||
|
|
||||||
elsif ($ls->{type} eq "regular") {
|
elsif ($ls->{type} eq "regular") {
|
||||||
|
# Have the hosted data considered its own origin to avoid being a giant
|
||||||
|
# XSS hole.
|
||||||
|
$c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');
|
||||||
|
|
||||||
$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
|
$c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
|
||||||
"cat-store", "--store", getStoreUri(), "$path"]) };
|
"store", "cat", "--store", getStoreUri(), "$path"]) };
|
||||||
|
|
||||||
# Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple.
|
# Detect MIME type.
|
||||||
my $type = "text/plain";
|
my $type = "text/plain";
|
||||||
if ($path =~ /.*\.(\S{1,})$/xms) {
|
if ($path =~ /.*\.(\S{1,})$/xms) {
|
||||||
my $ext = $1;
|
my $ext = $1;
|
||||||
my $mimeTypes = MIME::Types->new(only_complete => 1);
|
my $mimeTypes = MIME::Types->new(only_complete => 1);
|
||||||
my $t = $mimeTypes->mimeTypeOf($ext);
|
my $t = $mimeTypes->mimeTypeOf($ext);
|
||||||
$type = ref $t ? $t->type : $t if $t;
|
$type = ref $t ? $t->type : $t if $t;
|
||||||
|
} else {
|
||||||
|
state $magic = File::LibMagic->new(follow_symlinks => 1);
|
||||||
|
my $info = $magic->info_from_filename($path);
|
||||||
|
$type = $info->{mime_with_encoding};
|
||||||
}
|
}
|
||||||
$c->response->content_type($type);
|
$c->response->content_type($type);
|
||||||
$c->forward('Hydra::View::Plain');
|
$c->forward('Hydra::View::Plain');
|
||||||
@@ -277,29 +297,7 @@ sub download : Chained('buildChain') PathPart {
|
|||||||
my $path = $product->path;
|
my $path = $product->path;
|
||||||
$path .= "/" . join("/", @path) if scalar @path > 0;
|
$path .= "/" . join("/", @path) if scalar @path > 0;
|
||||||
|
|
||||||
if (isLocalStore) {
|
serveFile($c, $path);
|
||||||
|
|
||||||
notFound($c, "File '" . $product->path . "' does not exist.") unless -e $product->path;
|
|
||||||
|
|
||||||
# Make sure the file is in the Nix store.
|
|
||||||
$path = checkPath($self, $c, $path);
|
|
||||||
|
|
||||||
# If this is a directory but no "/" is attached, then redirect.
|
|
||||||
if (-d $path && substr($c->request->uri, -1) ne "/") {
|
|
||||||
return $c->res->redirect($c->request->uri . "/");
|
|
||||||
}
|
|
||||||
|
|
||||||
$path = "$path/index.html" if -d $path && -e "$path/index.html";
|
|
||||||
|
|
||||||
notFound($c, "File '$path' does not exist.") if !-e $path;
|
|
||||||
|
|
||||||
notFound($c, "Path '$path' is a directory.") if -d $path;
|
|
||||||
|
|
||||||
$c->serve_static_file($path);
|
|
||||||
|
|
||||||
} else {
|
|
||||||
serveFile($c, $path);
|
|
||||||
}
|
|
     $c->response->headers->last_modified($c->stash->{build}->stoptime);
 }

@@ -312,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
     error($c, "This build is not finished yet.") unless $build->finished;
     my $output = $build->buildoutputs->find({name => $outputName});
     notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
-    gone($c, "Output is no longer available.") unless isValidPath $output->path;
+    gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);

     $c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
     $c->stash->{current_view} = 'NixNAR';

@@ -355,7 +353,7 @@ sub contents : Chained('buildChain') PathPart Args(1) {

     # FIXME: don't use shell invocations below.

-    # FIXME: use nix cat-store
+    # FIXME: use nix store cat

     my $res;

@@ -429,7 +427,7 @@ sub getDependencyGraph {
     };
     $$done{$path} = $node;
     my @refs;
-    foreach my $ref (queryReferences($path)) {
+    foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
         next if $ref eq $path;
         next unless $runtime || $ref =~ /\.drv$/;
         getDependencyGraph($self, $c, $runtime, $done, $ref);

@@ -437,7 +435,7 @@ sub getDependencyGraph {
     }
     # Show in reverse topological order to flatten the graph.
     # Should probably do a proper BFS.
-    my @sorted = reverse topoSortPaths(@refs);
+    my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
     $node->{refs} = [map { $$done{$_} } @sorted];
 }

@@ -450,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
     my $build = $c->stash->{build};
     my $drvPath = $build->drvpath;

-    error($c, "Derivation no longer available.") unless isValidPath $drvPath;
+    error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);

     $c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);

@@ -465,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {

     requireLocalStore($c);

-    error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
+    error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;

     my $done = {};
     $c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];

@@ -485,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
     if (isLocalStore) {
         foreach my $out ($build->buildoutputs) {
             notFound($c, "Path " . $out->path . " is no longer available.")
-                unless isValidPath($out->path);
+                unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
         }
     }
@@ -69,7 +69,7 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {

     my $lastBuild = $c->stash->{jobset}->builds->find(
         { job => $c->stash->{job}, finished => 1 },
-        { order_by => 'id DESC', rows => 1, columns => [@buildListColumns] }
+        { order_by => 'id DESC', rows => 1, columns => ["stoptime", "buildstatus", "closuresize", "size"] }
     );

     $prometheus->new_counter(

@@ -92,6 +92,26 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {
         $c->stash->{job},
     )->inc($lastBuild->buildstatus > 0);

+    $prometheus->new_gauge(
+        name => "hydra_build_closure_size",
+        help => "Closure size of the last job's build in bytes",
+        labels => [ "project", "jobset", "job" ]
+    )->labels(
+        $c->stash->{project}->name,
+        $c->stash->{jobset}->name,
+        $c->stash->{job},
+    )->inc($lastBuild->closuresize);
+
+    $prometheus->new_gauge(
+        name => "hydra_build_output_size",
+        help => "Output size of the last job's build in bytes",
+        labels => [ "project", "jobset", "job" ]
+    )->labels(
+        $c->stash->{project}->name,
+        $c->stash->{jobset}->name,
+        $c->stash->{job},
+    )->inc($lastBuild->size);
+
     $c->stash->{'plain'} = { data => $prometheus->render };
     $c->forward('Hydra::View::Plain');
 }
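With the two new gauges, the per-job /prometheus endpoint also reports the closure size and output size of the most recent finished build. A sketch of the rendered text exposition; the label values below are illustrative, not taken from the patch:

    hydra_build_closure_size{project="hydra", jobset="master", job="build"} 1234567890
    hydra_build_output_size{project="hydra", jobset="master", job="build"} 98765432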
@@ -261,6 +261,14 @@ sub updateJobset {

     my $checkinterval = int(trim($c->stash->{params}->{checkinterval}));

+    my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
+    if ($enable_dynamic_run_command
+        && !($c->config->{dynamicruncommand}->{enable}
+            && $jobset->project->enable_dynamic_run_command))
+    {
+        badRequest($c, "Dynamic RunCommand is not enabled by the server or the parent project.");
+    }
+
     $jobset->update(
         { name => $jobsetName
         , description => trim($c->stash->{params}->{"description"})

@@ -268,6 +276,7 @@ sub updateJobset {
         , nixexprinput => $nixExprInput
         , enabled => $enabled
         , enableemail => defined $c->stash->{params}->{enableemail} ? 1 : 0
+        , enable_dynamic_run_command => $enable_dynamic_run_command
         , emailoverride => trim($c->stash->{params}->{emailoverride}) || ""
         , hidden => defined $c->stash->{params}->{visible} ? 0 : 1
         , keepnr => int(trim($c->stash->{params}->{keepnr} // "0"))

@@ -355,6 +364,21 @@ sub evals_GET {
     );
 }

+sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
+
+sub errors_GET {
+    my ($self, $c) = @_;
+
+    $c->stash->{template} = 'eval-error.tt';
+
+    my $jobsetName = $c->stash->{params}->{name};
+    $c->stash->{jobset} = $c->stash->{project}->jobsets->find(
+        { name => $jobsetName },
+        { '+columns' => { 'errormsg' => 'errormsg' } }
+    );
+
+    $self->status_ok($c, entity => $c->stash->{jobset});
+}
+
 # Redirect to the latest finished evaluation of this jobset.
 sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {
@@ -76,7 +76,9 @@ sub view_GET {
     $c->stash->{removed} = $diff->{removed};
     $c->stash->{unfinished} = $diff->{unfinished};
     $c->stash->{aborted} = $diff->{aborted};
-    $c->stash->{failed} = $diff->{failed};
+    $c->stash->{totalAborted} = $diff->{totalAborted};
+    $c->stash->{totalFailed} = $diff->{totalFailed};
+    $c->stash->{totalQueued} = $diff->{totalQueued};

     $c->stash->{full} = ($c->req->params->{full} || "0") eq "1";

@@ -86,6 +88,17 @@ sub view_GET {
     );
 }

+sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
+
+sub errors_GET {
+    my ($self, $c) = @_;
+
+    $c->stash->{template} = 'eval-error.tt';
+
+    $c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });
+
+    $self->status_ok($c, entity => $c->stash->{eval});
+}
+
 sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
     my ($self, $c) = @_;
@@ -149,6 +149,11 @@ sub updateProject {
     my $displayName = trim $c->stash->{params}->{displayname};
     error($c, "You must specify a display name.") if $displayName eq "";

+    my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
+    if ($enable_dynamic_run_command && !$c->config->{dynamicruncommand}->{enable}) {
+        badRequest($c, "Dynamic RunCommand is not enabled by the server.");
+    }
+
     $project->update(
         { name => $projectName
         , displayname => $displayName

@@ -157,6 +162,7 @@ sub updateProject {
         , enabled => defined $c->stash->{params}->{enabled} ? 1 : 0
         , hidden => defined $c->stash->{params}->{visible} ? 0 : 1
         , owner => $owner
+        , enable_dynamic_run_command => $enable_dynamic_run_command
         , declfile => trim($c->stash->{params}->{declarative}->{file})
         , decltype => trim($c->stash->{params}->{declarative}->{type})
         , declvalue => trim($c->stash->{params}->{declarative}->{value})
@@ -16,8 +16,11 @@ use List::Util qw[min max];
 use List::SomeUtils qw{any};
 use Net::Prometheus;
 use Types::Standard qw/StrMatch/;
+use WWW::Form::UrlEncoded::PP qw();

 use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
+# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
+use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

 # Put this controller at top-level.
 __PACKAGE__->config->{namespace} = '';

@@ -32,6 +35,7 @@ sub noLoginNeeded {

     return $whitelisted ||
         $c->request->path eq "api/push-github" ||
+        $c->request->path eq "api/push-gitea" ||
         $c->request->path eq "google-login" ||
         $c->request->path eq "github-redirect" ||
         $c->request->path eq "github-login" ||

@@ -47,6 +51,7 @@ sub begin :Private {
     $c->stash->{curUri} = $c->request->uri;
     $c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
     $c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
+    $c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
     $c->stash->{curTime} = time;
     $c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
     $c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";

@@ -78,7 +83,7 @@ sub begin :Private {
     $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

     # XSRF protection: require POST requests to have the same origin.
-    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
+    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
         my $referer = $c->req->header('Referer');
         $referer //= $c->req->header('Origin');
         my $base = $c->req->base;

@@ -158,7 +163,7 @@ sub status_GET {
         { "buildsteps.busy" => { '!=', 0 } },
         { order_by => ["globalpriority DESC", "id"],
           join => "buildsteps",
-          columns => [@buildListColumns]
+          columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
         })]
     );
 }

@@ -327,7 +332,7 @@ sub nar :Local :Args(1) {
     else {
         $path = $Nix::Config::storeDir . "/$path";

-        gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path);
+        gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);

         $c->stash->{current_view} = 'NixNAR';
         $c->stash->{storePath} = $path;

@@ -356,6 +361,33 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
 }

+sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
+    my ($self, $c, $realisation) = @_;
+
+    if (!isLocalStore) {
+        notFound($c, "There is no binary cache here.");
+    }
+
+    else {
+        my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
+        my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);
+
+        if (!$rawRealisation) {
+            $c->response->status(404);
+            $c->response->content_type('text/plain');
+            $c->stash->{plain}->{data} = "does not exist\n";
+            $c->forward('Hydra::View::Plain');
+            setCacheHeaders($c, 60 * 60);
+            return;
+        }
+
+        $c->response->content_type('text/plain');
+        $c->stash->{plain}->{data} = $rawRealisation;
+        $c->forward('Hydra::View::Plain');
+    }
+}
+
 sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
     my ($self, $c, $narinfo) = @_;

@@ -367,7 +399,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
     my ($hash) = $narinfo =~ NARINFO_REGEX;

     die("Hash length was not 32") if length($hash) != 32;
-    my $path = queryPathFromHashPart($hash);
+    my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);

     if (!$path) {
         $c->response->status(404);

@@ -525,7 +557,7 @@ sub log :Local :Args(1) {
     my $logPrefix = $c->config->{log_prefix};

     if (defined $logPrefix) {
-        $c->res->redirect($logPrefix . "log/" . basename($drvPath));
+        $c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
     } else {
         notFound($c, "The build log of $drvPath is not available.");
     }
@@ -7,6 +7,7 @@ use base 'Hydra::Base::Controller::REST';
 use File::Slurper qw(read_text);
 use Crypt::RandPasswd;
 use Digest::SHA1 qw(sha1_hex);
+use Hydra::Config qw(getLDAPConfigAmbient);
 use Hydra::Helper::Nix;
 use Hydra::Helper::CatalystUtils;
 use Hydra::Helper::Email;

@@ -56,10 +57,10 @@ sub logout_POST {

 sub doLDAPLogin {
     my ($self, $c, $username) = @_;

     my $user = $c->find_user({ username => $username });
     my $LDAPUser = $c->find_user({ username => $username }, 'ldap');
-    my @LDAPRoles = grep { (substr $_, 0, 6) eq "hydra_" } $LDAPUser->roles;
+    my @LDAPRoles = $LDAPUser->roles;
+    my $role_mapping = getLDAPConfigAmbient()->{"role_mapping"};

     if (!$user) {
         $c->model('DB::Users')->create(

@@ -79,8 +80,13 @@ sub doLDAPLogin {
         });
     }
     $user->userroles->delete;
-    if (@LDAPRoles) {
-        $user->userroles->create({ role => (substr $_, 6) }) for @LDAPRoles;
+    foreach my $ldap_role (@LDAPRoles) {
+        if (defined($role_mapping->{$ldap_role})) {
+            my $roles = $role_mapping->{$ldap_role};
+            for my $mapped_role (@$roles) {
+                $user->userroles->create({ role => $mapped_role });
+            }
+        }
     }
     $c->set_authenticated($user);
 }
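LDAP roles are no longer derived from a hard-coded hydra_ prefix; instead each LDAP role is looked up in a role_mapping table from the LDAP configuration, and every mapped Hydra role is granted. A minimal sketch of such a mapping, assuming a hydra.conf-style <ldap> section; the exact file layout and how single values are normalised to lists are not shown in this diff:

    <ldap>
      <role_mapping>
        hydra_admin = admin
        hydra_cancel-build = cancel-build
      </role_mapping>
    </ldap>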
@@ -457,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
             , "jobset.enabled" => 1
             },
             { order_by => ["project", "jobset", "job"]
-            , join => ["project", "jobset"]
+            , join => {"jobset" => "project"}
             })];
 }
@@ -19,14 +19,16 @@ use Hydra::Helper::CatalystUtils;

 our @ISA = qw(Exporter);
 our @EXPORT = qw(
+    validateDeclarativeJobset
+    createJobsetInputsRowAndData
     updateDeclarativeJobset
     handleDeclarativeJobsetBuild
     handleDeclarativeJobsetJson
 );


-sub updateDeclarativeJobset {
-    my ($db, $project, $jobsetName, $declSpec) = @_;
+sub validateDeclarativeJobset {
+    my ($config, $project, $jobsetName, $declSpec) = @_;

     my @allowed_keys = qw(
         enabled

@@ -39,6 +41,7 @@ sub updateDeclarativeJobset {
         checkinterval
         schedulingshares
         enableemail
+        enable_dynamic_run_command
         emailoverride
         keepnr
     );

@@ -61,16 +64,39 @@ sub updateDeclarativeJobset {
         }
     }

+    my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 1 : 0;
+    if ($enable_dynamic_run_command
+        && !($config->{dynamicruncommand}->{enable}
+            && $project->enable_dynamic_run_command))
+    {
+        die "Dynamic RunCommand is not enabled by the server or the parent project.";
+    }
+
+    return %update;
+}
+
+sub createJobsetInputsRowAndData {
+    my ($name, $declSpec) = @_;
+    my $data = $declSpec->{"inputs"}->{$name};
+    my $row = {
+        name => $name,
+        type => $data->{type}
+    };
+    $row->{emailresponsible} = $data->{emailresponsible} // 0;
+
+    return ($row, $data);
+}
+
+sub updateDeclarativeJobset {
+    my ($config, $db, $project, $jobsetName, $declSpec) = @_;
+
+    my %update = validateDeclarativeJobset($config, $project, $jobsetName, $declSpec);
+
     $db->txn_do(sub {
         my $jobset = $project->jobsets->update_or_create(\%update);
         $jobset->jobsetinputs->delete;
         foreach my $name (keys %{$declSpec->{"inputs"}}) {
-            my $data = $declSpec->{"inputs"}->{$name};
-            my $row = {
-                name => $name,
-                type => $data->{type}
-            };
-            $row->{emailresponsible} = $data->{emailresponsible} // 0;
+            my ($row, $data) = createJobsetInputsRowAndData($name, $declSpec);
             my $input = $jobset->jobsetinputs->create($row);
             $input->jobsetinputalts->create({altnr => 0, value => $data->{value}});
         }

@@ -81,6 +107,7 @@ sub updateDeclarativeJobset {

 sub handleDeclarativeJobsetJson {
     my ($db, $project, $declSpec) = @_;
+    my $config = getHydraConfig();
     $db->txn_do(sub {
         my @kept = keys %$declSpec;
         push @kept, ".jobsets";

@@ -88,7 +115,7 @@ sub handleDeclarativeJobsetJson {
         foreach my $jobsetName (keys %$declSpec) {
             my $spec = $declSpec->{$jobsetName};
             eval {
-                updateDeclarativeJobset($db, $project, $jobsetName, $spec);
+                updateDeclarativeJobset($config, $db, $project, $jobsetName, $spec);
                 1;
             } or do {
                 print STDERR "ERROR: failed to process declarative jobset ", $project->name, ":${jobsetName}, ", $@, "\n";
@@ -32,12 +32,26 @@ sub buildDiff {
         removed => [],
         unfinished => [],
         aborted => [],
-        failed => [],
+
+        # These summary counters cut across the categories to determine whether
+        # actions such as "Restart all failed" or "Bump queue" are available.
+        totalAborted => 0,
+        totalFailed => 0,
+        totalQueued => 0,
     };

     my $n = 0;
     foreach my $build (@{$builds}) {
-        my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
+        my $aborted = $build->finished != 0 && (
+            # aborted
+            $build->buildstatus == 3
+            # cancelled
+            || $build->buildstatus == 4
+            # timeout
+            || $build->buildstatus == 7
+            # log limit exceeded
+            || $build->buildstatus == 10
+        );
         my $d;
         my $found = 0;
         while ($n < scalar(@{$builds2})) {

@@ -71,8 +85,15 @@ sub buildDiff {
         } else {
             push @{$ret->{new}}, $build if !$found;
         }
-        if (defined $build->buildstatus && $build->buildstatus != 0) {
-            push @{$ret->{failed}}, $build;
+        if ($build->finished != 0 && $build->buildstatus != 0) {
+            if ($aborted) {
+                ++$ret->{totalAborted};
+            } else {
+                ++$ret->{totalFailed};
+            }
+        } elsif ($build->finished == 0) {
+            ++$ret->{totalQueued};
         }
     }
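A hypothetical caller-side sketch, not part of the patch, of how the new cross-cutting counters can be consumed (the jobset-eval view shown earlier stashes them in exactly this way):

    my $diff = buildDiff($builds, $builds2);
    printf("aborted: %d, failed: %d, queued: %d\n",
        $diff->{totalAborted}, $diff->{totalFailed}, $diff->{totalQueued});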
@@ -15,6 +15,7 @@ our @EXPORT = qw(
     forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner
     requireBumpPrivileges
     requireCancelBuildPrivileges
+    requireEvalJobsetPrivileges
     trim
     getLatestFinishedEval getFirstEval
     paramToList

@@ -186,6 +187,27 @@ sub isProjectOwner {
         defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username }));
 }

+sub hasEvalJobsetRole {
+    my ($c) = @_;
+    return $c->user_exists && $c->check_user_roles("eval-jobset");
+}
+
+sub mayEvalJobset {
+    my ($c, $project) = @_;
+    return
+        $c->user_exists &&
+        (isAdmin($c) ||
+         hasEvalJobsetRole($c) ||
+         isProjectOwner($c, $project));
+}
+
+sub requireEvalJobsetPrivileges {
+    my ($c, $project) = @_;
+    requireUser($c);
+    accessDenied($c, "Only the project members, administrators, and accounts with eval-jobset privileges can perform this operation.")
+        unless mayEvalJobset($c, $project);
+}
+
 sub hasCancelBuildRole {
     my ($c) = @_;
     return $c->user_exists && $c->check_user_roles('cancel-build');

@@ -272,7 +294,7 @@ sub requireAdmin {

 sub requirePost {
     my ($c) = @_;
-    error($c, "Request must be POSTed.") if $c->request->method ne "POST";
+    error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST";
 }
@@ -5,7 +5,6 @@ use warnings;
 use Exporter;
 use File::Path;
 use File::Basename;
-use Config::General;
 use Hydra::Config;
 use Hydra::Helper::CatalystUtils;
 use Hydra::Model::DB;

@@ -37,36 +36,22 @@ our @EXPORT = qw(
     jobsetOverview
     jobsetOverview_
     pathIsInsidePrefix
+    readIntoSocket
     readNixFile
     registerRoot
     restartBuilds
     run
+    $MACHINE_LOCAL_STORE
 );

+our $MACHINE_LOCAL_STORE = Nix::Store->new();
+
 sub getHydraHome {
     my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n";
     return $dir;
 }

-my $hydraConfig;
-
-sub getHydraConfig {
-    return $hydraConfig if defined $hydraConfig;
-    my $conf = $ENV{"HYDRA_CONFIG"} || (Hydra::Model::DB::getHydraPath . "/hydra.conf");
-    my %opts = (%Hydra::Config::configGeneralOpts, -ConfigFile => $conf);
-    if (-f $conf) {
-        my %h = Config::General->new(%opts)->getall;
-
-        $hydraConfig = \%h;
-    } else {
-        $hydraConfig = {};
-    }
-    return $hydraConfig;
-}
-
 # Return hash of statsd configuration of the following shape:
 # (
 #   host => string,

@@ -190,6 +175,9 @@ sub getDrvLogPath {
     for ($fn . $bucketed, $fn . $bucketed . ".bz2") {
         return $_ if -f $_;
     }
+    for ($fn . $bucketed, $fn . $bucketed . ".zst") {
+        return $_ if -f $_;
+    }
     return undef;
 }

@@ -206,6 +194,10 @@ sub findLog {

     return undef if scalar @outPaths == 0;

+    # Filter out any NULLs. Content-addressed derivations
+    # that haven't built yet or failed to build may have a NULL outPath.
+    @outPaths = grep {defined} @outPaths;
+
     my @steps = $c->model('DB::BuildSteps')->search(
         { path => { -in => [@outPaths] } },
         { select => ["drvpath"]

@@ -305,8 +297,7 @@ sub getEvals {

     my @evals = $evals_result_set->search(
         { hasnewbuilds => 1 },
-        { order_by => "$me.id DESC", rows => $rows, offset => $offset
-        , prefetch => { evaluationerror => [ ] } });
+        { order_by => "$me.id DESC", rows => $rows, offset => $offset });
     my @res = ();
     my $cache = {};

@@ -426,6 +417,16 @@ sub pathIsInsidePrefix {
     return $cur;
 }

+sub readIntoSocket{
+    my (%args) = @_;
+    my $sock;
+
+    eval {
+        open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
+    };
+
+    return $sock;
+}
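A minimal usage sketch for the new readIntoSocket helper; the helper only opens a read pipe to whatever command it is given, so the command shown here is purely illustrative:

    my $sock = readIntoSocket(cmd => ["hydra-eval-jobset", $projectName, $jobsetName]);
    while (my $line = <$sock>) {
        print $line;
    }
    close($sock);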
@@ -513,7 +514,7 @@ sub restartBuilds {
     $builds = $builds->search({ finished => 1 });

     foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
-        next if !isValidPath($build->drvpath);
+        next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
         registerRoot $build->drvpath;
     }

@@ -556,7 +557,7 @@ sub getStoreUri {
 sub readNixFile {
     my ($path) = @_;
     return grab(cmd => ["nix", "--experimental-features", "nix-command",
-                        "cat-store", "--store", getStoreUri(), "$path"]);
+                        "store", "cat", "--store", getStoreUri(), "$path"]);
 }
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;

@@ -38,9 +37,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
         {uri => $uri, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {

@@ -58,7 +57,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;

     # FIXME: time window between nix-prefetch-bzr and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedBazaarInputs')->create(
@@ -9,11 +9,24 @@ use Hydra::Helper::CatalystUtils;
 sub stepFinished {
     my ($self, $step, $logPath) = @_;

-    my $doCompress = $self->{config}->{'compress_build_logs'} // "1";
+    my $doCompress = $self->{config}->{'compress_build_logs'} // '1';
+    my $silent = $self->{config}->{'compress_build_logs_silent'} // '0';
+    my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2';

-    if ($doCompress eq "1" && -e $logPath) {
-        print STDERR "compressing ‘$logPath’...\n";
-        system("bzip2", "--force", $logPath);
+    if (not -e $logPath or $doCompress ne "1") {
+        return;
+    }
+
+    if ($silent ne '1') {
+        print STDERR "compressing '$logPath' with $compression...\n";
+    }
+
+    if ($compression eq 'bzip2') {
+        system('bzip2', '--force', $logPath);
+    } elsif ($compression eq 'zstd') {
+        system('zstd', '--rm', '--quiet', '-T0', $logPath);
+    } else {
+        print STDERR "unknown compression type '$compression'\n";
     }
 }
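With these options the compression method and the log verbosity become configurable. A hydra.conf sketch; the key names are taken from the code above, the values are illustrative:

    compress_build_logs = 1
    compress_build_logs_silent = 0
    compress_build_logs_compression = zstd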
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
|
|||||||
use File::Path;
|
use File::Path;
|
||||||
use Hydra::Helper::Exec;
|
use Hydra::Helper::Exec;
|
||||||
use Hydra::Helper::Nix;
|
use Hydra::Helper::Nix;
|
||||||
use Nix::Store;
|
|
||||||
|
|
||||||
sub supportedInputTypes {
|
sub supportedInputTypes {
|
||||||
my ($self, $inputTypes) = @_;
|
my ($self, $inputTypes) = @_;
|
||||||
@@ -58,7 +57,7 @@ sub fetchInput {
|
|||||||
{uri => $uri, revision => $revision},
|
{uri => $uri, revision => $revision},
|
||||||
{rows => 1});
|
{rows => 1});
|
||||||
|
|
||||||
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
|
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
|
||||||
$storePath = $cachedInput->storepath;
|
$storePath = $cachedInput->storepath;
|
||||||
$sha256 = $cachedInput->sha256hash;
|
$sha256 = $cachedInput->sha256hash;
|
||||||
$revision = $cachedInput->revision;
|
$revision = $cachedInput->revision;
|
||||||
@@ -75,8 +74,8 @@ sub fetchInput {
|
|||||||
die "darcs changes --count failed" if $? != 0;
|
die "darcs changes --count failed" if $? != 0;
|
||||||
|
|
||||||
system "rm", "-rf", "$tmpDir/export/_darcs";
|
system "rm", "-rf", "$tmpDir/export/_darcs";
|
||||||
$storePath = addToStore("$tmpDir/export", 1, "sha256");
|
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
|
||||||
$sha256 = queryPathHash($storePath);
|
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
|
||||||
$sha256 =~ s/sha256://;
|
$sha256 =~ s/sha256://;
|
||||||
|
|
||||||
$self->{db}->txn_do(sub {
|
$self->{db}->txn_do(sub {
|
||||||
|
@@ -186,9 +186,9 @@ sub fetchInput {
|
|||||||
{uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
|
{uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
|
||||||
{rows => 1});
|
{rows => 1});
|
||||||
|
|
||||||
addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
||||||
|
|
||||||
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
|
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
|
||||||
$storePath = $cachedInput->storepath;
|
$storePath = $cachedInput->storepath;
|
||||||
$sha256 = $cachedInput->sha256hash;
|
$sha256 = $cachedInput->sha256hash;
|
||||||
$revision = $cachedInput->revision;
|
$revision = $cachedInput->revision;
|
||||||
@@ -217,7 +217,7 @@ sub fetchInput {
|
|||||||
($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);
|
($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);
|
||||||
|
|
||||||
# FIXME: time window between nix-prefetch-git and addTempRoot.
|
# FIXME: time window between nix-prefetch-git and addTempRoot.
|
||||||
addTempRoot($storePath);
|
$MACHINE_LOCAL_STORE->addTempRoot($storePath);
|
||||||
|
|
||||||
$self->{db}->txn_do(sub {
|
$self->{db}->txn_do(sub {
|
||||||
$self->{db}->resultset('CachedGitInputs')->update_or_create(
|
$self->{db}->resultset('CachedGitInputs')->update_or_create(
|
||||||
@@ -261,7 +261,7 @@ sub getCommits {
|
|||||||
|
|
||||||
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
|
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
|
||||||
|
|
||||||
my $out = grab(cmd => ["git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath);
|
my $out = grab(cmd => ["git", "--git-dir=.git", "log", "--pretty=format:%H%x09%an%x09%ae%x09%at", "$rev1..$rev2"], dir => $clonePath);
|
||||||
|
|
||||||
my $res = [];
|
my $res = [];
|
||||||
foreach my $line (split /\n/, $out) {
|
foreach my $line (split /\n/, $out) {
|
||||||
|
@@ -88,10 +88,6 @@ sub buildQueued {
|
|||||||
common(@_, [], 0);
|
common(@_, [], 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
sub buildStarted {
|
|
||||||
common(@_, [], 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
sub buildFinished {
|
sub buildFinished {
|
||||||
common(@_, 2);
|
common(@_, 2);
|
||||||
}
|
}
|
||||||
|
@@ -30,7 +30,7 @@ sub _iterate {
|
|||||||
$pulls->{$pull->{number}} = $pull;
|
$pulls->{$pull->{number}} = $pull;
|
||||||
}
|
}
|
||||||
# TODO Make Link header parsing more robust!!!
|
# TODO Make Link header parsing more robust!!!
|
||||||
my @links = split ',', $res->header("Link");
|
my @links = split ',', ($res->header("Link") // "");
|
||||||
my $next = "";
|
my $next = "";
|
||||||
foreach my $link (@links) {
|
foreach my $link (@links) {
|
||||||
my ($url, $rel) = split ";", $link;
|
my ($url, $rel) = split ";", $link;
|
||||||
|
@@ -1,89 +0,0 @@
|
|||||||
package Hydra::Plugin::HipChatNotification;
|
|
||||||
|
|
||||||
use strict;
|
|
||||||
use warnings;
|
|
||||||
use parent 'Hydra::Plugin';
|
|
||||||
use LWP::UserAgent;
|
|
||||||
use Hydra::Helper::CatalystUtils;
|
|
||||||
|
|
||||||
sub isEnabled {
|
|
||||||
my ($self) = @_;
|
|
||||||
return defined $self->{config}->{hipchat};
|
|
||||||
}
|
|
||||||
|
|
||||||
sub buildFinished {
|
|
||||||
my ($self, $topbuild, $dependents) = @_;
|
|
||||||
|
|
||||||
my $cfg = $self->{config}->{hipchat};
|
|
||||||
my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();
|
|
||||||
|
|
||||||
my $baseurl = $self->{config}->{'base_uri'} || "http://localhost:3000";
|
|
||||||
|
|
||||||
# Figure out to which rooms to send notification. For each email
|
|
||||||
# room, we send one aggregate message.
|
|
||||||
my %rooms;
|
|
||||||
foreach my $build ($topbuild, @{$dependents}) {
|
|
||||||
my $prevBuild = getPreviousBuild($build);
|
|
||||||
my $jobName = showJobName $build;
|
|
||||||
|
|
||||||
foreach my $room (@config) {
|
|
||||||
my $force = $room->{force};
|
|
||||||
next unless $jobName =~ /^$room->{jobs}$/;
|
|
||||||
|
|
||||||
# If build is cancelled or aborted, do not send email.
|
|
||||||
next if ! $force && ($build->buildstatus == 4 || $build->buildstatus == 3);
|
|
||||||
|
|
||||||
# If there is a previous (that is not cancelled or aborted) build
|
|
||||||
# with same buildstatus, do not send email.
|
|
||||||
next if ! $force && defined $prevBuild && ($build->buildstatus == $prevBuild->buildstatus);
|
|
||||||
|
|
||||||
$rooms{$room->{room}} //= { room => $room, builds => [] };
|
|
||||||
push @{$rooms{$room->{room}}->{builds}}, $build;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return if scalar keys %rooms == 0;
|
|
||||||
|
|
||||||
my ($authors, $nrCommits) = getResponsibleAuthors($topbuild, $self->{plugins});
|
|
||||||
|
|
||||||
# Send a message to each room.
|
|
||||||
foreach my $roomId (keys %rooms) {
|
|
||||||
my $room = $rooms{$roomId};
|
|
||||||
my @deps = grep { $_->id != $topbuild->id } @{$room->{builds}};
|
|
||||||
|
|
||||||
my $img =
|
|
||||||
$topbuild->buildstatus == 0 ? "$baseurl/static/images/checkmark_16.png" :
|
|
||||||
$topbuild->buildstatus == 2 ? "$baseurl/static/images/dependency_16.png" :
|
|
||||||
$topbuild->buildstatus == 4 ? "$baseurl/static/images/cancelled_16.png" :
|
|
||||||
"$baseurl/static/images/error_16.png";
|
|
||||||
|
|
||||||
my $msg = "";
|
|
||||||
$msg .= "<img src='$img'/> ";
|
|
||||||
$msg .= "Job <a href='$baseurl/job/${\$topbuild->jobset->get_column('project')}/${\$topbuild->jobset->get_column('name')}/${\$topbuild->get_column('job')}'>${\showJobName($topbuild)}</a>";
|
|
||||||
$msg .= " (and ${\scalar @deps} others)" if scalar @deps > 0;
|
|
||||||
$msg .= ": <a href='$baseurl/build/${\$topbuild->id}'>" . showStatus($topbuild) . "</a>";
|
|
||||||
|
|
||||||
if (scalar keys %{$authors} > 0) {
|
|
||||||
# FIXME: HTML escaping
|
|
||||||
my @x = map { "<a href='mailto:$authors->{$_}'>$_</a>" } (sort keys %{$authors});
|
|
||||||
$msg .= ", likely due to ";
|
|
||||||
$msg .= "$nrCommits commits by " if $nrCommits > 1;
|
|
||||||
$msg .= join(" or ", scalar @x > 1 ? join(", ", @x[0..scalar @x - 2]) : (), $x[-1]);
|
|
||||||
}
|
|
||||||
|
|
||||||
print STDERR "sending hipchat notification to room $roomId: $msg\n";
|
|
||||||
|
|
||||||
my $ua = LWP::UserAgent->new();
|
|
||||||
my $resp = $ua->post('https://api.hipchat.com/v1/rooms/message?format=json&auth_token=' . $room->{room}->{token}, {
|
|
||||||
room_id => $roomId,
|
|
||||||
from => 'Hydra',
|
|
||||||
message => $msg,
|
|
||||||
message_format => 'html',
|
|
||||||
notify => $room->{room}->{notify} || 0,
|
|
||||||
color => $topbuild->buildstatus == 0 ? 'green' : 'red' });
|
|
||||||
|
|
||||||
print STDERR $resp->status_line, ": ", $resp->decoded_content,"\n" if !$resp->is_success;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
1;
|
|
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
|
|||||||
use File::Path;
|
use File::Path;
|
||||||
use Hydra::Helper::Nix;
|
use Hydra::Helper::Nix;
|
||||||
use Hydra::Helper::Exec;
|
use Hydra::Helper::Exec;
|
||||||
use Nix::Store;
|
|
||||||
use Fcntl qw(:flock);
|
use Fcntl qw(:flock);
|
||||||
|
|
||||||
sub supportedInputTypes {
|
sub supportedInputTypes {
|
||||||
@@ -68,9 +67,9 @@ sub fetchInput {
|
|||||||
(my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
|
(my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
|
||||||
{uri => $uri, branch => $branch, revision => $revision});
|
{uri => $uri, branch => $branch, revision => $revision});
|
||||||
|
|
||||||
addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
||||||
|
|
||||||
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
|
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
|
||||||
$storePath = $cachedInput->storepath;
|
$storePath = $cachedInput->storepath;
|
||||||
$sha256 = $cachedInput->sha256hash;
|
$sha256 = $cachedInput->sha256hash;
|
||||||
} else {
|
} else {
|
||||||
@@ -85,7 +84,7 @@ sub fetchInput {
|
|||||||
($sha256, $storePath) = split ' ', $stdout;
|
($sha256, $storePath) = split ' ', $stdout;
|
||||||
|
|
||||||
# FIXME: time window between nix-prefetch-hg and addTempRoot.
|
# FIXME: time window between nix-prefetch-hg and addTempRoot.
|
||||||
addTempRoot($storePath);
|
$MACHINE_LOCAL_STORE->addTempRoot($storePath);
|
||||||
|
|
||||||
$self->{db}->txn_do(sub {
|
$self->{db}->txn_do(sub {
|
||||||
$self->{db}->resultset('CachedHgInputs')->update_or_create(
|
$self->{db}->resultset('CachedHgInputs')->update_or_create(
|
||||||
|
@@ -5,7 +5,6 @@ use warnings;
|
|||||||
use parent 'Hydra::Plugin';
|
use parent 'Hydra::Plugin';
|
||||||
use POSIX qw(strftime);
|
use POSIX qw(strftime);
|
||||||
use Hydra::Helper::Nix;
|
use Hydra::Helper::Nix;
|
||||||
use Nix::Store;
|
|
||||||
|
|
||||||
sub supportedInputTypes {
|
sub supportedInputTypes {
|
||||||
my ($self, $inputTypes) = @_;
|
my ($self, $inputTypes) = @_;
|
||||||
@@ -30,7 +29,7 @@ sub fetchInput {
|
|||||||
{srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
|
{srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
|
||||||
{rows => 1, order_by => "lastseen DESC"});
|
{rows => 1, order_by => "lastseen DESC"});
|
||||||
|
|
||||||
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
|
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
|
||||||
$storePath = $cachedInput->storepath;
|
$storePath = $cachedInput->storepath;
|
||||||
$sha256 = $cachedInput->sha256hash;
|
$sha256 = $cachedInput->sha256hash;
|
||||||
$timestamp = $cachedInput->timestamp;
|
$timestamp = $cachedInput->timestamp;
|
||||||
@@ -46,7 +45,7 @@ sub fetchInput {
|
|||||||
}
|
}
|
||||||
chomp $storePath;
|
chomp $storePath;
|
||||||
|
|
||||||
$sha256 = (queryPathInfo($storePath, 0))[1] or die;
|
$sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;
|
||||||
|
|
||||||
($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
|
($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
|
||||||
{srcpath => $uri, sha256hash => $sha256});
|
{srcpath => $uri, sha256hash => $sha256});
|
||||||
|
@@ -12,7 +12,74 @@ use Try::Tiny;

 sub isEnabled {
     my ($self) = @_;
-    return defined $self->{config}->{runcommand};
+
+    return areStaticCommandsEnabled($self->{config}) || areDynamicCommandsEnabled($self->{config});
+}
+
+sub areStaticCommandsEnabled {
+    my ($config) = @_;
+
+    if (defined $config->{runcommand}) {
+        return 1;
+    }
+
+    return 0;
+}
+
+sub areDynamicCommandsEnabled {
+    my ($config) = @_;
+
+    if ((defined $config->{dynamicruncommand})
+        && $config->{dynamicruncommand}->{enable}) {
+        return 1;
+    }
+
+    return 0;
+}
+
+sub isBuildEligibleForDynamicRunCommand {
+    my ($build) = @_;
+
+    if ($build->get_column("buildstatus") != 0) {
+        return 0;
+    }
+
+    if ($build->get_column("job") =~ "^runCommandHook\..+") {
+        my $out = $build->buildoutputs->find({name => "out"});
+        if (!defined $out) {
+            warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: no output named 'out'.";
+            return 0;
+        }
+
+        my $path = $out->path;
+        if (-l $path) {
+            $path = readlink($path);
+        }
+
+        if (! -e $path) {
+            warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output doesn't exist locally. This is a bug.";
+            return 0;
+        }
+
+        if (! -x $path) {
+            warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output is not executable.";
+            return 0;
+        }
+
+        if (! -f $path) {
+            warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The 'out' output is not a regular file or symlink.";
+            return 0;
+        }
+
+        if (! $build->jobset->supportsDynamicRunCommand()) {
+            warn "DynamicRunCommand hook on " . $build->job . " (" . $build->id . ") rejected: The project or jobset don't have dynamic runcommand enabled.";
+            return 0;
+        }
+
+        return 1;
+    }
+
+    return 0;
 }

 sub configSectionMatches {

@@ -43,10 +110,11 @@ sub eventMatches {
 }

 sub fanoutToCommands {
-    my ($config, $event, $project, $jobset, $job) = @_;
+    my ($config, $event, $build) = @_;

     my @commands;

+    # Calculate all the statically defined commands to execute
     my $cfg = $config->{runcommand};
     my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();

@@ -55,9 +123,10 @@ sub fanoutToCommands {
         next unless eventMatches($conf, $event);
         next unless configSectionMatches(
             $matcher,
-            $project,
-            $jobset,
-            $job);
+            $build->jobset->get_column('project'),
+            $build->jobset->get_column('name'),
+            $build->get_column('job')
+        );

         if (!defined($conf->{command})) {
             warn "<runcommand> section for '$matcher' lacks a 'command' option";

@@ -70,6 +139,18 @@ sub fanoutToCommands {
         })
     }

+    # Calculate all dynamically defined commands to execute
+    if (areDynamicCommandsEnabled($config)) {
+        if (isBuildEligibleForDynamicRunCommand($build)) {
+            my $job = $build->get_column('job');
+            my $out = $build->buildoutputs->find({name => "out"});
+            push(@commands, {
+                matcher => "DynamicRunCommand($job)",
+                command => $out->path
+            })
+        }
+    }
+
     return \@commands;
 }

@@ -138,9 +219,7 @@ sub buildFinished {
     my $commandsToRun = fanoutToCommands(
         $self->{config},
         $event,
-        $build->project->get_column('name'),
-        $build->jobset->get_column('name'),
-        $build->get_column('job')
+        $build
     );

     if (@$commandsToRun == 0) {
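For reference, a hydra.conf sketch that enables both flavours of RunCommand handled by the plugin above. The <dynamicruncommand> enable flag is taken directly from the code; the job matcher key inside <runcommand> is assumed from Hydra's documented static RunCommand usage and does not appear in this diff:

    <runcommand>
      job = myProject:*:*
      command = /var/lib/hydra/notify-build-finished.sh
    </runcommand>

    <dynamicruncommand>
      enable = 1
    </dynamicruncommand>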
@@ -14,6 +14,7 @@ use Nix::Config;
|
|||||||
use Nix::Store;
|
use Nix::Store;
|
||||||
use Hydra::Model::DB;
|
use Hydra::Model::DB;
|
||||||
use Hydra::Helper::CatalystUtils;
|
use Hydra::Helper::CatalystUtils;
|
||||||
|
use Hydra::Helper::Nix;
|
||||||
|
|
||||||
sub isEnabled {
|
sub isEnabled {
|
||||||
my ($self) = @_;
|
my ($self) = @_;
|
||||||
@@ -92,7 +93,7 @@ sub buildFinished {
|
|||||||
my $hash = substr basename($path), 0, 32;
|
my $hash = substr basename($path), 0, 32;
|
||||||
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0);
|
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0);
|
||||||
my $system;
|
my $system;
|
||||||
if (defined $deriver and isValidPath($deriver)) {
|
if (defined $deriver and $MACHINE_LOCAL_STORE->isValidPath($deriver)) {
|
||||||
$system = derivationFromPath($deriver)->{platform};
|
$system = derivationFromPath($deriver)->{platform};
|
||||||
}
|
}
|
||||||
foreach my $reference (@{$refs}) {
|
foreach my $reference (@{$refs}) {
|
||||||
|
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
|
|||||||
use Hydra::Helper::Exec;
|
use Hydra::Helper::Exec;
|
||||||
use Hydra::Helper::Nix;
|
use Hydra::Helper::Nix;
|
||||||
use IPC::Run;
|
use IPC::Run;
|
||||||
use Nix::Store;
|
|
||||||
|
|
||||||
sub supportedInputTypes {
|
sub supportedInputTypes {
|
||||||
my ($self, $inputTypes) = @_;
|
my ($self, $inputTypes) = @_;
|
||||||
@@ -45,9 +44,9 @@ sub fetchInput {
|
|||||||
(my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
|
(my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
|
||||||
{uri => $uri, revision => $revision});
|
{uri => $uri, revision => $revision});
|
||||||
|
|
||||||
addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
|
||||||
|
|
||||||
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
|
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
|
||||||
$storePath = $cachedInput->storepath;
|
$storePath = $cachedInput->storepath;
|
||||||
$sha256 = $cachedInput->sha256hash;
|
$sha256 = $cachedInput->sha256hash;
|
||||||
} else {
|
} else {
|
||||||
@@ -62,16 +61,16 @@ sub fetchInput {
|
|||||||
die "error checking out Subversion repo at `$uri':\n$stderr" if $res;
|
die "error checking out Subversion repo at `$uri':\n$stderr" if $res;
|
||||||
|
|
||||||
if ($type eq "svn-checkout") {
|
if ($type eq "svn-checkout") {
|
||||||
$storePath = addToStore($wcPath, 1, "sha256");
|
$storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
|
||||||
} else {
|
} else {
|
||||||
# Hm, if the Nix Perl bindings supported filters in
|
# Hm, if the Nix Perl bindings supported filters in
|
||||||
# addToStore(), then we wouldn't need to make a copy here.
|
# addToStore(), then we wouldn't need to make a copy here.
|
||||||
my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
|
my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
|
||||||
(system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
|
(system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
|
||||||
$storePath = addToStore("$tmpDir/source", 1, "sha256");
|
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
|
||||||
}
|
}
|
||||||
|
|
||||||
$sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
|
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;
|
||||||
|
|
||||||
$self->{db}->txn_do(sub {
|
$self->{db}->txn_do(sub {
|
||||||
$self->{db}->resultset('CachedSubversionInputs')->update_or_create(
|
$self->{db}->resultset('CachedSubversionInputs')->update_or_create(
|
||||||
|
@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
|
|||||||
=head2 path
|
=head2 path
|
||||||
|
|
||||||
data_type: 'text'
|
data_type: 'text'
|
||||||
is_nullable: 0
|
is_nullable: 1
|
||||||
|
|
||||||
=cut
|
=cut
|
||||||
|
|
||||||
@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
|
|||||||
"name",
|
"name",
|
||||||
{ data_type => "text", is_nullable => 0 },
|
{ data_type => "text", is_nullable => 0 },
|
||||||
"path",
|
"path",
|
||||||
{ data_type => "text", is_nullable => 0 },
|
{ data_type => "text", is_nullable => 1 },
|
||||||
);
|
);
|
||||||
|
|
||||||
=head1 PRIMARY KEY
|
=head1 PRIMARY KEY
|
||||||
@@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
|
|||||||
);
|
);
|
||||||
|
|
||||||
|
|
||||||
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
|
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
|
||||||
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg
|
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng
|
||||||
|
|
||||||
my %hint = (
|
my %hint = (
|
||||||
columns => [
|
columns => [
|
||||||
|
@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
|
|||||||
=head2 path
|
=head2 path
|
||||||
|
|
||||||
data_type: 'text'
|
data_type: 'text'
|
||||||
is_nullable: 0
|
is_nullable: 1
|
||||||
|
|
||||||
=cut
|
=cut
|
||||||
|
|
||||||
@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
|
|||||||
"name",
|
"name",
|
||||||
{ data_type => "text", is_nullable => 0 },
|
{ data_type => "text", is_nullable => 0 },
|
||||||
"path",
|
"path",
|
||||||
{ data_type => "text", is_nullable => 0 },
|
{ data_type => "text", is_nullable => 1 },
|
||||||
);
|
);
|
||||||
|
|
||||||
=head1 PRIMARY KEY
|
=head1 PRIMARY KEY
|
||||||
@@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
|
|||||||
);
|
);
|
||||||
|
|
||||||
|
|
||||||
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
|
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
|
||||||
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw
|
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q
|
||||||
|
|
||||||
|
|
||||||
# You can replace this text with custom code or comments, and it will be preserved on regeneration
|
# You can replace this text with custom code or comments, and it will be preserved on regeneration
|
||||||
|
@@ -105,4 +105,6 @@ __PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);

+__PACKAGE__->mk_group_accessors('column' => 'has_error');
+
1;
@@ -155,6 +155,12 @@ __PACKAGE__->table("jobsets");
data_type: 'text'
is_nullable: 1

+=head2 enable_dynamic_run_command
+
+data_type: 'boolean'
+default_value: false
+is_nullable: 0
+
=cut

__PACKAGE__->add_columns(
@@ -207,6 +213,8 @@ __PACKAGE__->add_columns(
{ data_type => "integer", default_value => 0, is_nullable => 0 },
"flake",
{ data_type => "text", is_nullable => 1 },
+"enable_dynamic_run_command",
+{ data_type => "boolean", default_value => \"false", is_nullable => 0 },
);

=head1 PRIMARY KEY
@@ -354,8 +362,8 @@ __PACKAGE__->has_many(
);


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-08 22:24:10
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:cQOnMitrWGMoJX6kZGNW+w
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-24 14:17:33
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:7wPE5ebeVTkenMCWG9Sgcg

use JSON::MaybeXS;

@@ -378,6 +386,15 @@ __PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);

+__PACKAGE__->mk_group_accessors('column' => 'has_error');
+
+sub supportsDynamicRunCommand {
+    my ($self) = @_;
+
+    return $self->get_column('enable_dynamic_run_command') == 1
+        && $self->project->supportsDynamicRunCommand();
+}
+
sub as_json {
my $self = shift;

@@ -406,6 +423,7 @@ sub as_json {

# boolean_columns
"enableemail" => $self->get_column("enableemail") ? JSON::MaybeXS::true : JSON::MaybeXS::false,
+"enable_dynamic_run_command" => $self->get_column("enable_dynamic_run_command") ? JSON::MaybeXS::true : JSON::MaybeXS::false,
"visible" => $self->get_column("hidden") ? JSON::MaybeXS::false : JSON::MaybeXS::true,

"inputs" => { map { $_->name => $_ } $self->jobsetinputs }
@@ -88,6 +88,12 @@ __PACKAGE__->table("projects");
data_type: 'text'
is_nullable: 1

+=head2 enable_dynamic_run_command
+
+data_type: 'boolean'
+default_value: false
+is_nullable: 0
+
=cut

__PACKAGE__->add_columns(
@@ -111,6 +117,8 @@ __PACKAGE__->add_columns(
{ data_type => "text", is_nullable => 1 },
"declvalue",
{ data_type => "text", is_nullable => 1 },
+"enable_dynamic_run_command",
+{ data_type => "boolean", default_value => \"false", is_nullable => 0 },
);

=head1 PRIMARY KEY
@@ -228,8 +236,8 @@ Composing rels: L</projectmembers> -> username
__PACKAGE__->many_to_many("usernames", "projectmembers", "username");


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-08 22:24:10
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:r/wbX3FAm5/OFrrwOQL5fA
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-01-24 14:20:32
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:PtXDyT8Pc7LYhhdEG39EKQ

use JSON::MaybeXS;

@@ -238,6 +246,12 @@ sub builds {
return $self->jobsets->related_resultset('builds');
};

+sub supportsDynamicRunCommand {
+    my ($self) = @_;
+
+    return $self->get_column('enable_dynamic_run_command') == 1;
+}
+
sub as_json {
my $self = shift;

@@ -251,6 +265,7 @@ sub as_json {

# boolean_columns
"enabled" => $self->get_column("enabled") ? JSON::MaybeXS::true : JSON::MaybeXS::false,
+"enable_dynamic_run_command" => $self->get_column("enable_dynamic_run_command") ? JSON::MaybeXS::true : JSON::MaybeXS::false,
"hidden" => $self->get_column("hidden") ? JSON::MaybeXS::true : JSON::MaybeXS::false,

"jobsets" => [ map { $_->name } $self->jobsets ]
@@ -216,7 +216,7 @@ sub json_hint {

sub _authenticator() {
my $authenticator = Crypt::Passphrase->new(
-encoder => 'Argon2',
+encoder => { module => 'Argon2', output_size => 16 },
validators => [
(sub {
my ($password, $hash) = @_;
src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm (new file, 30 lines)
@@ -0,0 +1,30 @@
+package Hydra::Schema::ResultSet::EvaluationErrors;
+
+use strict;
+use utf8;
+use warnings;
+
+use parent 'DBIx::Class::ResultSet';
+
+use Storable qw(dclone);
+
+__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
+
+# Exclude expensive error message values unless explicitly requested, and
+# replace them with a summary field describing their presence/absence.
+sub search_rs {
+    my ( $class, $query, $attrs ) = @_;
+
+    if ($attrs) {
+        $attrs = dclone($attrs);
+    }
+
+    unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
+        $attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
+    }
+    unless (exists $attrs->{'+columns'}->{'errormsg'}) {
+        push @{ $attrs->{'remove_columns'} }, 'errormsg';
+    }
+
+    return $class->next::method($query, $attrs);
+}
src/lib/Hydra/Schema/ResultSet/Jobsets.pm (new file, 30 lines)
@@ -0,0 +1,30 @@
+package Hydra::Schema::ResultSet::Jobsets;
+
+use strict;
+use utf8;
+use warnings;
+
+use parent 'DBIx::Class::ResultSet';
+
+use Storable qw(dclone);
+
+__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
+
+# Exclude expensive error message values unless explicitly requested, and
+# replace them with a summary field describing their presence/absence.
+sub search_rs {
+    my ( $class, $query, $attrs ) = @_;
+
+    if ($attrs) {
+        $attrs = dclone($attrs);
+    }
+
+    unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
+        $attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
+    }
+    unless (exists $attrs->{'+columns'}->{'errormsg'}) {
+        push @{ $attrs->{'remove_columns'} }, 'errormsg';
+    }
+
+    return $class->next::method($query, $attrs);
+}
@@ -8,6 +8,7 @@ use MIME::Base64;
use Nix::Manifest;
use Nix::Store;
use Nix::Utils;
+use Hydra::Helper::Nix;
use base qw/Catalyst::View/;

sub process {
@@ -17,7 +18,7 @@ sub process {

$c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type

-my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
+my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1);

my $info;
$info .= "StorePath: $storePath\n";
@@ -28,8 +29,8 @@ sub process {
$info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
if (defined $deriver) {
$info .= "Deriver: " . basename $deriver . "\n";
-if (isValidPath($deriver)) {
-my $drv = derivationFromPath($deriver);
+if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) {
+my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver);
$info .= "System: $drv->{platform}\n";
}
}
@@ -16,7 +16,10 @@ sub process {

my $tail = int($c->stash->{tail} // "0");

-if ($logPath =~ /\.bz2$/) {
+if ($logPath =~ /\.zst$/) {
+    my $doTail = $tail ? "| tail -n '$tail'" : "";
+    open($fh, "-|", "zstd -dc < '$logPath' $doTail") or die;
+} elsif ($logPath =~ /\.bz2$/) {
my $doTail = $tail ? "| tail -n '$tail'" : "";
open($fh, "-|", "bzip2 -dc < '$logPath' $doTail") or die;
} else {
@@ -6,6 +6,7 @@ use base 'Catalyst::View::TT';
use Template::Plugin::HTML;
use Hydra::Helper::Nix;
use Time::Seconds;
+use Digest::SHA qw(sha1_hex);

__PACKAGE__->config(
TEMPLATE_EXTENSION => '.tt',
@@ -25,8 +26,14 @@ __PACKAGE__->config(
makeNameTextForJobset
relativeDuration
stripSSHUser
+metricDivId
/]);

+sub metricDivId {
+    my ($self, $c, $text) = @_;
+    return "metric-" . sha1_hex($text);
+}
+
sub buildLogExists {
my ($self, $c, $build) = @_;
return 1 if defined $c->config->{log_prefix};
@@ -1,22 +0,0 @@
-PERL_MODULES = \
-	$(wildcard *.pm) \
-	$(wildcard Hydra/*.pm) \
-	$(wildcard Hydra/Helper/*.pm) \
-	$(wildcard Hydra/Model/*.pm) \
-	$(wildcard Hydra/View/*.pm) \
-	$(wildcard Hydra/Schema/*.pm) \
-	$(wildcard Hydra/Schema/Result/*.pm) \
-	$(wildcard Hydra/Schema/ResultSet/*.pm) \
-	$(wildcard Hydra/Controller/*.pm) \
-	$(wildcard Hydra/Base/*.pm) \
-	$(wildcard Hydra/Base/Controller/*.pm) \
-	$(wildcard Hydra/Script/*.pm) \
-	$(wildcard Hydra/Component/*.pm) \
-	$(wildcard Hydra/Event/*.pm) \
-	$(wildcard Hydra/Plugin/*.pm)
-
-EXTRA_DIST = \
-	$(PERL_MODULES)
-
-hydradir = $(libexecdir)/hydra/lib
-nobase_hydra_DATA = $(PERL_MODULES)
@@ -2,7 +2,8 @@

#include <pqxx/pqxx>

-#include "util.hh"
+#include <nix/util/environment-variables.hh>
+#include <nix/util/util.hh>


struct Connection : pqxx::connection
@@ -18,7 +19,7 @@ struct Connection : pqxx::connection
std::string upper_prefix = "DBI:Pg:";

if (hasPrefix(s, lower_prefix) || hasPrefix(s, upper_prefix)) {
-return concatStringsSep(" ", tokenizeString<Strings>(string(s, lower_prefix.size()), ";"));
+return concatStringsSep(" ", tokenizeString<Strings>(std::string(s, lower_prefix.size()), ";"));
}

throw Error("$HYDRA_DBI does not denote a PostgreSQL database");
@@ -2,7 +2,8 @@

#include <map>

-#include "util.hh"
+#include <nix/util/file-system.hh>
+#include <nix/util/util.hh>

struct HydraConfig
{
@@ -17,7 +18,7 @@ struct HydraConfig
if (hydraConfigFile && pathExists(*hydraConfigFile)) {

for (auto line : tokenizeString<Strings>(readFile(*hydraConfigFile), "\n")) {
-line = trim(string(line, 0, line.find('#')));
+line = trim(std::string(line, 0, line.find('#')));

auto eq = line.find('=');
if (eq == std::string::npos) continue;
src/libhydra/meson.build (new file, 5 lines)
@@ -0,0 +1,5 @@
+libhydra_inc = include_directories('.')
+
+libhydra_dep = declare_dependency(
+  include_directories: [libhydra_inc],
+)
src/meson.build (new file, 85 lines)
@@ -0,0 +1,85 @@
+# Native code
+subdir('libhydra')
+subdir('hydra-evaluator')
+subdir('hydra-queue-runner')
+
+hydra_libexecdir = get_option('libexecdir') / 'hydra'
+
+# Data and interpreted
+foreach dir : ['lib', 'root']
+  install_subdir(dir,
+    install_dir: hydra_libexecdir,
+  )
+endforeach
+subdir('sql')
+subdir('ttf')
+
+# Static files for website
+
+hydra_libexecdir_static = hydra_libexecdir / 'root' / 'static'
+
+## Bootstrap
+
+bootstrap_name = 'bootstrap-4.3.1-dist'
+bootstrap = custom_target(
+  'extract-bootstrap',
+  input: 'root' / (bootstrap_name + '.zip'),
+  output: bootstrap_name,
+  command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
+)
+custom_target(
+  'name-bootstrap',
+  input: bootstrap,
+  output: 'bootstrap',
+  command: ['cp', '-r', '@INPUT@' , '@OUTPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static,
+)
+
+## Flot
+
+custom_target(
+  'extract-flot',
+  input: 'root' / 'flot-0.8.3.zip',
+  output: 'flot',
+  command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static / 'js',
+)
+
+## Fontawesome
+
+fontawesome_name = 'fontawesome-free-5.10.2-web'
+fontawesome = custom_target(
+  'extract-fontawesome',
+  input: 'root' / (fontawesome_name + '.zip'),
+  output: fontawesome_name,
+  command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
+)
+custom_target(
+  'name-fontawesome-css',
+  input: fontawesome,
+  output: 'css',
+  command: ['cp', '-r', '@INPUT@/css', '@OUTPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static / 'fontawesome',
+)
+custom_target(
+  'name-fontawesome-webfonts',
+  input: fontawesome,
+  output: 'webfonts',
+  command: ['cp', '-r', '@INPUT@/webfonts', '@OUTPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static / 'fontawesome',
+)
+
+# Scripts
+
+install_subdir('script',
+  install_dir: get_option('bindir'),
+  exclude_files: [
+    'hydra-dev-server',
+  ],
+  install_mode: 'rwxr-xr-x',
+  strip_directory: true,
+)
@@ -1,39 +0,0 @@
-TEMPLATES = $(wildcard *.tt)
-STATIC = \
-	$(wildcard static/images/*) \
-	$(wildcard static/css/*) \
-	static/js/bootbox.min.js \
-	static/js/popper.min.js \
-	static/js/common.js \
-	static/js/jquery/jquery-3.4.1.min.js \
-	static/js/jquery/jquery-ui-1.10.4.min.js
-
-FLOT = flot-0.8.3.zip
-BOOTSTRAP = bootstrap-4.3.1-dist.zip
-FONTAWESOME = fontawesome-free-5.10.2-web.zip
-
-ZIPS = $(FLOT) $(BOOTSTRAP) $(FONTAWESOME)
-
-EXTRA_DIST = $(TEMPLATES) $(STATIC) $(ZIPS)
-
-hydradir = $(libexecdir)/hydra/root
-nobase_hydra_DATA = $(EXTRA_DIST)
-
-all:
-	mkdir -p $(srcdir)/static/js
-	unzip -u -d $(srcdir)/static $(BOOTSTRAP)
-	rm -rf $(srcdir)/static/bootstrap
-	mv $(srcdir)/static/$(basename $(BOOTSTRAP)) $(srcdir)/static/bootstrap
-	unzip -u -d $(srcdir)/static/js $(FLOT)
-	unzip -u -d $(srcdir)/static $(FONTAWESOME)
-	rm -rf $(srcdir)/static/fontawesome
-	mv $(srcdir)/static/$(basename $(FONTAWESOME)) $(srcdir)/static/fontawesome
-
-install-data-local: $(ZIPS)
-	mkdir -p $(hydradir)/static/js
-	cp -prvd $(srcdir)/static/js/* $(hydradir)/static/js
-	mkdir -p $(hydradir)/static/bootstrap
-	cp -prvd $(srcdir)/static/bootstrap/* $(hydradir)/static/bootstrap
-	mkdir -p $(hydradir)/static/fontawesome/{css,webfonts}
-	cp -prvd $(srcdir)/static/fontawesome/css/* $(hydradir)/static/fontawesome/css
-	cp -prvd $(srcdir)/static/fontawesome/webfonts/* $(hydradir)/static/fontawesome/webfonts
@@ -33,7 +33,7 @@
<div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
-<form>
+<form id="signin-form">
<div class="modal-body">
<div class="form-group">
<label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
</div>
</div>
<div class="modal-footer">
-<button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
+<button type="submit" class="btn btn-primary">Sign in</button>
<button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
</div>
</form>
@@ -57,10 +57,11 @@

function finishSignOut() { }

-$("#do-signin").click(function() {
+$("#signin-form").submit(function(e) {
+e.preventDefault();
requestJSON({
url: "[% c.uri_for('/login') %]",
-data: $(this).parents("form").serialize(),
+data: $(this).serialize(),
type: 'POST',
success: function(data) {
window.location.reload();
@@ -82,7 +83,7 @@
function onGoogleSignIn(googleUser) {
requestJSON({
url: "[% c.uri_for('/google-login') %]",
-data: "id_token=" + googleUser.getAuthResponse().id_token,
+data: "id_token=" + googleUser.credential,
type: 'POST',
success: function(data) {
window.location.reload();
@@ -91,9 +92,6 @@
return false;
};

-$("#google-signin").click(function() {
-$(".g-signin2:first-child > div").click();
-});
</script>
[% END %]

@@ -4,6 +4,6 @@

<div class="dep-tree">
<ul class="tree">
-[% INCLUDE renderNode node=buildTimeGraph %]
+[% INCLUDE renderNode node=buildTimeGraph isRoot=1 %]
</ul>
</div>
@@ -61,21 +61,7 @@ END;
<td>[% IF step.busy != 0 || ((step.machine || step.starttime) && (step.status == 0 || step.status == 1 || step.status == 3 || step.status == 4 || step.status == 7)); INCLUDE renderMachineName machine=step.machine; ELSE; "<em>n/a</em>"; END %]</td>
<td class="step-status">
[% IF step.busy != 0 %]
-[% IF step.busy == 1 %]
-<strong>Preparing</strong>
-[% ELSIF step.busy == 10 %]
-<strong>Connecting</strong>
-[% ELSIF step.busy == 20 %]
-<strong>Sending inputs</strong>
-[% ELSIF step.busy == 30 %]
-<strong>Building</strong>
-[% ELSIF step.busy == 40 %]
-<strong>Receiving outputs</strong>
-[% ELSIF step.busy == 50 %]
-<strong>Post-processing</strong>
-[% ELSE %]
-<strong>Unknown state</strong>
-[% END %]
+[% INCLUDE renderBusyStatus %]
[% ELSIF step.status == 0 %]
[% IF step.isnondeterministic %]
<span class="warn">Succeeded with non-determistic result</span>
@@ -149,7 +135,7 @@ END;
[% IF build.dependents %]<li class="nav-item"><a class="nav-link" href="#tabs-usedby" data-toggle="tab">Used By</a></li>[% END%]
[% IF drvAvailable %]<li class="nav-item"><a class="nav-link" href="#tabs-build-deps" data-toggle="tab">Build Dependencies</a></li>[% END %]
[% IF localStore && available %]<li class="nav-item"><a class="nav-link" href="#tabs-runtime-deps" data-toggle="tab">Runtime Dependencies</a></li>[% END %]
-[% IF runcommandlogs.size() > 0 %]<li class="nav-item"><a class="nav-link" href="#tabs-runcommandlogs" data-toggle="tab">RunCommand Logs</a></li>[% END %]
+[% IF runcommandlogProblem || runcommandlogs.size() > 0 %]<li class="nav-item"><a class="nav-link" href="#tabs-runcommandlogs" data-toggle="tab">RunCommand Logs[% IF runcommandlogProblem %] <span class="badge badge-warning">Disabled</span>[% END %]</a></li>[% END %]
</ul>

<div id="generic-tabs" class="tab-content">
@@ -481,14 +467,27 @@ END;
[% END %]

[% IF drvAvailable %]
-[% INCLUDE makeLazyTab tabName="tabs-build-deps" uri=c.uri_for('/build' build.id 'build-deps') %]
+[% INCLUDE makeLazyTab tabName="tabs-build-deps" uri=c.uri_for('/build' build.id 'build-deps') callback="makeTreeCollapsible" %]
[% END %]

[% IF available %]
-[% INCLUDE makeLazyTab tabName="tabs-runtime-deps" uri=c.uri_for('/build' build.id 'runtime-deps') %]
+[% INCLUDE makeLazyTab tabName="tabs-runtime-deps" uri=c.uri_for('/build' build.id 'runtime-deps') callback="makeTreeCollapsible" %]
[% END %]

<div id="tabs-runcommandlogs" class="tab-pane">
+[% IF runcommandlogProblem %]
+<div class="alert alert-warning" role="alert">
+[% IF runcommandlogProblem == "disabled-server" %]
+This server does not enable Dynamic RunCommand support.
+[% ELSIF runcommandlogProblem == "disabled-project" %]
+This project does not enable Dynamic RunCommand support.
+[% ELSIF runcommandlogProblem == "disabled-jobset" %]
+This jobset does not enable Dynamic RunCommand support.
+[% ELSE %]
+Dynamic RunCommand is not enabled: [% runcommandlogProblem %].
+[% END %]
+</div>
+[% END %]
<div class="d-flex flex-column">
[% FOREACH runcommandlog IN runcommandlogs %]
<div class="p-2 border-bottom">
@@ -564,7 +563,7 @@ END;

[% IF eval.flake %]

-<p>If you have <a href='https://nixos.org/nix/download.html'>Nix
+<p>If you have <a href='https://nixos.org/download/'>Nix
installed</a>, you can reproduce this build on your own machine by
running the following command:</p>

@@ -574,7 +573,7 @@ END;

[% ELSE %]

-<p>If you have <a href='https://nixos.org/nix/download.html'>Nix
+<p>If you have <a href='https://nixos.org/download/'>Nix
installed</a>, you can reproduce this build on your own machine by
downloading <a [% HTML.attributes(href => url) %]>a script</a>
that checks out all inputs of the build and then invokes Nix to
@@ -91,6 +91,17 @@ BLOCK renderDuration;
duration % 60 %]s[%
END;

+BLOCK renderDrvInfo;
+drvname = step.drvpath
+.substr(11) # strip `/nix/store/`
+.split('-').slice(1).join("-") # strip hash part
+.substr(0, -4); # strip `.drv`
+IF drvname != releasename;
+IF step.type == 0; action = "Build"; ELSE; action = "Substitution"; END;
+IF drvname; %]<em> ([% action %] of [% drvname %])</em>[% END;
+END;
+END;
+

BLOCK renderBuildListHeader %]
<table class="table table-striped table-condensed clickable-rows">
@@ -131,7 +142,12 @@ BLOCK renderBuildListBody;
[% END %]
<td><a class="row-link" href="[% link %]">[% build.id %]</a></td>
[% IF !hideJobName %]
-<td><a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</td>
+<td>
+<a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</a>
+[% IF showStepName %]
+[% INCLUDE renderDrvInfo step=build.buildsteps releasename=build.nixname %]
+[% END %]
+</td>
[% END %]
<td class="nowrap">[% t = showSchedulingInfo ? build.timestamp : build.stoptime; IF t; INCLUDE renderRelativeDate timestamp=(showSchedulingInfo ? build.timestamp : build.stoptime); ELSE; "-"; END %]</td>
<td>[% !showSchedulingInfo and build.get_column('releasename') ? build.get_column('releasename') : build.nixname %]</td>
@@ -245,6 +261,27 @@ BLOCK renderBuildStatusIcon;
END;


+BLOCK renderBusyStatus;
+IF step.busy == 1 %]
+<strong>Preparing</strong>
+[% ELSIF step.busy == 10 %]
+<strong>Connecting</strong>
+[% ELSIF step.busy == 20 %]
+<strong>Sending inputs</strong>
+[% ELSIF step.busy == 30 %]
+<strong>Building</strong>
+[% ELSIF step.busy == 35 %]
+<strong>Waiting to receive outputs</strong>
+[% ELSIF step.busy == 40 %]
+<strong>Receiving outputs</strong>
+[% ELSIF step.busy == 50 %]
+<strong>Post-processing</strong>
+[% ELSE %]
+<strong>Unknown state</strong>
+[% END;
+END;
+
+
BLOCK renderStatus;
IF build.finished;
buildstatus = build.buildstatus;
@@ -374,7 +411,7 @@ BLOCK renderInputDiff; %]
[% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
[% IF bi1.type == "git" %]
<tr><td>
-<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
+<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 12) _ ' to ' _ bi2.revision.substr(0, 12)) %]</tt>
</td></tr>
[% ELSE %]
<tr><td>
@@ -476,7 +513,7 @@ BLOCK renderEvals %]
ELSE %]
-
[% END %]
-[% IF eval.evaluationerror.errormsg %]
+[% IF eval.evaluationerror.has_error %]
<span class="badge badge-warning">Eval Errors</span>
[% END %]
</td>
@@ -520,7 +557,11 @@ BLOCK makeLazyTab %]
<center><span class="spinner-border spinner-border-sm"/></center>
</div>
<script>
-$(function() { makeLazyTab("[% tabName %]", "[% uri %]"); });
+[% IF callback.defined %]
+$(function() { makeLazyTab("[% tabName %]", "[% uri %]", [% callback %] ); });
+[% ELSE %]
+$(function() { makeLazyTab("[% tabName %]", "[% uri %]", null ); });
+[% END %]
</script>
[% END;

@@ -598,7 +639,7 @@ BLOCK renderJobsetOverview %]
<td>[% HTML.escape(j.description) %]</td>
<td>[% IF j.lastcheckedtime;
INCLUDE renderDateTime timestamp = j.lastcheckedtime;
-IF j.errormsg || j.fetcherrormsg; %] <span class = 'badge badge-warning'>Error</span>[% END;
+IF j.has_error || j.fetcherrormsg; %] <span class = 'badge badge-warning'>Error</span>[% END;
ELSE; "-";
END %]</td>
[% IF j.get_column('nrtotal') > 0 %]
@@ -19,9 +19,16 @@
<tt>[% node.name %]</tt> (<em>no info</em>)
[% END %]
</span></span>
+[% IF isRoot %]
+<span class="dep-tree-buttons">
+(<a href="#" class="tree-collapse-all">collapse all</a>
+–
+<a href="#" class="tree-expand-all">expand all</a>)
+</span>
+[% END %]
[% IF node.refs.size > 0 %]
<ul class="subtree">
-[% FOREACH ref IN node.refs; INCLUDE renderNode node=ref; END %]
+[% FOREACH ref IN node.refs; INCLUDE renderNode node=ref isRoot=0; END %]
</ul>
[% END %]
[% END %]
Some files were not shown because too many files have changed in this diff.