Merge branch 'master' into patch-1
This commit is contained in:
24  .editorconfig  Normal file
@@ -0,0 +1,24 @@
# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

[*.{cc,hh,hpp,pl,pm,sh,t}]
indent_style = space
indent_size = 4

[Makefile]
indent_style = tab

[*.nix]
indent_style = space
indent_size = 2

# Match diffs, avoid trimming trailing whitespace
[*.{diff,patch}]
trim_trailing_whitespace = false
37  .github/ISSUE_TEMPLATE/bug_report.md  vendored  Normal file
@@ -0,0 +1,37 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Hydra Server:**

Please fill out this data as well as you can, but don't worry if you can't -- just do your best.

- OS and version: [e.g. NixOS 22.05.20211203.ee3794c]
- Version of Hydra
- Version of Nix Hydra is built against
- Version of the Nix daemon

**Additional context**
Add any other context about the problem here.
20  .github/ISSUE_TEMPLATE/feature_request.md  vendored  Normal file
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
9  .github/workflows/test.yml  vendored
@@ -1,14 +1,17 @@
name: "Test"
on:
  pull_request:
  merge_group:
  push:
    branches:
      - master
jobs:
  tests:
    runs-on: ubuntu-18.04
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0
    - uses: cachix/install-nix-action@v12
    - uses: cachix/install-nix-action@v31
    #- run: nix flake check
    - run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
28  .github/workflows/update-flakes.yml  vendored  Normal file
@@ -0,0 +1,28 @@
name: "Update Flakes"
on:
  schedule:
    # Run weekly on Monday at 00:00 UTC
    - cron: '0 0 * * 1'
  workflow_dispatch:
jobs:
  update-flakes:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
    - uses: actions/checkout@v3
    - uses: cachix/install-nix-action@v31
    - name: Update flake inputs
      run: nix flake update
    - name: Create Pull Request
      uses: peter-evans/create-pull-request@v5
      with:
        commit-message: "flake.lock: Update"
        title: "Update flake inputs"
        body: |
          Automated flake input updates.

          This PR was automatically created by the update-flakes workflow.
        branch: update-flakes
        delete-branch: true
38  .gitignore  vendored
@@ -1,41 +1,9 @@
*.o
*~
Makefile
Makefile.in
.deps
.hydra-data
/config.guess
/config.log
/config.status
/config.sub
/configure
/depcomp
/libtool
/ltmain.sh
/autom4te.cache
/aclocal.m4
/missing
/install-sh
.test_info.*
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
/src/hydra-eval-jobs/hydra-eval-jobs
/src/root/static/bootstrap
/src/root/static/js/flot
/doc/manual/images
/doc/manual/manual.html
/doc/manual/manual.pdf
/tests/.bzr*
/tests/.git*
/tests/.hg*
/tests/nix
/inst
hydra-config.h
hydra-config.h.in
.hydra-data
result
tests/jobs/config.nix
result-*
outputs
config
stamp-h1
src/hydra-evaluator/hydra-evaluator
src/hydra-queue-runner/hydra-queue-runner
4  .perlcriticrc  Normal file
@@ -0,0 +1,4 @@
theme = community

# 5 is the least complainy, 1 is the most complainy
severity = 1
@@ -1,8 +0,0 @@
SUBDIRS = src tests doc
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
EXTRA_DIST = hydra-module.nix

install-data-local: hydra-module.nix
	$(INSTALL) -d $(DESTDIR)$(datadir)/nix
	$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
5  Procfile
@@ -1,5 +1,6 @@
hydra-server: ./foreman/start-hydra.sh
hydra-queue-runner: ./foreman/start-queue-runner.sh
hydra-evaluator: ./foreman/start-evaluator.sh
hydra-queue-runner: ./foreman/start-queue-runner.sh
hydra-notify: ./foreman/start-notify.sh
hydra-server: ./foreman/start-hydra.sh
manual: ./foreman/start-manual.sh
postgres: ./foreman/start-postgres.sh
62  README.md
@@ -28,36 +28,36 @@ Once the Hydra service has been configured as above and activate you should alre
```
$ su - hydra
$ hydra-create-user <USER> --full-name '<NAME>' \
    --email-address '<EMAIL>' --password <PASSWORD> --role admin
    --email-address '<EMAIL>' --password-prompt --role admin
```

Afterwards you should be able to log by clicking on "_Sign In_" on the top right of the web interface using the credentials specified by `hydra-crate-user`. Once you are logged in you can click "_Admin -> Create Project_" to configure your first project.
Afterwards you should be able to log in by clicking on "_Sign In_" on the top right of the web interface using the credentials specified by `hydra-create-user`. Once you are logged in you can click "_Admin -> Create Project_" to configure your first project.

### Creating A Simple Project And Jobset
In order to evaluate and build anything you need to crate _projects_ that contain _jobsets_. Hydra supports imperative and declarative projects and many different configurations. The steps below will guide you through the required steps to creating a minimal imperative project configuration.
In order to evaluate and build anything you need to create _projects_ that contain _jobsets_. Hydra supports imperative and declarative projects and many different configurations. The steps below will guide you through the required steps to creating a minimal imperative project configuration.

#### Creating A Project
Log in as adminstrator, click "_Admin_" and select "_Create project_". Fill the form as follows:
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:

- **Identifier**: `hello`
- **Identifier**: `hello-project`
- **Display name**: `hello`
- **Description**: `hello project`

Click "_Create project_".

#### Creating A Jobset
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values:
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values:

- **Identifier**: `hello`
- **Identifier**: `hello-project`
- **Nix expression**: `examples/hello.nix` in `hydra`
- **Check interval**: 60
- **Scheduling shares**: 1

We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_ (which we are referrencing in the Nix expression above):
We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_ (which we are referencing in the Nix expression above):

- **Input name**: `nixpkgs`
- **Type**: `Git checkout`
- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03`
- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`

- **Input name**: `hydra`
- **Type**: `Git checkout`
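For reference, a jobset expression such as the `examples/hello.nix` referenced above is just a Nix function from its inputs to an attribute set of derivations. A minimal hand-written sketch (not the literal contents of `examples/hello.nix`; the attribute name and `system` value are only illustrative) could look like:

```nix
{ nixpkgs ? <nixpkgs> }:

let
  pkgs = import nixpkgs { system = "x86_64-linux"; };
in
{
  # Every attribute of this set becomes a job of the jobset;
  # this one would appear in the web interface as "hello".
  hello = pkgs.hello;
}
```

When Hydra evaluates the jobset, it passes the configured jobset inputs (here `nixpkgs` and `hydra`) as arguments to this function.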
@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):

```
$ nix-build
$ nix build
```

### Development Environment

You can use the provided shell.nix to get a working development environment:
```
$ nix-shell
$ ./bootstrap
$ configurePhase # NOTE: not ./configure
$ make
$ nix develop
$ mesonConfigurePhase
$ ninja
```

### Executing Hydra During Development

@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
can be done using [foreman](https://github.com/ddollar/foreman):

```
$ nix-shell
$ nix develop
$ # hack hack
$ make
$ ninja -C build
$ foreman start
```

@@ -106,6 +105,35 @@ conflicts with services that might be running on your host, hydra and postgress
Note that this is only ever meant as an ad-hoc way of executing Hydra during development. Please make use of the
NixOS module for actually running Hydra in production.

### Checking your patches

After making your changes, verify the test suite passes and perlcritic is still happy.

Start by following the steps in [Development Environment](#development-environment).

Then, you can run the tests and the perlcritic linter together with:

```console
$ nix develop
$ ninja -C build test
```

You can run a single test with:

```
$ nix develop
$ cd build
$ meson test --test-args=../t/Hydra/Event.t testsuite
```

And you can run just perlcritic with:

```
$ nix develop
$ cd build
$ meson test perlcritic
```

### JSON API

You can also interface with Hydra through a JSON API. The API is defined in [hydra-api.yaml](./hydra-api.yaml) and you can test and explore via the [swagger editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/NixOS/hydra/master/hydra-api.yaml)

@@ -113,7 +141,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
## Additional Resources

- [Hydra User's Guide](https://nixos.org/hydra/manual/)
- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
81  configure.ac
@@ -1,81 +0,0 @@
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version)$VERSION_SUFFIX])])
AC_CONFIG_AUX_DIR(config)
AM_INIT_AUTOMAKE([foreign serial-tests])

AC_LANG([C++])

AC_PROG_CC
AC_PROG_INSTALL
AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX

CXXFLAGS+=" -std=c++17"

AC_PATH_PROG([XSLTPROC], [xsltproc])

AC_ARG_WITH([docbook-xsl],
  [AS_HELP_STRING([--with-docbook-xsl=PATH],
    [path of the DocBook XSL stylesheets])],
  [docbookxsl="$withval"],
  [docbookxsl="/docbook-xsl-missing"])
AC_SUBST([docbookxsl])


AC_DEFUN([NEED_PROG],
[
AC_PATH_PROG($1, $2)
if test -z "$$1"; then
  AC_MSG_ERROR([$2 is required])
fi
])

NEED_PROG(perl, perl)

NEED_PROG([NIX_STORE_PROGRAM], [nix-store])

AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
if test -n "$NIX_STORE" -a -n "$TMPDIR"
then
  # This may be executed from within a build chroot, so pacify
  # `nix-store' instead of letting it choke while trying to mkdir
  # /nix/var.
  NIX_STATE_DIR="$TMPDIR"
  export NIX_STATE_DIR
fi
if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
  AC_MSG_RESULT([yes])
else
  AC_MSG_RESULT([no])
  AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
fi

PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])

testPath="$(dirname $(type -p expr))"
AC_SUBST(testPath)

CXXFLAGS+=" -include nix/config.h"

AC_CONFIG_FILES([
  Makefile
  doc/Makefile
  doc/manual/Makefile
  src/Makefile
  src/hydra-evaluator/Makefile
  src/hydra-eval-jobs/Makefile
  src/hydra-queue-runner/Makefile
  src/sql/Makefile
  src/ttf/Makefile
  src/lib/Makefile
  src/root/Makefile
  src/script/Makefile
  tests/Makefile
  tests/jobs/config.nix
])

AC_CONFIG_COMMANDS([executable-scripts], [])

AC_CONFIG_HEADER([hydra-config.h])

AC_OUTPUT
@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`

(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
  src = ./.;
}).defaultNix
@@ -1,4 +0,0 @@
SUBDIRS = manual
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
129  doc/architecture.md  Normal file
@@ -0,0 +1,129 @@
This is a rough overview from informal discussions and explanations of the inner workings of Hydra.
You can use it as a guide to navigate the codebase or ask questions.

## Architecture

### Components

- Postgres database
  - configuration
  - build queue
    - what is already built
    - what is going to be built
- `hydra-server`
  - Perl, Catalyst
  - web frontend
- `hydra-evaluator`
  - Perl, C++
  - fetches repositories
  - evaluates job sets
    - pointers to a repository
  - adds builds to the queue
- `hydra-queue-runner`
  - C++
  - monitors the queue
  - executes build steps
  - uploads build results
    - copy to a Nix store
- Nix store
  - contains `.drv`s
  - populated by `hydra-evaluator`
  - read by `hydra-queue-runner`
- destination Nix store
  - can be a binary cache
  - e.g. [cache.nixos.org](http://cache.nixos.org) or the same store again (for small Hydra instances)
- plugin architecture
  - extend evaluator for new kinds of repositories
  - e.g. fetch from `git`

### Database Schema

[https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql](https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql)

- `Jobsets`
  - populated by calling Nix evaluator
  - every Nix derivation in `release.nix` is a Job
  - `flake`
    - URL to flake, if job is from a flake
    - single point of configuration for flake builds
    - flake itself contains pointers to dependencies
    - for other builds we need more configuration data
- `JobsetInputs`
  - more configuration for a Job
- `JobsetInputAlts`
  - historical, where you could have more than one alternative for each input
  - it would have done the cross product of all possibilities
  - not used any more, as now every input is unique
  - originally that was to have alternative values for the system parameter
    - `x86-linux`, `x86_64-darwin`
    - turned out not to be a good idea, as job set names did not uniquely identify output
- `Builds`
  - queue: scheduled and finished builds
  - instance of a Job
  - corresponds to a top-level derivation
    - can have many dependencies that don’t have a corresponding build
    - dependencies represented as `BuildSteps`
  - a Job is all the builds with a particular name, e.g.
    - `git.x86_64-linux` is a job
    - there may be multiple builds for that job
  - build ID: just an auto-increment number
  - building one thing can actually cause many (hundreds of) derivations to be built
  - for queued builds, the `drv` has to be present in the store
    - otherwise the build will fail, e.g. after garbage collection
- `BuildSteps`
  - corresponds to a derivation or substitution
  - are reused through the Nix store
  - may be duplicated for unique derivations due to how they relate to `Jobs`
- `BuildStepOutputs`
  - corresponds directly to derivation outputs
    - `out`, `dev`, ...
- `BuildProducts`
  - not a Nix concept
  - populated from a special file `$out/nix-support/hydra-build-products` (see the sketch after this list)
  - used to scrape parts of build results out to the web frontend
    - e.g. manuals, ISO images, etc.
- `BuildMetrics`
  - scrapes data from a magic location, similar to `BuildProducts`, to show fancy graphs
    - e.g. test coverage, build times, CPU utilization for build
    - `$out/nix-support/hydra-metrics`
- `BuildInputs`
  - probably obsolete
- `JobsetEvalMembers`
  - joins evaluations with jobs
  - huge table, 10k’s of entries for one `nixpkgs` evaluation
  - can be imagined as a subset of the eval cache
  - could in principle use the eval cache
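To make the `BuildProducts`/`BuildMetrics` rows above concrete, here is a hedged sketch (not code from this repository; the product type/subtype and the metric name are invented, and the exact line formats are an assumption) of a job that writes both magic files from its build script:

```nix
{ pkgs ? import <nixpkgs> {} }:

pkgs.runCommand "hydra-conventions-demo" {} ''
  mkdir -p $out/nix-support
  echo "hello from the demo" > $out/report.txt

  # One BuildProducts row, roughly "<type> <subtype> <path>"
  # (type/subtype here are illustrative values).
  echo "report testlog $out/report.txt" >> $out/nix-support/hydra-build-products

  # One BuildMetrics row, roughly "<name> <value> [<unit>]".
  echo "lineCount 1 lines" >> $out/nix-support/hydra-metrics
''
```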
### `release.nix`

- hydra-specific convention to describe the build
- should evaluate to an attribute set that contains derivations
  - hydra considers every attribute in that set a job
- every job needs a unique name
  - if you want to build for multiple platforms, you need to reflect that in the name (see the sketch below)
- hydra does a deep traversal of the attribute set
  - just evaluating the names may take half an hour
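As a hand-written illustration of those points (hypothetical attribute names, not taken from this repository), a `release.nix` that reflects the platform in each job name could look like:

```nix
{ nixpkgs ? <nixpkgs> }:

let
  systems = [ "x86_64-linux" "x86_64-darwin" ];
  pkgsFor = system: import nixpkgs { inherit system; };
in
{
  # Yields the jobs hello.x86_64-linux and hello.x86_64-darwin; the
  # platform is part of the attribute path and therefore of the job name.
  hello = builtins.listToAttrs (map (system: {
    name = system;
    value = (pkgsFor system).hello;
  }) systems);
}
```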
## FAQ

Can we imagine Hydra to be a persistence layer for the build graph?

- partially, it lacks a lot of information
  - does not keep edges of the build graph

How does Hydra relate to `nix build`?

- reimplements the top level Nix build loop, scheduling, etc.
- Hydra has to persist build results
- Hydra has more sophisticated remote build execution and scheduling than Nix

Is it conceptually possible to unify Hydra’s capabilities with regular Nix?

- Nix does not have any scheduling, it just traverses the build graph
- Hydra has scheduling in terms of job set priorities, tracks how much of a job set it has worked on
  - makes sure jobs don’t starve each other
- Nix cannot dynamically add build jobs at runtime
  - [RFC 92](https://github.com/NixOS/rfcs/blob/master/rfcs/0092-plan-dynamism.md) should enable that
  - internally it is already possible, but there is no interface to do that
- Hydra queue runner is a long running process
  - Nix takes a static set of jobs, working it off at once
@@ -13,7 +13,7 @@
* Creating a user:

    $ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
        --password-hash "$(echo -n foobar | sha1sum | cut -c1-40)"
        --password-prompt

  (Replace "foobar" with the desired password.)
@@ -1,33 +0,0 @@
DOCBOOK_FILES = installation.xml introduction.xml manual.xml projects.xml hacking.xml

EXTRA_DIST = $(DOCBOOK_FILES)

xsltproc_opts = \
	--param callout.graphics.extension \'.gif\' \
	--param section.autolabel 1 \
	--param section.label.includes.component.label 1


# Include the manual in the tarball.
dist_html_DATA = manual.html

# Embed Docbook's callout images in the distribution.
EXTRA_DIST += images

manual.html: $(DOCBOOK_FILES)
	$(XSLTPROC) $(xsltproc_opts) --nonet --xinclude \
	  --output manual.html \
	  $(docbookxsl)/xhtml/docbook.xsl manual.xml

images:
	$(MKDIR_P) images/callouts
	cp $(docbookxsl)/images/callouts/*.gif images/callouts
	chmod +wx images images/callouts

install-data-hook: images
	$(INSTALL) -d $(DESTDIR)$(htmldir)/images/callouts
	$(INSTALL_DATA) images/callouts/* $(DESTDIR)$(htmldir)/images/callouts
	ln -sfn manual.html $(DESTDIR)$(htmldir)/index.html

distclean-hook:
	-rm -rf images
@@ -1,334 +0,0 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xml:id="chap-api">
|
||||
|
||||
<title>Using the external API</title>
|
||||
|
||||
<para>
|
||||
To be able to create integrations with other services, Hydra exposes
|
||||
an external API that you can manage projects with.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The API is accessed over HTTP(s) where all data is sent and received
|
||||
as JSON.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Creating resources requires the caller to be authenticated, while
|
||||
retrieving resources does not.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The API does not have a separate URL structure for its endpoints.
|
||||
Instead you request the pages of the web interface as
|
||||
<literal>application/json</literal> to use the API.
|
||||
</para>
|
||||
|
||||
<section>
|
||||
<title>List projects</title>
|
||||
|
||||
<para>
|
||||
To list all the <literal>projects</literal> of the Hydra install:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET /
|
||||
Accept: application/json
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
This will give you a list of <literal>projects</literal>, where each
|
||||
<literal>project</literal> contains general information and a list
|
||||
of its <literal>job sets</literal>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Example</emphasis>
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Note:</emphasis> this response is truncated
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET https://hydra.nixos.org/
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"displayname": "Acoda",
|
||||
"name": "acoda",
|
||||
"description": "Acoda is a tool set for automatic data migration along an evolving data model",
|
||||
"enabled": 0,
|
||||
"owner": "sander",
|
||||
"hidden": 1,
|
||||
"jobsets": [
|
||||
"trunk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"displayname": "cabal2nix",
|
||||
"name": "cabal2nix",
|
||||
"description": "Convert Cabal files into Nix build instructions",
|
||||
"enabled": 0,
|
||||
"owner": "simons@cryp.to",
|
||||
"hidden": 1,
|
||||
"jobsets": [
|
||||
"master"
|
||||
]
|
||||
}
|
||||
]
|
||||
</programlisting>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Get a single project</title>
|
||||
|
||||
<para>
|
||||
To get a single <literal>project</literal> by identifier:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET /project/:project-identifier
|
||||
Accept: application/json
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Example</emphasis>
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/project/hydra
|
||||
</programlisting>
|
||||
|
||||
<programlisting>
|
||||
GET https://hydra.nixos.org/project/hydra
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"description": "Hydra, the Nix-based continuous build system",
|
||||
"hidden": 0,
|
||||
"displayname": "Hydra",
|
||||
"jobsets": [
|
||||
"hydra-master",
|
||||
"hydra-ant-logger-trunk",
|
||||
"master",
|
||||
"build-ng"
|
||||
],
|
||||
"name": "hydra",
|
||||
"enabled": 1,
|
||||
"owner": "eelco"
|
||||
}
|
||||
</programlisting>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Get a single job set</title>
|
||||
|
||||
<para>
|
||||
To get a single <literal>job set</literal> by identifier:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET /jobset/:project-identifier/:jobset-identifier
|
||||
Content-Type: application/json
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Example</emphasis>
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/jobset/hydra/build-ng
|
||||
</programlisting>
|
||||
|
||||
<programlisting>
|
||||
GET https://hydra.nixos.org/jobset/hydra/build-ng
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"errormsg": "evaluation failed due to signal 9 (Killed)",
|
||||
"fetcherrormsg": null,
|
||||
"nixexprpath": "release.nix",
|
||||
"nixexprinput": "hydraSrc",
|
||||
"emailoverride": "rob.vermaas@gmail.com, eelco.dolstra@logicblox.com",
|
||||
"jobsetinputs": {
|
||||
"officialRelease": {
|
||||
"jobsetinputalts": [
|
||||
"false"
|
||||
]
|
||||
},
|
||||
"hydraSrc": {
|
||||
"jobsetinputalts": [
|
||||
"https://github.com/NixOS/hydra.git build-ng"
|
||||
]
|
||||
},
|
||||
"nixpkgs": {
|
||||
"jobsetinputalts": [
|
||||
"https://github.com/NixOS/nixpkgs.git release-14.12"
|
||||
]
|
||||
}
|
||||
},
|
||||
"enabled": 0
|
||||
}
|
||||
</programlisting>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>List evaluations</title>
|
||||
|
||||
<para>
|
||||
To list the <literal>evaluations</literal> of a
|
||||
<literal>job set</literal> by identifier:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET /jobset/:project-identifier/:jobset-identifier/evals
|
||||
Content-Type: application/json
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Example</emphasis>
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/jobset/hydra/build-ng/evals
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Note:</emphasis> this response is truncated
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET https://hydra.nixos.org/jobset/hydra/build-ng/evals
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"evals": [
|
||||
{
|
||||
"jobsetevalinputs": {
|
||||
"nixpkgs": {
|
||||
"dependency": null,
|
||||
"type": "git",
|
||||
"value": null,
|
||||
"uri": "https://github.com/NixOS/nixpkgs.git",
|
||||
"revision": "f60e48ce81b6f428d072d3c148f6f2e59f1dfd7a"
|
||||
},
|
||||
"hydraSrc": {
|
||||
"dependency": null,
|
||||
"type": "git",
|
||||
"value": null,
|
||||
"uri": "https://github.com/NixOS/hydra.git",
|
||||
"revision": "48d6f0de2ab94f728d287b9c9670c4d237e7c0f6"
|
||||
},
|
||||
"officialRelease": {
|
||||
"dependency": null,
|
||||
"value": "false",
|
||||
"type": "boolean",
|
||||
"uri": null,
|
||||
"revision": null
|
||||
}
|
||||
},
|
||||
"hasnewbuilds": 1,
|
||||
"builds": [
|
||||
24670686,
|
||||
24670684,
|
||||
24670685,
|
||||
24670687
|
||||
],
|
||||
"id": 1213758
|
||||
}
|
||||
],
|
||||
"first": "?page=1",
|
||||
"last": "?page=1"
|
||||
}
|
||||
</programlisting>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Get a single build</title>
|
||||
|
||||
<para>
|
||||
To get a single <literal>build</literal> by its id:
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
GET /build/:build-id
|
||||
Content-Type: application/json
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
<emphasis role="strong">Example</emphasis>
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/build/24670686
|
||||
</programlisting>
|
||||
|
||||
<programlisting>
|
||||
GET /build/24670686
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"job": "tests.api.x86_64-linux",
|
||||
"jobsetevals": [
|
||||
1213758
|
||||
],
|
||||
"buildstatus": 0,
|
||||
"buildmetrics": null,
|
||||
"project": "hydra",
|
||||
"system": "x86_64-linux",
|
||||
"priority": 100,
|
||||
"releasename": null,
|
||||
"starttime": 1439402853,
|
||||
"nixname": "vm-test-run-unnamed",
|
||||
"timestamp": 1439388618,
|
||||
"id": 24670686,
|
||||
"stoptime": 1439403403,
|
||||
"jobset": "build-ng",
|
||||
"buildoutputs": {
|
||||
"out": {
|
||||
"path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed"
|
||||
}
|
||||
},
|
||||
"buildproducts": {
|
||||
"1": {
|
||||
"path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed",
|
||||
"defaultpath": "log.html",
|
||||
"type": "report",
|
||||
"sha256hash": null,
|
||||
"filesize": null,
|
||||
"name": "",
|
||||
"subtype": "testlog"
|
||||
}
|
||||
},
|
||||
"finished": 1
|
||||
}
|
||||
</programlisting>
|
||||
</section>
|
||||
|
||||
</chapter>
|
||||
|
||||
<!--
|
||||
Local Variables:
|
||||
indent-tabs-mode: nil
|
||||
ispell-local-dictionary: "american"
|
||||
End:
|
||||
-->
|
||||
@@ -1,196 +0,0 @@
|
||||
<section xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="sec-declarative-projects">
|
||||
|
||||
<title>Declarative projects</title>
|
||||
|
||||
<para>
|
||||
Hydra supports declaratively configuring a project's jobsets. This
|
||||
configuration can be done statically, or generated by a build job.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
Hydra will treat the project's declarative input as a static definition
|
||||
if and only if the spec file contains a dictionary of dictionaries.
|
||||
If the value of any key in the spec is not a dictionary, it will
|
||||
treat the spec as a generated declarative spec.
|
||||
</para></note>
|
||||
|
||||
<section xml:id="sec-static-declarative-projects">
|
||||
|
||||
<title>Static, Declarative Projects</title>
|
||||
<para>
|
||||
Hydra supports declarative projects, where jobsets are configured
|
||||
from a static JSON document in a repository.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To configure a static declarative project, take the following steps:
|
||||
</para>
|
||||
<orderedlist numeration="arabic" spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
Create a Hydra-fetchable source like a Git repository or local path.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
In that source, create a file called <filename>spec.json</filename>,
|
||||
and add the specification for all of the jobsets. Each key is a jobset
|
||||
and each value is a jobset's specification. For example:
|
||||
|
||||
<programlisting language="json">
|
||||
{
|
||||
"nixpkgs": {
|
||||
"enabled": 1,
|
||||
"hidden": false,
|
||||
"description": "Nixpkgs",
|
||||
"nixexprinput": "nixpkgs",
|
||||
"nixexprpath": "pkgs/top-level/release.nix",
|
||||
"checkinterval": 300,
|
||||
"schedulingshares": 100,
|
||||
"enableemail": false,
|
||||
"emailoverride": "",
|
||||
"keepnr": 3,
|
||||
"inputs": {
|
||||
"nixpkgs": {
|
||||
"type": "git",
|
||||
"value": "git://github.com/NixOS/nixpkgs.git master",
|
||||
"emailresponsible": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"nixos": {
|
||||
"enabled": 1,
|
||||
"hidden": false,
|
||||
"description": "NixOS: Small Evaluation",
|
||||
"nixexprinput": "nixpkgs",
|
||||
"nixexprpath": "nixos/release-small.nix",
|
||||
"checkinterval": 300,
|
||||
"schedulingshares": 100,
|
||||
"enableemail": false,
|
||||
"emailoverride": "",
|
||||
"keepnr": 3,
|
||||
"inputs": {
|
||||
"nixpkgs": {
|
||||
"type": "git",
|
||||
"value": "git://github.com/NixOS/nixpkgs.git master",
|
||||
"emailresponsible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Create a new project, and set the project's declarative input type,
|
||||
declarative input value, and declarative spec file to point to the
|
||||
source and JSON file you created in step 2.
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
<para>
|
||||
Hydra will create a special jobset named <literal>.jobsets</literal>.
|
||||
When the <literal>.jobsets</literal> jobset is evaluated, this static
|
||||
specification will be used for configuring the rest of the project's
|
||||
jobsets.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="sec-generated-declarative-projects">
|
||||
|
||||
<title>Generated, Declarative Projects</title>
|
||||
<para>
|
||||
Hydra also supports generated declarative projects, where jobsets are
|
||||
configured automatically from specification files instead of being
|
||||
managed through the UI. A jobset specification is a JSON object
|
||||
containing the configuration of the jobset, for example:
|
||||
</para>
|
||||
<programlisting language="json">
|
||||
{
|
||||
"enabled": 1,
|
||||
"hidden": false,
|
||||
"description": "js",
|
||||
"nixexprinput": "src",
|
||||
"nixexprpath": "release.nix",
|
||||
"checkinterval": 300,
|
||||
"schedulingshares": 100,
|
||||
"enableemail": false,
|
||||
"emailoverride": "",
|
||||
"keepnr": 3,
|
||||
"inputs": {
|
||||
"src": { "type": "git", "value": "git://github.com/shlevy/declarative-hydra-example.git", "emailresponsible": false },
|
||||
"nixpkgs": { "type": "git", "value": "git://github.com/NixOS/nixpkgs.git release-16.03", "emailresponsible": false }
|
||||
}
|
||||
}
|
||||
</programlisting>
|
||||
<para>
|
||||
To configure a declarative project, take the following steps:
|
||||
</para>
|
||||
<orderedlist numeration="arabic" spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
Create a jobset repository in the normal way (e.g. a git repo with
|
||||
a <literal>release.nix</literal> file, any other needed helper
|
||||
files, and taking any kind of hydra input), but without adding it
|
||||
to the UI. The nix expression of this repository should contain a
|
||||
single job, named <literal>jobsets</literal>. The output of the
|
||||
<literal>jobsets</literal> job should be a JSON file containing an
|
||||
object of jobset specifications. Each member of the object will
|
||||
become a jobset of the project, configured by the corresponding
|
||||
jobset specification.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
In some hydra-fetchable source (potentially, but not necessarily,
|
||||
the same repo you created in step 1), create a JSON file
|
||||
containing a jobset specification that points to the jobset
|
||||
repository you created in the first step, specifying any needed
|
||||
inputs (e.g. nixpkgs) as necessary.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
In the project creation/edit page, set declarative input type,
|
||||
declarative input value, and declarative spec file to point to the
|
||||
source and JSON file you created in step 2.
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
<para>
|
||||
Hydra will create a special jobset named <literal>.jobsets</literal>,
|
||||
which whenever evaluated will go through the steps above in reverse
|
||||
order:
|
||||
</para>
|
||||
<orderedlist numeration="arabic" spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
Hydra will fetch the input specified by the declarative input type
|
||||
and value.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Hydra will use the configuration given in the declarative spec
|
||||
file as the jobset configuration for this evaluation. In addition
|
||||
to any inputs specified in the spec file, hydra will also pass the
|
||||
<literal>declInput</literal> argument corresponding to the input
|
||||
fetched in step 1.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
As normal, hydra will build the jobs specified in the jobset
|
||||
repository, which in this case is the single
|
||||
<literal>jobsets</literal> job. When that job completes, hydra
|
||||
will read the created jobset specifications and create
|
||||
corresponding jobsets in the project, disabling any jobsets that
|
||||
used to exist but are not present in the current spec.
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</section>
|
||||
</section>
|
||||
@@ -1,39 +0,0 @@
|
||||
<appendix xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="chap-hacking">
|
||||
|
||||
<title>Hacking</title>
|
||||
|
||||
<para>This section provides some notes on how to hack on Hydra. To
|
||||
get the latest version of Hydra from GitHub:
|
||||
<screen>
|
||||
$ git clone git://github.com/NixOS/hydra.git
|
||||
$ cd hydra
|
||||
</screen>
|
||||
</para>
|
||||
|
||||
<para>To build it and its dependencies:
|
||||
<screen>
|
||||
$ nix-build release.nix -A build.x86_64-linux
|
||||
</screen>
|
||||
</para>
|
||||
|
||||
<para>To build all dependencies and start a shell in which all
|
||||
environment variables (such as <envar>PERL5LIB</envar>) are set up so
|
||||
that those dependencies can be found:
|
||||
<screen>
|
||||
$ nix-shell
|
||||
</screen>
|
||||
To build Hydra, you should then do:
|
||||
<screen>
|
||||
[nix-shell]$ ./bootstrap
|
||||
[nix-shell]$ configurePhase
|
||||
[nix-shell]$ make
|
||||
</screen>
|
||||
You can run the Hydra web server in your source tree as follows:
|
||||
<screen>
|
||||
$ ./src/script/hydra-server
|
||||
</screen>
|
||||
</para>
|
||||
|
||||
</appendix>
|
||||
@@ -1,338 +0,0 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="chap-installation">
|
||||
|
||||
<title>Installation</title>
|
||||
|
||||
<para>
|
||||
This chapter explains how to install Hydra on your own build farm server.
|
||||
</para>
|
||||
|
||||
<section>
|
||||
<title>Prerequisites</title>
|
||||
<para>
|
||||
To install and use Hydra you need to have installed the following dependencies:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para>Nix</para></listitem>
|
||||
<listitem><para>PostgreSQL</para></listitem>
|
||||
<listitem><para>many Perl packages, notably Catalyst, EmailSender,
|
||||
and NixPerl (see the <link
|
||||
xlink:href="https://github.com/NixOS/hydra/blob/master/release.nix">Hydra
|
||||
expression in Nixpkgs</link> for the complete
|
||||
list)</para></listitem>
|
||||
</itemizedlist>
|
||||
|
||||
At the moment, Hydra runs only on GNU/Linux
|
||||
(<emphasis>i686-linux</emphasis> and
|
||||
<emphasis>x86_64-linux</emphasis>).
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For small projects, Hydra can be run on any reasonably modern
|
||||
machine. For individual projects you can even run Hydra on a
|
||||
laptop. However, the charm of a buildfarm server is usually that
|
||||
it operates without disturbing the developer's working
|
||||
environment and can serve releases over the internet. In
|
||||
conjunction you should typically have your source code
|
||||
administered in a version management system, such as
|
||||
subversion. Therefore, you will probably want to install a
|
||||
server that is connected to the internet. To scale up to large
|
||||
and/or many projects, you will need at least a considerable
|
||||
amount of diskspace to store builds. Since Hydra can schedule
|
||||
multiple simultaneous build jobs, it can be useful to have a
|
||||
multi-core machine, and/or attach multiple build machines in a
|
||||
network to the central Hydra server.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Of course we think it is a good idea to use the <link
|
||||
xlink:href="http://nixos.org/nixos">NixOS</link> GNU/Linux
|
||||
distribution for your buildfarm server. But this is not a
|
||||
requirement. The Nix software deployment system can be
|
||||
installed on any GNU/Linux distribution in parallel to the
|
||||
regular package management system. Thus, you can use Hydra on a
|
||||
Debian, Fedora, SuSE, or Ubuntu system.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Getting Nix</title>
|
||||
|
||||
<para>
|
||||
If your server runs NixOS you are all set to continue with
|
||||
installation of Hydra. Otherwise you first need to install Nix.
|
||||
The latest stable version can be found on <link
|
||||
xlink:href="http://nixos.org/nix/download.html">the Nix web
|
||||
site</link>, along with a manual, which includes installation
|
||||
instructions.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Installation</title>
|
||||
|
||||
<!--
|
||||
<para>
|
||||
Hydra can be installed using Nixpkgs:
|
||||
|
||||
<screen>
|
||||
nix-env -f /path/to/nixpkgs -iA hydra</screen>
|
||||
|
||||
This makes the tools available in your Nix user environment,
|
||||
<literal>$HOME/.nix-profile</literal> by default.
|
||||
</para>
|
||||
-->
|
||||
|
||||
<para>
|
||||
The latest development snapshot of Hydra can be installed
|
||||
by visiting the URL <link
|
||||
xlink:href="http://hydra.nixos.org/view/hydra/unstable"><literal>http://hydra.nixos.org/view/hydra/unstable</literal></link>
|
||||
and using the one-click install available at one of the build
|
||||
pages. You can also install Hydra through the channel by
|
||||
performing the following commands:
|
||||
|
||||
<screen>
|
||||
nix-channel --add http://hydra.nixos.org/jobset/hydra/master/channel/latest
|
||||
nix-channel --update
|
||||
nix-env -i hydra</screen>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Command completion should reveal a number of command-line tools
|
||||
from Hydra, such as <command>hydra-queue-runner</command>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Creating the database</title>
|
||||
<para>
|
||||
Hydra stores its results in a PostgreSQL database.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To setup a PostgreSQL database with <emphasis>hydra</emphasis>
|
||||
as database name and user name, issue the following commands on
|
||||
the PostgreSQL server:
|
||||
|
||||
<screen>
|
||||
createuser -S -D -R -P hydra
|
||||
createdb -O hydra hydra</screen>
|
||||
|
||||
Note that <emphasis>$prefix</emphasis> is the location of Hydra
|
||||
in the nix store.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Hydra uses an environment variable to know which database should
|
||||
be used, and a variable which points to a location that holds
|
||||
some state. To set these variables for a PostgreSQL database,
|
||||
add the following to the file <filename>~/.profile</filename> of
|
||||
the user running the Hydra services.
|
||||
|
||||
<screen>
|
||||
export HYDRA_DBI="dbi:Pg:dbname=hydra;host=dbserver.example.org;user=hydra;"
|
||||
export HYDRA_DATA=/var/lib/hydra</screen>
|
||||
|
||||
You can provide the username and password in the file
|
||||
<filename>~/.pgpass</filename>, e.g.
|
||||
|
||||
<screen>
|
||||
dbserver.example.org:*:hydra:hydra:password</screen>
|
||||
|
||||
Make sure that the <emphasis>HYDRA_DATA</emphasis> directory
|
||||
exists and is writable for the user which will run the Hydra
|
||||
services.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Having set these environment variables, you can now initialise
|
||||
the database by doing:
|
||||
<screen>
|
||||
hydra-init</screen>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To create projects, you need to create a user with
|
||||
<emphasis>admin</emphasis> privileges. This can be done using
|
||||
the command <command>hydra-create-user</command>:
|
||||
|
||||
<screen>
|
||||
$ hydra-create-user alice --full-name 'Alice Q. User' \
|
||||
--email-address 'alice@example.org' --password foobar --role admin
|
||||
</screen>
|
||||
|
||||
Additional users can be created through the web interface.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Upgrading</title>
|
||||
|
||||
<para>If you're upgrading Hydra from a previous version, you
|
||||
should do the following to perform any necessary database schema migrations:
|
||||
<screen>
|
||||
hydra-init</screen>
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Getting Started</title>
|
||||
|
||||
<para>
|
||||
To start the Hydra web server, execute:
|
||||
<screen>
|
||||
hydra-server</screen>
|
||||
|
||||
When the server is started, you can browse to
|
||||
<ulink>http://localhost:3000/</ulink> to start configuring
|
||||
your Hydra instance.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <command>hydra-server</command> command launches the web
|
||||
server. There are two other processes that come into play:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
The <emphasis>evaluator</emphasis> is responsible for
|
||||
periodically evaluating job sets, checking out their
|
||||
dependencies off their version control systems (VCS), and
|
||||
queueing new builds if the result of the evaluation changed.
|
||||
It is launched by the <command>hydra-evaluator</command>
|
||||
command.
|
||||
</listitem>
|
||||
<listitem>
|
||||
The <emphasis>queue runner</emphasis> launches builds (using
|
||||
Nix) as they are queued by the evaluator, scheduling them
|
||||
onto the configured Nix hosts. It is launched using the
|
||||
<command>hydra-queue-runner</command> command.
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
|
||||
All three processes must be running for Hydra to be fully
|
||||
functional, though it's possible to temporarily stop any one of
|
||||
them for maintenance purposes, for instance.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title> Serving behind reverse proxy </title>
|
||||
|
||||
<para>
|
||||
To serve the Hydra web server behind a reverse proxy like
<emphasis>nginx</emphasis> or <emphasis>httpd</emphasis>, some
|
||||
additional configuration must be made.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Edit your <literal>hydra.conf</literal> file in a similar way to
|
||||
this example:
|
||||
|
||||
<screen>
|
||||
using_frontend_proxy 1
|
||||
base_uri example.com</screen>
|
||||
|
||||
<literal>base_uri</literal> should be your Hydra server's proxied URL.
|
||||
|
||||
If you are using the Hydra NixOS module then setting <literal>hydraURL</literal>
|
||||
option should be enough.
|
||||
|
||||
</para>
|
||||
|
||||
<para>
|
||||
|
||||
If you want to serve Hydra with a prefix path, for example
|
||||
<ulink>http://example.com/hydra</ulink> then you need to configure your
|
||||
reverse proxy to pass <literal>X-Request-Base</literal> to hydra, with
|
||||
prefix path as value.
|
||||
|
||||
For example, if you are using nginx, then use a configuration similar to the following:
|
||||
<screen>
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name example.com;
|
||||
.. other configuration ..
|
||||
location /hydra/ {
|
||||
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
proxy_redirect http://127.0.0.1:3000 https://example.com/hydra;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Request-Base /hydra;
|
||||
}
|
||||
}</screen>
|
||||
|
||||
</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Using LDAP as authentication backend (optional)</title>
|
||||
<para>
|
||||
Instead of using Hydra's built-in user management you can optionally use LDAP to manage roles and users.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <command>hydra-server</command> accepts the environment
|
||||
variable <emphasis>HYDRA_LDAP_CONFIG</emphasis>. The value of
|
||||
the variable should point to a valid YAML file containing the
|
||||
Catalyst LDAP configuration. The format of the configuration
|
||||
file is described in the
|
||||
<link xlink:href="https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS">
|
||||
<emphasis>Catalyst::Authentication::Store::LDAP</emphasis> documentation</link>.
|
||||
An example is given below.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Roles can be assigned to users based on their LDAP group membership
|
||||
(<emphasis>use_roles: 1</emphasis> in the below example).
|
||||
For a user to have the role <emphasis>admin</emphasis> assigned to them
|
||||
they should be in the group <emphasis>hydra_admin</emphasis>. In general
|
||||
any LDAP group of the form <emphasis>hydra_some_role</emphasis>
|
||||
(notice the <emphasis>hydra_</emphasis> prefix) will work.
|
||||
</para>
|
||||
|
||||
<screen>
|
||||
credential:
|
||||
class: Password
|
||||
password_field: password
|
||||
password_type: self_check
|
||||
store:
|
||||
class: LDAP
|
||||
ldap_server: localhost
|
||||
ldap_server_options.timeout: 30
|
||||
binddn: "cn=root,dc=example"
|
||||
bindpw: notapassword
|
||||
start_tls: 0
|
||||
start_tls_options
|
||||
verify: none
|
||||
user_basedn: "ou=users,dc=example"
|
||||
user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))"
|
||||
user_scope: one
|
||||
user_field: cn
|
||||
user_search_options:
|
||||
deref: always
|
||||
use_roles: 1
|
||||
role_basedn: "ou=groups,dc=example"
|
||||
role_filter: "(&(objectClass=groupOfNames)(member=%s))"
|
||||
role_scope: one
|
||||
role_field: cn
|
||||
role_value: dn
|
||||
role_search_options:
|
||||
deref: always
|
||||
</screen>
|
||||
</section>
|
||||
</chapter>
|
||||
|
||||
<!--
|
||||
Local Variables:
|
||||
indent-tabs-mode: nil
|
||||
ispell-local-dictionary: "american"
|
||||
End:
|
||||
-->
|
||||
@@ -1,267 +0,0 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="chap-introduction">
|
||||
|
||||
<title>Introduction</title>
|
||||
|
||||
<section>
|
||||
<title>About Hydra</title>
|
||||
|
||||
<para>
|
||||
Hydra is a tool for continuous integration testing and software
|
||||
release that uses a purely functional language to describe build jobs
|
||||
and their dependencies. Continuous integration is a simple technique
|
||||
to improve the quality of the software development process. An
|
||||
automated system continuously or periodically checks out the source
|
||||
code of a project, builds it, runs tests, and produces reports for the
|
||||
developers. Thus, various errors that might accidentally be committed
|
||||
into the code base are automatically caught. Such a system allows
|
||||
more in-depth testing than what developers could feasibly do manually:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem> <emphasis>Portability testing</emphasis>: The
|
||||
software may need to be built and tested on many different
|
||||
platforms. It is infeasible for each developer to do this
|
||||
before every commit.
|
||||
</listitem>
|
||||
|
||||
<listitem> Likewise, many projects have very large test sets
|
||||
(e.g., regression tests in a compiler, or stress tests in a
|
||||
DBMS) that can take hours or days to run to completion.
|
||||
</listitem>
|
||||
|
||||
<listitem> Many kinds of static and dynamic analyses can be
|
||||
performed as part of the tests, such as code coverage runs and
|
||||
static analyses.
|
||||
</listitem>
|
||||
|
||||
<listitem> It may also be necessary to build many different
|
||||
<emphasis>variants</emphasis> of the software. For instance,
|
||||
it may be necessary to verify that the component builds with
|
||||
various versions of a compiler.
|
||||
</listitem>
|
||||
|
||||
<listitem> Developers typically use incremental building to
|
||||
test their changes (since a full build may take too long), but
|
||||
this is unreliable with many build management tools (such as
|
||||
Make), i.e., the result of the incremental build might differ
|
||||
from a full build.
|
||||
</listitem>
|
||||
|
||||
<listitem> It ensures that the software can be built from the
|
||||
sources under revision control. Users of version management
|
||||
systems such as CVS and Subversion often forget to place
|
||||
source files under revision control.
|
||||
</listitem>
|
||||
|
||||
<listitem> The machines on which the continuous integration
|
||||
system runs ideally provide a clean, well-defined build
|
||||
environment. If this environment is administered through
|
||||
proper SCM techniques, then builds produced by the system can
|
||||
be reproduced. In contrast, developer work environments are
|
||||
typically not under any kind of SCM control.
|
||||
</listitem>
|
||||
|
||||
<listitem> In large projects, developers often work on a
|
||||
particular component of the project, and do not build and test
|
||||
the composition of those components (again since this is
|
||||
likely to take too long). To prevent the phenomenon of ``big
|
||||
bang integration'', where components are only tested together
|
||||
near the end of the development process, it is important to
|
||||
test components together as soon as possible (hence
|
||||
<emphasis>continuous integration</emphasis>).
|
||||
</listitem>
|
||||
|
||||
<listitem> It allows software to be
|
||||
<emphasis>released</emphasis> by automatically creating
|
||||
packages that users can download and install. To do this
|
||||
manually represents an often prohibitive amount of work, as
|
||||
one may want to produce releases for many different platforms:
|
||||
e.g., installers for Windows and Mac OS X, RPM or Debian
|
||||
packages for certain Linux distributions, and so on.
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In its simplest form, a continuous integration tool sits in a
|
||||
loop building and releasing software components from a version
|
||||
management system. For each component, it performs the
|
||||
following tasks:
|
||||
|
||||
<itemizedlist>
|
||||
|
||||
<listitem>It obtains the latest version of the component's
|
||||
source code from the version management system.
|
||||
</listitem>
|
||||
|
||||
<listitem> It runs the component's build process (which
|
||||
presumably includes the execution of the component's test
|
||||
set).
|
||||
</listitem>
|
||||
|
||||
<listitem> It presents the results of the build (such as error
|
||||
logs and releases) to the developers, e.g., by producing a web
|
||||
page.
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
|
||||
Examples of continuous integration tools include Jenkins,
|
||||
CruiseControl, Tinderbox, Sisyphus, Anthill and BuildBot. These
|
||||
tools have various limitations.
|
||||
|
||||
<itemizedlist>
|
||||
|
||||
<listitem> They do not manage the <emphasis>build
|
||||
environment</emphasis>. The build environment consists of the
|
||||
dependencies necessary to perform a build action, e.g.,
|
||||
compilers, libraries, etc. Setting up the environment is
|
||||
typically done manually, and without proper SCM control (so it
|
||||
may be hard to reproduce a build at a later time). Manual
|
||||
management of the environment scales poorly in the number of
|
||||
configurations that must be supported. For instance, suppose
|
||||
that we want to build a component that requires a certain
|
||||
compiler X. We then have to go to each machine and install X.
|
||||
If we later need a newer version of X, the process must be
|
||||
repeated all over again.  An even worse problem occurs if
|
||||
there are conflicting, mutually exclusive versions of the
|
||||
dependencies. Thus, simply installing the latest version is
|
||||
not an option. Of course, we can install these components in
|
||||
different directories and manually pass the appropriate paths
|
||||
to the build processes of the various components. But this is
|
||||
a rather tiresome and error-prone process.
|
||||
</listitem>
|
||||
|
||||
<listitem> They do not easily support <emphasis>variability in software
|
||||
systems</emphasis>. A system may have a great deal of build-time
|
||||
variability: optional functionality, whether to build a debug or
|
||||
production version, different versions of dependencies, and so on.
|
||||
(For instance, the Linux kernel now has over 2,600 build-time
|
||||
configuration switches.) It is therefore important that a continuous
|
||||
integration tool can easily select and test different instances from
|
||||
the configuration space of the system to reveal problems, such as
|
||||
erroneous interactions between features. In a continuous integration
|
||||
setting, it is also useful to test different combinations of versions
|
||||
of subsystems, e.g., the head revision of a component against stable
|
||||
releases of its dependencies, and vice versa, as this can reveal
|
||||
various integration problems.
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
<emphasis>Hydra</emphasis> is a continuous integration tool
|
||||
that solves these problems. It is built on top of the <link
|
||||
xlink:href="http://nixos.org/nix/">Nix package manager</link>,
|
||||
which has a purely functional language for describing package
|
||||
build actions and their dependencies. This allows the build
|
||||
environment for projects to be produced automatically and
|
||||
deterministically, and variability in components to be expressed
|
||||
naturally using functions; and as such is an ideal fit for a
|
||||
continuous build system.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>About Us</title>
|
||||
|
||||
<para>
|
||||
Hydra is the successor of the Nix Buildfarm, which was developed
|
||||
in tandem with the Nix software deployment system. Nix was
|
||||
originally developed at the Department of Information and
|
||||
Computing Sciences, Utrecht University by the TraCE project
|
||||
(2003-2008). The project was funded by the Software Engineering
|
||||
Research Program Jacquard to improve the support for variability
|
||||
in software systems. Funding for the development of Nix and
|
||||
Hydra is now provided by the NIRICT LaQuSo Build Farm project.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>About this Manual</title>
|
||||
|
||||
<para>
|
||||
This manual tells you how to install the Hydra buildfarm
|
||||
software on your own server and how to operate that server using
|
||||
its web interface.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
|
||||
<section>
|
||||
<title>License</title>
|
||||
|
||||
<para>
|
||||
Hydra is free software: you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License as
|
||||
published by the Free Software Foundation, either version 3 of
|
||||
the License, or (at your option) any later version.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Hydra is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
<link xlink:href="http://www.gnu.org/licenses/">GNU General
|
||||
Public License</link> for more details.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Hydra at <literal>nixos.org</literal></title>
|
||||
|
||||
<para>
|
||||
The <literal>nixos.org</literal> installation of Hydra runs at
|
||||
<link
|
||||
xlink:href="http://hydra.nixos.org/"><literal>http://hydra.nixos.org/</literal></link>.
|
||||
|
||||
That installation is used to build software components from the
|
||||
<link xlink:href="http://nixos.org">Nix</link>,
|
||||
<link xlink:href="http://nixos.org/nixos">NixOS</link>,
|
||||
<link xlink:href="http://www.gnu.org/">GNU</link>,
|
||||
<link xlink:href="http://strategoxt.org">Stratego/XT</link>,
|
||||
and related projects.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you are one of the developers on those projects, it is likely
|
||||
that you will be using the NixOS Hydra server in some way. If
|
||||
you need to administer automatic builds for your project, you
|
||||
should pull the right strings to get an account on the
|
||||
server. This manual will tell you how to set up new projects and
|
||||
build jobs within those projects and write a release.nix file to
|
||||
describe the build process of your project to Hydra. You can
|
||||
skip the next chapter.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If your project does not yet have automatic builds within the
|
||||
NixOS Hydra server, it may actually be eligible. We are in the
|
||||
process of setting up a large buildfarm that should be able to
|
||||
support open source and academic software projects. Get in
|
||||
touch.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Hydra on your own buildfarm</title>
|
||||
|
||||
<para>
|
||||
If you need to run your own Hydra installation, <xref
|
||||
linkend="chap-installation" /> explains how to download and
|
||||
install the system on your own server.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
</chapter>
|
||||
|
||||
<!--
|
||||
Local Variables:
|
||||
indent-tabs-mode: nil
|
||||
ispell-local-dictionary: "american"
|
||||
End:
|
||||
-->
|
||||
@@ -1,70 +0,0 @@
|
||||
<book xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude">
|
||||
|
||||
<info>
|
||||
|
||||
<title>Hydra User's Guide</title>
|
||||
|
||||
<subtitle>Draft</subtitle>
|
||||
|
||||
<authorgroup>
|
||||
<author>
|
||||
<personname>
|
||||
<firstname>Eelco</firstname>
|
||||
<surname>Dolstra</surname>
|
||||
</personname>
|
||||
<affiliation>
|
||||
<orgname>Delft University of Technology</orgname>
|
||||
<orgdiv>Department of Software Technology</orgdiv>
|
||||
</affiliation>
|
||||
<contrib>Author</contrib>
|
||||
</author>
|
||||
<author>
|
||||
<personname>
|
||||
<firstname>Rob</firstname>
|
||||
<surname>Vermaas</surname>
|
||||
</personname>
|
||||
<affiliation>
|
||||
<orgname>Delft University of Technology</orgname>
|
||||
<orgdiv>Department of Software Technology</orgdiv>
|
||||
</affiliation>
|
||||
<contrib>Author</contrib>
|
||||
</author>
|
||||
<author>
|
||||
<personname>
|
||||
<firstname>Eelco</firstname>
|
||||
<surname>Visser</surname>
|
||||
</personname>
|
||||
<affiliation>
|
||||
<orgname>Delft University of Technology</orgname>
|
||||
<orgdiv>Department of Software Technology</orgdiv>
|
||||
</affiliation>
|
||||
<contrib>Author</contrib>
|
||||
</author>
|
||||
<author>
|
||||
<personname>
|
||||
<firstname>Ludovic</firstname>
|
||||
<surname>Courtès</surname>
|
||||
</personname>
|
||||
<contrib>Author</contrib>
|
||||
</author>
|
||||
</authorgroup>
|
||||
|
||||
|
||||
<copyright>
|
||||
<year>2009-2013</year>
|
||||
<holder>Eelco Dolstra</holder>
|
||||
</copyright>
|
||||
|
||||
<date>March 2010</date>
|
||||
|
||||
</info>
|
||||
|
||||
<xi:include href="introduction.xml" />
|
||||
<xi:include href="installation.xml" />
|
||||
<xi:include href="projects.xml" />
|
||||
<xi:include href="api.xml" />
|
||||
<xi:include href="hacking.xml" />
|
||||
|
||||
|
||||
</book>
|
||||
36
doc/manual/meson.build
Normal file
36
doc/manual/meson.build
Normal file
@@ -0,0 +1,36 @@
|
||||
srcs = files(
|
||||
'src/SUMMARY.md',
|
||||
'src/about.md',
|
||||
'src/api.md',
|
||||
'src/configuration.md',
|
||||
'src/hacking.md',
|
||||
'src/installation.md',
|
||||
'src/introduction.md',
|
||||
'src/jobs.md',
|
||||
'src/monitoring/README.md',
|
||||
'src/notifications.md',
|
||||
'src/plugins/README.md',
|
||||
'src/plugins/RunCommand.md',
|
||||
'src/plugins/declarative-projects.md',
|
||||
'src/projects.md',
|
||||
'src/webhooks.md',
|
||||
)
|
||||
|
||||
manual = custom_target(
|
||||
'manual',
|
||||
command: [
|
||||
mdbook,
|
||||
'build',
|
||||
'@SOURCE_ROOT@/doc/manual',
|
||||
'-d', meson.current_build_dir() / 'html'
|
||||
],
|
||||
depend_files: srcs,
|
||||
output: ['html'],
|
||||
build_by_default: true,
|
||||
)
|
||||
|
||||
install_subdir(
|
||||
manual.full_path(),
|
||||
install_dir: get_option('datadir') / 'doc/hydra',
|
||||
strip_directory: true,
|
||||
)
|
||||
@@ -1,496 +0,0 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
xml:id="chap-projects">
|
||||
|
||||
<title>Creating and Managing Projects</title>
|
||||
|
||||
<para>
|
||||
Once Hydra is installed and running, the next step is to add
|
||||
projects to the build farm. We follow the example of the <link
|
||||
xlink:href="http://nixos.org/patchelf.html">Patchelf
|
||||
project</link>, a software tool written in C and using the GNU
|
||||
Build System (GNU Autoconf and GNU Automake).
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Log in to the web interface of your Hydra installation using the
|
||||
user name and password you inserted in the database (by default,
|
||||
Hydra's web server listens on <link
|
||||
xlink:href="http://localhost:3000/"><literal>localhost:3000</literal></link>).
|
||||
Then follow the "Create Project" link to create a new project.
|
||||
</para>
|
||||
|
||||
<section>
|
||||
<title>Project Information</title>
|
||||
|
||||
<para>
|
||||
A project definition consists of some general information and a
|
||||
set of job sets. The general information identifies a project,
|
||||
its owner, and current state of activity.
|
||||
|
||||
Here's what we fill in for the patchelf project:
|
||||
|
||||
<screen>
|
||||
Identifier: patchelf
|
||||
</screen>
|
||||
|
||||
The <emphasis>identifier</emphasis> is the identity of the
|
||||
project. It is used in URLs and in the names of build results.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The identifier should be a unique name (it is the primary
|
||||
database key for the project table in the database). If you try
|
||||
to create a project with an already existing identifier, you will
|
||||
get an error message from the database.
|
||||
|
||||
So try to create the project after entering just the general
|
||||
information to figure out if you have chosen a unique name.
|
||||
Job sets can be added once the project has been created.
|
||||
|
||||
<screen>
|
||||
Display name: Patchelf
|
||||
</screen>
|
||||
|
||||
The <emphasis>display name</emphasis> is used in menus.
|
||||
|
||||
<screen>
|
||||
Description: A tool for modifying ELF binaries
|
||||
</screen>
|
||||
|
||||
The <emphasis>description</emphasis> is used as short
|
||||
documentation of the nature of the project.
|
||||
|
||||
<screen>
|
||||
Owner: eelco
|
||||
</screen>
|
||||
|
||||
The <emphasis>owner</emphasis> of a project can create and edit
|
||||
job sets.
|
||||
|
||||
<screen>
|
||||
Enabled: Yes
|
||||
</screen>
|
||||
|
||||
Only if the project is <emphasis>enabled</emphasis> are builds
|
||||
performed.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once created there should be an entry for the project in the
|
||||
sidebar. Go to the project page for the <link
|
||||
xlink:href="http://localhost:3000/project/patchelf">Patchelf</link>
|
||||
project.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Job Sets</title>
|
||||
|
||||
<para>
|
||||
A project can consist of multiple <emphasis>job sets</emphasis>
|
||||
(hereafter <emphasis>jobsets</emphasis>), separate tasks that
|
||||
can be built separately, but may depend on each other (without
|
||||
cyclic dependencies, of course). Go to the <link
|
||||
xlink:href="http://localhost:3000/project/patchelf/edit">Edit</link>
|
||||
page of the Patchelf project and "Add a new jobset" by providing
|
||||
the following "Information":
|
||||
|
||||
<screen>
|
||||
Identifier: trunk
|
||||
Description: Trunk
|
||||
Nix expression: release.nix in input patchelfSrc
|
||||
</screen>
|
||||
|
||||
This states that in order to build the <literal>trunk</literal>
|
||||
jobset, the Nix expression in the file
|
||||
<filename>release.nix</filename>, which can be obtained from
|
||||
input <literal>patchelfSrc</literal>, should be
|
||||
evaluated. (We'll have a look at
|
||||
<filename>release.nix</filename> later.)
|
||||
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To realize a job we probably need a number of inputs, which can
|
||||
be declared in the table below. As many inputs as required can
|
||||
be added. For patchelf we declare the following inputs.
|
||||
|
||||
<screen>
|
||||
patchelfSrc
|
||||
'Git checkout' https://github.com/NixOS/patchelf
|
||||
|
||||
nixpkgs 'Git checkout' https://github.com/NixOS/nixpkgs
|
||||
|
||||
officialRelease Boolean false
|
||||
|
||||
system String value "i686-linux"
|
||||
</screen>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Building Jobs</title>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Build Recipes</title>
|
||||
|
||||
<para>
|
||||
Build jobs and <emphasis>build recipes</emphasis> for a jobset are
|
||||
specified in a text file written in the <link
|
||||
xlink:href="http://nixos.org/nix/">Nix language</link>. The
|
||||
recipe is actually called a <emphasis>Nix expression</emphasis> in
|
||||
Nix parlance. By convention this file is often called
|
||||
<filename>release.nix</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <filename>release.nix</filename> file is typically kept under
|
||||
version control, and the repository that contains it is one of the
build inputs of the corresponding jobset, often called
<literal>hydraConfig</literal> by convention. The repository for
|
||||
that file and the actual file name are specified on the web
|
||||
interface of Hydra under the <literal>Setup</literal> tab of the
|
||||
jobset's overview page, under the <literal>Nix
|
||||
expression</literal> heading. See, for example, the <link
|
||||
xlink:href="http://hydra.nixos.org/jobset/patchelf/trunk">jobset
|
||||
overview page</link> of the PatchELF project, and <link
|
||||
xlink:href="https://github.com/NixOS/patchelf/blob/master/release.nix">
|
||||
the corresponding Nix file</link>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Knowledge of the Nix language is recommended, but the example
|
||||
below should already give a good idea of how it works:
|
||||
</para>
|
||||
|
||||
<example xml:id='ex-hello'>
|
||||
<title><filename>release.nix</filename> file for GNU Hello</title>
|
||||
<programlisting>
|
||||
let
|
||||
pkgs = import <nixpkgs> {}; <co xml:id='ex-hello-co-import-nixpkgs' />
|
||||
|
||||
jobs = rec { <co xml:id='ex-hello-co-jobs' />
|
||||
|
||||
tarball = <co xml:id='ex-hello-co-tarball' />
|
||||
pkgs.releaseTools.sourceTarball { <co xml:id='ex-hello-co-source-tarball' />
|
||||
name = "hello-tarball";
|
||||
src = <hello>; <co xml:id='ex-hello-co-tarball-args' />
|
||||
buildInputs = (with pkgs; [ gettext texLive texinfo ]);
|
||||
};
|
||||
|
||||
build = <co xml:id='ex-hello-co-build' />
|
||||
{ system ? builtins.currentSystem }: <co xml:id='ex-hello-co-build-args' />
|
||||
|
||||
let pkgs = import <nixpkgs> { inherit system; }; in
|
||||
pkgs.releaseTools.nixBuild { <co xml:id='ex-hello-co-nix-build' />
|
||||
name = "hello";
|
||||
src = jobs.tarball;
|
||||
configureFlags = [ "--disable-silent-rules" ];
|
||||
};
|
||||
};
|
||||
in
|
||||
jobs <co xml:id='ex-hello-co-body' />
|
||||
</programlisting>
|
||||
</example>
|
||||
|
||||
<para>
|
||||
<xref linkend='ex-hello' /> shows what a
|
||||
<filename>release.nix</filename> file for <link
|
||||
xlink:href="http://www.gnu.org/software/hello/">GNU Hello</link>
|
||||
would look like. GNU Hello is representative of many GNU
|
||||
and non-GNU free software projects:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>it uses the GNU Build System, namely GNU Autoconf,
|
||||
and GNU Automake; for users, it means it can be installed
|
||||
using the <link
|
||||
xlink:href="http://www.gnu.org/prep/standards/html_node/Managing-Releases.html">usual
|
||||
<literal>./configure && make install</literal>
|
||||
procedure</link>;
|
||||
</listitem>
|
||||
<listitem>it uses Gettext for internationalization;</listitem>
|
||||
<listitem>it has a Texinfo manual, which can be rendered as PDF
|
||||
with TeX.</listitem>
|
||||
</itemizedlist>
|
||||
|
||||
The file defines a jobset consisting of two jobs:
|
||||
<literal>tarball</literal>, and <literal>build</literal>. It
|
||||
contains the following elements (referenced from the figure by
|
||||
numbers):
|
||||
|
||||
<calloutlist>
|
||||
|
||||
<callout arearefs='ex-hello-co-import-nixpkgs'>
|
||||
<para>
|
||||
This defines a variable <varname>pkgs</varname> holding
|
||||
the set of packages provided by <link
|
||||
xlink:href="http://nixos.org/nixpkgs/">Nixpkgs</link>.
|
||||
</para>
|
||||
<para>
|
||||
Since <varname>nixpkgs</varname> appears in angle brackets,
|
||||
there must be a build input of that name in the Nix search
|
||||
path. In this case, the web interface should show a
|
||||
<varname>nixpkgs</varname> build input, which is a checkout
|
||||
of the Nixpkgs source code repository; Hydra then adds this
|
||||
and other build inputs to the Nix search path when
|
||||
evaluating <filename>release.nix</filename>.
|
||||
</para>
|
||||
</callout>
|
||||
|
||||
<callout arearefs='ex-hello-co-jobs'>
|
||||
<para>
|
||||
This defines a variable holding the two Hydra
|
||||
jobs, an <emphasis>attribute set</emphasis> in Nix.
|
||||
</para>
|
||||
</callout>
|
||||
|
||||
<callout arearefs='ex-hello-co-tarball'>
|
||||
<para>
|
||||
This is the definition of the first job, named
|
||||
<varname>tarball</varname>. The purpose of this job is to
|
||||
produce a usable source code tarball.
|
||||
</para>
|
||||
</callout>
|
||||
<callout arearefs='ex-hello-co-source-tarball'>
|
||||
<para>
|
||||
The <varname>tarball</varname> job calls the
|
||||
<varname>sourceTarball</varname> function, which (roughly)
|
||||
runs <command>autoreconf && ./configure &&
|
||||
make dist</command> on the checkout. The
|
||||
<varname>buildInputs</varname> attribute specifies
|
||||
additional software dependencies for the
|
||||
job<footnote><para>The package names used in
|
||||
<varname>buildInputs</varname> (e.g.,
<varname>texLive</varname>) are the names of the
|
||||
<emphasis>attributes</emphasis> corresponding to these
|
||||
packages in Nixpkgs, specifically in the <link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/all-packages.nix"><filename>all-packages.nix</filename></link>
|
||||
file. See the section entitled “Package Naming” in the
|
||||
Nixpkgs manual for more information.</para></footnote>.
|
||||
</para>
|
||||
</callout>
|
||||
<callout arearefs='ex-hello-co-tarball-args'>
|
||||
<para>
|
||||
The <varname>tarball</varname> job expects a
|
||||
<varname>hello</varname> build input to be available in the
|
||||
Nix search path. Again, this input is passed by Hydra and
|
||||
is meant to be a checkout of GNU Hello's source code
|
||||
repository.
|
||||
</para>
|
||||
</callout>
|
||||
|
||||
<callout arearefs='ex-hello-co-build'>
|
||||
<para>
|
||||
This is the definition of the <varname>build</varname>
|
||||
job, whose purpose is to build Hello from the tarball
|
||||
produced above.
|
||||
</para>
|
||||
</callout>
|
||||
<callout arearefs='ex-hello-co-build-args'>
|
||||
<para>
|
||||
The <varname>build</varname> function takes one
|
||||
parameter, <varname>system</varname>, which should be a string
|
||||
defining the Nix system type, e.g.,
|
||||
<literal>"x86_64-linux"</literal>. Additionally, it refers
|
||||
to <varname>jobs.tarball</varname>, seen above.
|
||||
</para>
|
||||
<para>
|
||||
Hydra inspects the formal argument list of the function
|
||||
(here, the <varname>system</varname> argument) and passes it
|
||||
the corresponding parameter specified as a build input on
|
||||
Hydra's web interface. Here, <varname>system</varname> is
|
||||
passed by Hydra when it calls <varname>build</varname>.
|
||||
Thus, it must be defined as a build input of type string in
|
||||
Hydra, which could take one of several values.
|
||||
</para>
|
||||
<para>
|
||||
The question mark after <literal>system</literal> defines
|
||||
the default value for this argument, and is only useful when
|
||||
debugging locally.
|
||||
</para>
|
||||
</callout>
|
||||
<callout arearefs='ex-hello-co-nix-build'>
|
||||
<para>
|
||||
The <varname>build</varname> job calls the
|
||||
<varname>nixBuild</varname> function, which unpacks the
|
||||
tarball, then runs <command>./configure && make
|
||||
&& make check && make install</command>.
|
||||
</para>
|
||||
</callout>
|
||||
|
||||
<callout arearefs='ex-hello-co-body'>
|
||||
<para>
|
||||
Finally, the set of jobs is returned to Hydra, as a Nix
|
||||
attribute set.
|
||||
</para>
|
||||
</callout>
|
||||
</calloutlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Building from the Command Line</title>
|
||||
|
||||
<para>
|
||||
It is often useful to test a build recipe, for instance before
|
||||
it is actually used by Hydra, when testing changes, or when
|
||||
debugging a build issue. Since build recipes for Hydra jobsets
|
||||
are just plain Nix expressions, they can be evaluated using the
|
||||
standard Nix tools.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To evaluate the <varname>tarball</varname> jobset of <xref
|
||||
linkend='ex-hello' />, just run:
|
||||
|
||||
<screen>
|
||||
$ nix-build release.nix -A tarball
|
||||
</screen>
|
||||
|
||||
However, doing this with <xref linkend='ex-hello' /> as is will
|
||||
probably yield an error like this:
|
||||
|
||||
<screen>
|
||||
error: user-thrown exception: file `hello' was not found in the Nix search path (add it using $NIX_PATH or -I)
|
||||
</screen>
|
||||
|
||||
The error is self-explanatory. Assuming
|
||||
<filename>$HOME/src/hello</filename> points to a checkout of
|
||||
Hello, this can be fixed this way:
|
||||
|
||||
<screen>
|
||||
$ nix-build -I ~/src release.nix -A tarball
|
||||
</screen>
|
||||
|
||||
Similarly, the <varname>build</varname> jobset can be evaluated:
|
||||
|
||||
<screen>
|
||||
$ nix-build -I ~/src release.nix -A build
|
||||
</screen>
|
||||
|
||||
The <varname>build</varname> job reuses the result of the
|
||||
<varname>tarball</varname> job, rebuilding it only if it needs to.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Adding More Jobs</title>
|
||||
|
||||
<para>
|
||||
<xref linkend='ex-hello' /> illustrates how to write the most
|
||||
basic jobs, <varname>tarball</varname> and
|
||||
<varname>build</varname>. In practice, much more can be done by
|
||||
using features readily provided by Nixpkgs or by creating new jobs
|
||||
as customizations of existing jobs.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For instance, a test coverage report for projects compiled with GCC
|
||||
can be automatically generated using the
|
||||
<varname>coverageAnalysis</varname> function provided by Nixpkgs
|
||||
instead of <varname>nixBuild</varname>. Back to our GNU Hello
|
||||
example, we can define a <varname>coverage</varname> job that
|
||||
produces an HTML code coverage report directly readable from the
|
||||
corresponding Hydra build page:
|
||||
|
||||
<programlisting>
|
||||
coverage =
|
||||
{ system ? builtins.currentSystem }:
|
||||
|
||||
let pkgs = import <nixpkgs> { inherit system; }; in
|
||||
pkgs.releaseTools.coverageAnalysis {
|
||||
name = "hello";
|
||||
src = jobs.tarball;
|
||||
configureFlags = [ "--disable-silent-rules" ];
|
||||
};
|
||||
</programlisting>
|
||||
|
||||
As can be seen, the only difference compared to
|
||||
<varname>build</varname> is the use of
|
||||
<varname>coverageAnalysis</varname>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Nixpkgs provides many more build tools, including the ability to
|
||||
run builds in virtual machines, which can themselves run another
GNU/Linux distribution, allowing the creation of packages
|
||||
for these distributions. Please see <link
|
||||
xlink:href="https://github.com/NixOS/nixpkgs/tree/master/pkgs/build-support/release">the
|
||||
<filename>pkgs/build-support/release</filename> directory</link>
|
||||
of Nixpkgs for more. The NixOS manual also contains information
|
||||
about whole-system testing in virtual machines.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Now, assume we want to build Hello with an old version of GCC, and
|
||||
with different <command>configure</command> flags. A new
|
||||
<varname>build_exotic</varname> job can be written that simply
|
||||
<emphasis>overrides</emphasis> the relevant arguments passed to
|
||||
<varname>nixBuild</varname>:
|
||||
|
||||
<programlisting>
|
||||
build_exotic =
|
||||
{ system ? builtins.currentSystem }:
|
||||
|
||||
let
|
||||
pkgs = import <nixpkgs> { inherit system; };
|
||||
build = jobs.build { inherit system; };
|
||||
in
|
||||
pkgs.lib.overrideDerivation build (attrs: {
|
||||
buildInputs = [ pkgs.gcc33 ];
|
||||
preConfigure = "gcc --version";
|
||||
configureFlags =
|
||||
attrs.configureFlags ++ [ "--disable-nls" ];
|
||||
});
|
||||
</programlisting>
|
||||
|
||||
The <varname>build_exotic</varname> job reuses
|
||||
<varname>build</varname> and overrides some of its arguments: it
|
||||
adds a dependency on GCC 3.3, a pre-configure phase that runs
|
||||
<command>gcc --version</command>, and adds the
|
||||
<literal>--disable-nls</literal> configure flag.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This customization mechanism is very powerful. For instance, it
|
||||
can be used to change the way Hello and <emphasis>all</emphasis>
|
||||
its dependencies (including the C library and compiler used to
build it) are built. See the Nixpkgs manual for more.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<xi:include href="declarative-projects.xml" />
|
||||
|
||||
<section>
|
||||
<title>Email Notifications</title>
|
||||
<para>
|
||||
Hydra can send email notifications when the status of a build changes. This provides
|
||||
immediate feedback to maintainers or committers when a change causes build failures.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The simplest approach to enable Email Notifications is to use the ssmtp package, which
|
||||
simply hands off the emails to another SMTP server. For details on how to configure ssmtp,
|
||||
see the documentation for the <varname>networking.defaultMailServer</varname> option.
|
||||
To use ssmtp for the Hydra email notifications, add it to the path option of the Hydra services
|
||||
in your <filename>/etc/nixos/configuration.nix</filename> file:
|
||||
<programlisting>
|
||||
systemd.services.hydra-queue-runner.path = [ pkgs.ssmtp ];
|
||||
systemd.services.hydra-server.path = [ pkgs.ssmtp ];
|
||||
</programlisting>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
</chapter>
|
||||
|
||||
<!--
|
||||
Local Variables:
|
||||
indent-tabs-mode: nil
|
||||
ispell-local-dictionary: "american"
|
||||
End:
|
||||
-->
|
||||
19
doc/manual/src/SUMMARY.md
Normal file
19
doc/manual/src/SUMMARY.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Hydra User's Guide
|
||||
|
||||
- [Introduction](introduction.md)
|
||||
- [Installation](installation.md)
|
||||
- [Configuration](configuration.md)
|
||||
- [Creating and Managing Projects](projects.md)
|
||||
- [Hydra jobs](./jobs.md)
|
||||
- [Plugins](./plugins/README.md)
|
||||
- [Declarative Projects](./plugins/declarative-projects.md)
|
||||
- [RunCommand](./plugins/RunCommand.md)
|
||||
- [Using the external API](api.md)
|
||||
- [Webhooks](webhooks.md)
|
||||
- [Monitoring Hydra](./monitoring/README.md)
|
||||
|
||||
## Developer's Guide
|
||||
- [Hacking](hacking.md)
|
||||
- [Hydra Notifications](notifications.md)
|
||||
-----------
|
||||
[About](about.md)
|
||||
6
doc/manual/src/about.md
Normal file
6
doc/manual/src/about.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Authors
|
||||
|
||||
* Eelco Dolstra, Delft University of Technology, Department of Software Technology
|
||||
* Rob Vermaas, Delft University of Technology, Department of Software Technology
|
||||
* Eelco Visser, Delft University of Technology, Department of Software Technology
|
||||
* Ludovic Courtès
|
||||
249
doc/manual/src/api.md
Normal file
249
doc/manual/src/api.md
Normal file
@@ -0,0 +1,249 @@
|
||||
Using the external API
|
||||
======================
|
||||
|
||||
To be able to create integrations with other services, Hydra exposes an
|
||||
external API that you can manage projects with.
|
||||
|
||||
The API is accessed over HTTP(s) where all data is sent and received as
|
||||
JSON.
|
||||
|
||||
Creating resources requires the caller to be authenticated, while
|
||||
retrieving resources does not.
|
||||
|
||||
The API does not have a separate URL structure for its endpoints.
Instead, you request the pages of the web interface as `application/json`
|
||||
to use the API.
|
||||
|
||||
List projects
|
||||
-------------
|
||||
|
||||
To list all the `projects` of the Hydra install:
|
||||
|
||||
GET /
|
||||
Accept: application/json
|
||||
|
||||
This will give you a list of `projects`, where each `project` contains
|
||||
general information and a list of its `job sets`.
|
||||
|
||||
**Example**
|
||||
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org
|
||||
|
||||
**Note:** this response is truncated
|
||||
|
||||
GET https://hydra.nixos.org/
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"displayname": "Acoda",
|
||||
"name": "acoda",
|
||||
"description": "Acoda is a tool set for automatic data migration along an evolving data model",
|
||||
"enabled": 0,
|
||||
"owner": "sander",
|
||||
"hidden": 1,
|
||||
"jobsets": [
|
||||
"trunk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"displayname": "cabal2nix",
|
||||
"name": "cabal2nix",
|
||||
"description": "Convert Cabal files into Nix build instructions",
|
||||
"enabled": 0,
|
||||
"owner": "simons@cryp.to",
|
||||
"hidden": 1,
|
||||
"jobsets": [
|
||||
"master"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
Get a single project
|
||||
--------------------
|
||||
|
||||
To get a single `project` by identifier:
|
||||
|
||||
GET /project/:project-identifier
|
||||
Accept: application/json
|
||||
|
||||
**Example**
|
||||
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/project/hydra
|
||||
|
||||
GET https://hydra.nixos.org/project/hydra
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"description": "Hydra, the Nix-based continuous build system",
|
||||
"hidden": 0,
|
||||
"displayname": "Hydra",
|
||||
"jobsets": [
|
||||
"hydra-master",
|
||||
"hydra-ant-logger-trunk",
|
||||
"master",
|
||||
"build-ng"
|
||||
],
|
||||
"name": "hydra",
|
||||
"enabled": 1,
|
||||
"owner": "eelco"
|
||||
}
|
||||
|
||||
Get a single job set
|
||||
--------------------
|
||||
|
||||
To get a single `job set` by identifier:
|
||||
|
||||
GET /jobset/:project-identifier/:jobset-identifier
|
||||
Content-Type: application/json
|
||||
|
||||
**Example**
|
||||
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/jobset/hydra/build-ng
|
||||
|
||||
GET https://hydra.nixos.org/jobset/hydra/build-ng
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"errormsg": "evaluation failed due to signal 9 (Killed)",
|
||||
"fetcherrormsg": null,
|
||||
"nixexprpath": "release.nix",
|
||||
"nixexprinput": "hydraSrc",
|
||||
"emailoverride": "rob.vermaas@gmail.com, eelco.dolstra@logicblox.com",
|
||||
"jobsetinputs": {
|
||||
"officialRelease": {
|
||||
"jobsetinputalts": [
|
||||
"false"
|
||||
]
|
||||
},
|
||||
"hydraSrc": {
|
||||
"jobsetinputalts": [
|
||||
"https://github.com/NixOS/hydra.git build-ng"
|
||||
]
|
||||
},
|
||||
"nixpkgs": {
|
||||
"jobsetinputalts": [
|
||||
"https://github.com/NixOS/nixpkgs.git release-14.12"
|
||||
]
|
||||
}
|
||||
},
|
||||
"enabled": 0
|
||||
}
|
||||
|
||||
List evaluations
|
||||
----------------
|
||||
|
||||
To list the `evaluations` of a `job set` by identifier:
|
||||
|
||||
GET /jobset/:project-identifier/:jobset-identifier/evals
|
||||
Content-Type: application/json
|
||||
|
||||
**Example**
|
||||
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/jobset/hydra/build-ng/evals
|
||||
|
||||
**Note:** this response is truncated
|
||||
|
||||
GET https://hydra.nixos.org/jobset/hydra/build-ng/evals
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"evals": [
|
||||
{
|
||||
"jobsetevalinputs": {
|
||||
"nixpkgs": {
|
||||
"dependency": null,
|
||||
"type": "git",
|
||||
"value": null,
|
||||
"uri": "https://github.com/NixOS/nixpkgs.git",
|
||||
"revision": "f60e48ce81b6f428d072d3c148f6f2e59f1dfd7a"
|
||||
},
|
||||
"hydraSrc": {
|
||||
"dependency": null,
|
||||
"type": "git",
|
||||
"value": null,
|
||||
"uri": "https://github.com/NixOS/hydra.git",
|
||||
"revision": "48d6f0de2ab94f728d287b9c9670c4d237e7c0f6"
|
||||
},
|
||||
"officialRelease": {
|
||||
"dependency": null,
|
||||
"value": "false",
|
||||
"type": "boolean",
|
||||
"uri": null,
|
||||
"revision": null
|
||||
}
|
||||
},
|
||||
"hasnewbuilds": 1,
|
||||
"builds": [
|
||||
24670686,
|
||||
24670684,
|
||||
24670685,
|
||||
24670687
|
||||
],
|
||||
"id": 1213758
|
||||
}
|
||||
],
|
||||
"first": "?page=1",
|
||||
"last": "?page=1"
|
||||
}
|
||||
|
||||
Get a single build
|
||||
------------------
|
||||
|
||||
To get a single `build` by its id:
|
||||
|
||||
GET /build/:build-id
|
||||
Content-Type: application/json
|
||||
|
||||
**Example**
|
||||
|
||||
curl -i -H 'Accept: application/json' \
|
||||
https://hydra.nixos.org/build/24670686
|
||||
|
||||
GET /build/24670686
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"job": "tests.api.x86_64-linux",
|
||||
"jobsetevals": [
|
||||
1213758
|
||||
],
|
||||
"buildstatus": 0,
|
||||
"buildmetrics": null,
|
||||
"project": "hydra",
|
||||
"system": "x86_64-linux",
|
||||
"priority": 100,
|
||||
"releasename": null,
|
||||
"starttime": 1439402853,
|
||||
"nixname": "vm-test-run-unnamed",
|
||||
"timestamp": 1439388618,
|
||||
"id": 24670686,
|
||||
"stoptime": 1439403403,
|
||||
"jobset": "build-ng",
|
||||
"buildoutputs": {
|
||||
"out": {
|
||||
"path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed"
|
||||
}
|
||||
},
|
||||
"buildproducts": {
|
||||
"1": {
|
||||
"path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed",
|
||||
"defaultpath": "log.html",
|
||||
"type": "report",
|
||||
"sha256hash": null,
|
||||
"filesize": null,
|
||||
"name": "",
|
||||
"subtype": "testlog"
|
||||
}
|
||||
},
|
||||
"finished": 1
|
||||
}
|
||||
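The JSON endpoints compose well with standard command-line tooling. As a
minimal sketch (assuming `curl` and `jq` are available, and reusing the
build id from the example above), the status of a finished build can be
extracted like this:

```console
# Query the documented /build/:build-id endpoint and pull out two fields;
# buildstatus 0 means the build succeeded.
$ curl -s -H 'Accept: application/json' https://hydra.nixos.org/build/24670686 \
    | jq '{finished, buildstatus}'
```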
276
doc/manual/src/configuration.md
Normal file
276
doc/manual/src/configuration.md
Normal file
@@ -0,0 +1,276 @@
|
||||
Configuration
|
||||
=============
|
||||
|
||||
This chapter is a collection of configuration snippets for different
|
||||
scenarios.
|
||||
|
||||
The configuration is parsed by `Config::General`, which has [thorough
documentation of its file format](https://metacpan.org/pod/Config::General#CONFIG-FILE-FORMAT).
|
||||
Hydra calls the parser with the following options:
|
||||
- `-UseApacheInclude => 1`
|
||||
- `-IncludeAgain => 1`
|
||||
- `-IncludeRelative => 1`
|
||||
|
||||
Including files
|
||||
---------------
|
||||
|
||||
`hydra.conf` supports Apache-style includes. This is **IMPORTANT**
|
||||
because that is how you keep your **secrets** out of the **Nix store**.
|
||||
Hopefully this got your attention 😌
|
||||
|
||||
This:
|
||||
```
|
||||
<github_authorization>
|
||||
NixOS = Bearer gha-secret😱secret😱secret😱
|
||||
</github_authorization>
|
||||
```
|
||||
should **NOT** be in `hydra.conf`.
|
||||
|
||||
`hydra.conf` is rendered in the Nix store and is therefore world-readable.
|
||||
|
||||
Instead, the above should be written to a file outside the Nix store by
|
||||
other means (manually, using NixOps' secrets feature, etc.) and included
|
||||
like so:
|
||||
```
|
||||
Include /run/keys/hydra/github_authorizations.conf
|
||||
```
|
||||
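How that file gets created is up to you. A minimal sketch (the path, owner
and mode are only examples, and it assumes the Hydra services run as the
`hydra` user):

```console
# Write the secret outside the Nix store and make it readable only by the hydra user.
$ sudo mkdir -p /run/keys/hydra
$ sudo tee /run/keys/hydra/github_authorizations.conf >/dev/null <<'EOF'
<github_authorization>
NixOS = Bearer <your-token-here>
</github_authorization>
EOF
$ sudo chown hydra:hydra /run/keys/hydra/github_authorizations.conf
$ sudo chmod 0400 /run/keys/hydra/github_authorizations.conf
```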
|
||||
Serving behind reverse proxy
|
||||
----------------------------
|
||||
|
||||
To serve the Hydra web server behind a reverse proxy like *nginx* or
*httpd*, some additional configuration is required.
|
||||
|
||||
Edit your `hydra.conf` file in a similar way to this example:
|
||||
|
||||
```conf
|
||||
using_frontend_proxy 1
|
||||
base_uri example.com
|
||||
```
|
||||
|
||||
`base_uri` should be your Hydra server's proxied URL. If you are using
the Hydra NixOS module, then setting the `hydraURL` option should be enough.
|
||||
|
||||
You also need to configure your reverse proxy to pass `X-Request-Base`
|
||||
to hydra, with the same value as `base_uri`.
|
||||
This also covers the case of serving Hydra with a prefix path,
|
||||
as in `http://example.com/hydra`.
|
||||
|
||||
For example, if you are using nginx, use a configuration similar to the
following:
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name example.com;
|
||||
.. other configuration ..
|
||||
location /hydra/ {
|
||||
|
||||
proxy_pass http://127.0.0.1:3000/;
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Request-Base /hydra;
|
||||
}
|
||||
}
|
||||
|
||||
Note the trailing slash on the `proxy_pass` directive, which causes nginx to
|
||||
strip off the `/hydra/` part of the URL before passing it to hydra.
|
||||
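A quick way to sanity-check the proxy (a sketch, assuming the example nginx
configuration above) is to confirm that requests under the prefix reach
Hydra and that generated links stay under `/hydra/`:

```console
# The proxied prefix should answer, and hrefs in the returned HTML
# should start with /hydra/ thanks to base_uri and X-Request-Base.
$ curl -sI https://example.com/hydra/ | head -n 5
$ curl -s https://example.com/hydra/ | grep -o 'href="/hydra/[^"]*"' | head
```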
|
||||
Populating a Cache
|
||||
------------------
|
||||
|
||||
A common use for Hydra is to pre-build and cache derivations which
|
||||
take a long time to build. While it is possible to directly access the
|
||||
Hydra server's store over SSH, a more scalable option is to upload
|
||||
built derivations to a remote store like an [S3-compatible object
|
||||
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
|
||||
the `store_uri` parameter will cause Hydra to sign and upload
|
||||
derivations as they are built:
|
||||
|
||||
```
|
||||
store_uri = s3://cache-bucket-name?compression=zstd¶llel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
|
||||
```
|
||||
|
||||
This example uses [Zstandard](https://github.com/facebook/zstd)
|
||||
compression on derivations to reduce CPU usage on the server, but
|
||||
[Brotli](https://brotli.org/) compression for derivation listings and
|
||||
build logs because it has better browser support.
|
||||
|
||||
See [`nix help
|
||||
stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
|
||||
for a description of the store URI format.
|
||||
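The `secret-key` referenced in `store_uri` can be generated with Nix
itself. A minimal sketch (the key name and paths are just examples):

```console
# Generate a signing key pair for the cache; keep the secret key out of the Nix store.
$ nix-store --generate-binary-cache-key cache-bucket-name-1 \
    /path/to/cache/private/key /path/to/cache/public/key
```

Clients that consume the cache then add the contents of the public key file
to the `trusted-public-keys` setting in their `nix.conf`.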
|
||||
Statsd Configuration
|
||||
--------------------
|
||||
|
||||
By default, Hydra will send stats to statsd at `localhost:8125`. Point Hydra to a different server via:
|
||||
|
||||
```
|
||||
<statsd>
|
||||
host = alternative.host
|
||||
port = 18125
|
||||
</statsd>
|
||||
```
|
||||
|
||||
hydra-notify's Prometheus service
|
||||
---------------------------------
|
||||
|
||||
hydra-notify supports running a Prometheus webserver for metrics. The
|
||||
exporter does not run unless a listen address and port are specified
|
||||
in the hydra configuration file, as below:
|
||||
|
||||
```conf
|
||||
<hydra_notify>
|
||||
<prometheus>
|
||||
listen_address = 127.0.0.1
|
||||
port = 9199
|
||||
</prometheus>
|
||||
</hydra_notify>
|
||||
```
|
||||
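Once enabled, the exporter can be checked from the configured address; a
sketch, assuming the conventional Prometheus `/metrics` path:

```console
# Fetch the metrics hydra-notify exposes on the address configured above.
$ curl -s http://127.0.0.1:9199/metrics | head
```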
|
||||
hydra-queue-runner's Prometheus service
|
||||
---------------------------------------
|
||||
|
||||
hydra-queue-runner supports running a Prometheus webserver for metrics. The
|
||||
exporter's address defaults to `127.0.0.1:9198`, but is also
configurable through the hydra configuration file and a command line argument,
as below. A port of `:0` will make the exporter choose a random available port.
|
||||
|
||||
```conf
|
||||
queue_runner_metrics_address = 127.0.0.1:9198
|
||||
# or
|
||||
queue_runner_metrics_address = [::]:9198
|
||||
```
|
||||
|
||||
```shell
|
||||
$ hydra-queue-runner --prometheus-address 127.0.0.1:9198
|
||||
# or
|
||||
$ hydra-queue-runner --prometheus-address [::]:9198
|
||||
```
|
||||
|
||||
Using LDAP as authentication backend (optional)
|
||||
-----------------------------------------------
|
||||
|
||||
Instead of using Hydra's built-in user management you can optionally
|
||||
use LDAP to manage roles and users.
|
||||
|
||||
This is configured by defining the `<ldap>` block in the configuration file.
|
||||
In this block it's possible to configure the authentication plugin in the
|
||||
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
|
||||
The documentation for the available settings can be found
|
||||
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
|
||||
|
||||
Note that the bind password (if needed) should be supplied as an included file to
|
||||
prevent it from leaking to the Nix store.
|
||||
|
||||
Roles can be assigned to users based on their LDAP group membership. For this
|
||||
to work *use\_roles = 1* needs to be defined for the authentication plugin.
|
||||
LDAP groups can then be mapped to Hydra roles using the `<role_mapping>` block.
|
||||
|
||||
Example configuration:
|
||||
```
|
||||
<ldap>
|
||||
<config>
|
||||
<credential>
|
||||
class = Password
|
||||
password_field = password
|
||||
password_type = self_check
|
||||
</credential>
|
||||
<store>
|
||||
class = LDAP
|
||||
ldap_server = localhost
|
||||
<ldap_server_options>
|
||||
timeout = 30
|
||||
</ldap_server_options>
|
||||
binddn = "cn=root,dc=example"
|
||||
include ldap-password.conf
|
||||
start_tls = 0
|
||||
<start_tls_options>
|
||||
verify = none
|
||||
</start_tls_options>
|
||||
user_basedn = "ou=users,dc=example"
|
||||
user_filter = "(&(objectClass=inetOrgPerson)(cn=%s))"
|
||||
user_scope = one
|
||||
user_field = cn
|
||||
<user_search_options>
|
||||
deref = always
|
||||
</user_search_options>
|
||||
# Important for role mappings to work:
|
||||
use_roles = 1
|
||||
role_basedn = "ou=groups,dc=example"
|
||||
role_filter = "(&(objectClass=groupOfNames)(member=%s))"
|
||||
role_scope = one
|
||||
role_field = cn
|
||||
role_value = dn
|
||||
<role_search_options>
|
||||
deref = always
|
||||
</role_search_options>
|
||||
</store>
|
||||
</config>
|
||||
<role_mapping>
|
||||
# Make all users in the hydra_admin group Hydra admins
|
||||
hydra_admin = admin
|
||||
# Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
|
||||
dev = eval-jobset
|
||||
dev = restart-jobs
|
||||
dev = cancel-build
|
||||
</role_mapping>
|
||||
</ldap>
|
||||
```
|
||||
|
||||
Then, place the password to your LDAP server in `/var/lib/hydra/ldap-password.conf`:
|
||||
|
||||
```
|
||||
bindpw = the-ldap-password
|
||||
```
|
||||
|
||||
### Debugging LDAP
|
||||
|
||||
Set the `debug` parameter under `ldap.config.ldap_server_options.debug`:
|
||||
|
||||
```
|
||||
<ldap>
|
||||
<config>
|
||||
<store>
|
||||
<ldap_server_options>
|
||||
debug = 2
|
||||
</ldap_server_options>
|
||||
</store>
|
||||
</config>
|
||||
</ldap>
|
||||
```
|
||||
|
||||
### Legacy LDAP Configuration
|
||||
|
||||
Hydra used to load the LDAP configuration from a YAML file in the
|
||||
`HYDRA_LDAP_CONFIG` environment variable. This behavior is deprecated
|
||||
and will be removed.
|
||||
|
||||
When Hydra uses the deprecated YAML file, Hydra applies the following
|
||||
default role mapping:
|
||||
|
||||
```
|
||||
<ldap>
|
||||
<role_mapping>
|
||||
hydra_admin = admin
|
||||
hydra_bump-to-front = bump-to-front
|
||||
hydra_cancel-build = cancel-build
|
||||
hydra_create-projects = create-projects
|
||||
hydra_restart-jobs = restart-jobs
|
||||
</role_mapping>
|
||||
</ldap>
|
||||
```
|
||||
|
||||
Note that configuring the LDAP parameters both in `hydra.conf` and via
|
||||
the environment variable is a fatal error.
|
||||
|
||||
Embedding Extra HTML
|
||||
--------------------
|
||||
|
||||
Embed an analytics widget or other HTML in the `<head>` of each HTML document via:
|
||||
|
||||
```conf
|
||||
tracker = <script src="...">
|
||||
```
|
||||
98
doc/manual/src/hacking.md
Normal file
98
doc/manual/src/hacking.md
Normal file
@@ -0,0 +1,98 @@
|
||||
# Hacking
|
||||
|
||||
This section provides some notes on how to hack on Hydra. To get the
|
||||
latest version of Hydra from GitHub:
|
||||
|
||||
```console
|
||||
$ git clone https://github.com/NixOS/hydra.git
|
||||
$ cd hydra
|
||||
```
|
||||
|
||||
To enter a shell in which all environment variables (such as `PERL5LIB`)
|
||||
and dependencies can be found:
|
||||
|
||||
```console
|
||||
$ nix develop
|
||||
```
|
||||
|
||||
To build Hydra, you should then do:
|
||||
|
||||
```console
|
||||
$ mesonConfigurePhase
|
||||
$ ninja
|
||||
```
|
||||
|
||||
You can start a local database, the web server, and other components with
|
||||
foreman:
|
||||
|
||||
```console
|
||||
$ ninja -C build
|
||||
$ foreman start
|
||||
```
|
||||
|
||||
The Hydra interface will be available on port 63333, with an admin user named "alice" whose password is "foobar".
|
||||
|
||||
You can run just the Hydra web server in your source tree as follows:
|
||||
|
||||
```console
|
||||
$ ./src/script/hydra-server
|
||||
```
|
||||
|
||||
You can run Hydra's test suite with the following:
|
||||
|
||||
```console
|
||||
$ meson test
|
||||
# to run as many tests as you have cores:
|
||||
$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
|
||||
```
|
||||
|
||||
**Warning**: Currently, the tests can fail
|
||||
if run with high parallelism [due to an issue in
|
||||
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
|
||||
causing database ports to collide.
|
||||
|
||||
## Working on the Manual
|
||||
|
||||
By default, `foreman start` runs mdbook in "watch" mode. mdbook listens
|
||||
at [http://localhost:63332/](http://localhost:63332/), and
|
||||
will reload the page every time you save.
|
||||
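To render the manual once outside of foreman, the same mdbook invocation
used by the meson rules can be run by hand; a sketch, assuming you are
inside the development shell where `mdbook` is available (the output
directory is arbitrary):

```console
# Build the manual a single time instead of watching for changes.
$ mdbook build doc/manual -d "$PWD/manual-html"
```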
|
||||
## Building
|
||||
|
||||
To build Hydra and its dependencies:
|
||||
|
||||
```console
|
||||
$ nix build .#packages.x86_64-linux.default
|
||||
```
|
||||
|
||||
## Development Tasks
|
||||
|
||||
### Connecting to the database
|
||||
|
||||
Assuming you're running the default configuration with `foreman start`,
|
||||
open an interactive session with Postgres via:
|
||||
|
||||
```console
|
||||
$ psql --host localhost --port 64444 hydra
|
||||
```
|
||||
|
||||
### Running the builder locally
|
||||
|
||||
For `hydra-queue-runner` to successfully build locally, your
|
||||
development user will need to be "trusted" by your Nix store.
|
||||
|
||||
Add yourself to the `trusted_users` option of `/etc/nix/nix.conf`.
|
||||
|
||||
On NixOS:
|
||||
|
||||
```nix
|
||||
{
|
||||
nix.settings.trusted-users = [ "YOURUSER" ];
|
||||
}
|
||||
```
|
||||
|
||||
Off NixOS, change `/etc/nix/nix.conf`:
|
||||
|
||||
```conf
|
||||
trusted-users = root YOURUSERNAME
|
||||
```
|
||||
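Off NixOS, the new setting only takes effect once the Nix daemon re-reads
its configuration; a sketch for systemd-based distributions:

```console
# Restart the daemon so the updated trusted-users setting is picked up.
$ sudo systemctl restart nix-daemon
```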
165
doc/manual/src/installation.md
Normal file
165
doc/manual/src/installation.md
Normal file
@@ -0,0 +1,165 @@
|
||||
Installation
|
||||
============
|
||||
|
||||
This chapter explains how to install Hydra on your own build farm
|
||||
server.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
To install and use Hydra you need to have installed the following
|
||||
dependencies:
|
||||
|
||||
- Nix
|
||||
|
||||
- PostgreSQL
|
||||
|
||||
- many Perl packages, notably Catalyst, EmailSender, and NixPerl (see
|
||||
the [Hydra expression in
|
||||
Nixpkgs](https://github.com/NixOS/hydra/blob/master/release.nix) for
|
||||
the complete list)
|
||||
|
||||
At the moment, Hydra runs only on GNU/Linux (*i686-linux* and
|
||||
*x86\_64-linux*).
|
||||
|
||||
For small projects, Hydra can be run on any reasonably modern machine.
|
||||
For individual projects you can even run Hydra on a laptop. However, the
|
||||
charm of a buildfarm server is usually that it operates without
|
||||
disturbing the developer's working environment and can serve releases
over the internet. You will typically also want your source
code managed in a version control system, such as Subversion.
|
||||
Therefore, you will probably want to install a server that is connected
|
||||
to the internet. To scale up to large and/or many projects, you will
|
||||
need at least a considerable amount of diskspace to store builds. Since
|
||||
Hydra can schedule multiple simultaneous build jobs, it can be useful to
|
||||
have a multi-core machine, and/or attach multiple build machines in a
|
||||
network to the central Hydra server.
|
||||
|
||||
Of course we think it is a good idea to use the
|
||||
[NixOS](http://nixos.org/nixos) GNU/Linux distribution for your
|
||||
buildfarm server. But this is not a requirement. The Nix software
|
||||
deployment system can be installed on any GNU/Linux distribution in
|
||||
parallel to the regular package management system. Thus, you can use
|
||||
Hydra on a Debian, Fedora, SuSE, or Ubuntu system.
|
||||
|
||||
Getting Nix
|
||||
-----------
|
||||
|
||||
If your server runs NixOS you are all set to continue with installation
|
||||
of Hydra. Otherwise you first need to install Nix. The latest stable
|
||||
version can be found on [the Nix web
|
||||
site](https://nixos.org/download/), along with a manual, which
|
||||
includes installation instructions.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
The latest development snapshot of Hydra can be installed by visiting
|
||||
the URL
|
||||
[`http://hydra.nixos.org/view/hydra/unstable`](http://hydra.nixos.org/view/hydra/unstable)
|
||||
and using the one-click install available at one of the build pages. You
|
||||
can also install Hydra through the channel by performing the following
|
||||
commands:
|
||||
|
||||
nix-channel --add http://hydra.nixos.org/jobset/hydra/master/channel/latest
|
||||
nix-channel --update
|
||||
nix-env -i hydra
|
||||
|
||||
Command completion should reveal a number of command-line tools from
|
||||
Hydra, such as `hydra-queue-runner`.
|
||||
|
||||
Creating the database
|
||||
---------------------
|
||||
|
||||
Hydra stores its results in a PostgreSQL database.
|
||||
|
||||
To set up a PostgreSQL database with *hydra* as database name and user
|
||||
name, issue the following commands on the PostgreSQL server:
|
||||
|
||||
```console
|
||||
createuser -S -D -R -P hydra
|
||||
createdb -O hydra hydra
|
||||
```
|
||||
|
||||
Note that *\$prefix* is the location of Hydra in the nix store.
|
||||
|
||||
Hydra uses an environment variable to know which database should be
|
||||
used, and a variable which points to a location that holds some state. To
|
||||
set these variables for a PostgreSQL database, add the following to the
|
||||
file `~/.profile` of the user running the Hydra services.
|
||||
|
||||
```console
|
||||
export HYDRA_DBI="dbi:Pg:dbname=hydra;host=dbserver.example.org;user=hydra;"
|
||||
export HYDRA_DATA=/var/lib/hydra
|
||||
```
|
||||
|
||||
You can provide the username and password in the file `~/.pgpass`, e.g.
|
||||
|
||||
```
|
||||
dbserver.example.org:*:hydra:hydra:password
|
||||
```
|
||||
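To confirm that the credentials in `~/.pgpass` work before going further, a
quick sketch using the standard PostgreSQL client (host and names as in the
examples above):

```console
# Should connect without prompting for a password and print connection details.
$ psql -h dbserver.example.org -U hydra -d hydra -c '\conninfo'
```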
|
||||
Make sure that the *HYDRA\_DATA* directory exists and is writable for
|
||||
the user which will run the Hydra services.
|
||||
|
||||
Having set these environment variables, you can now initialise the
|
||||
database by doing:
|
||||
|
||||
```console
|
||||
hydra-init
|
||||
```
|
||||
|
||||
To create projects, you need to create a user with *admin* privileges.
|
||||
This can be done using the command `hydra-create-user`:
|
||||
|
||||
```console
|
||||
$ hydra-create-user alice --full-name 'Alice Q. User' \
|
||||
--email-address 'alice@example.org' --password-prompt --role admin
|
||||
```
|
||||
|
||||
Additional users can be created through the web interface.
|
||||
|
||||
Upgrading
|
||||
---------
|
||||
|
||||
If you're upgrading Hydra from a previous version, you should do the
|
||||
following to perform any necessary database schema migrations:
|
||||
|
||||
```console
|
||||
hydra-init
|
||||
```
|
||||
|
||||
Getting Started
|
||||
---------------
|
||||
|
||||
To start the Hydra web server, execute:
|
||||
|
||||
```console
|
||||
hydra-server
|
||||
```
|
||||
|
||||
When the server is started, you can browse to `http://localhost:3000/`
|
||||
to start configuring your Hydra instance.
|
||||
|
||||
The `hydra-server` command launches the web server. There are two other
|
||||
processes that come into play:
|
||||
|
||||
- The *evaluator* is responsible for periodically evaluating job sets,
checking out their dependencies off their version control systems (VCS),
and queueing new builds if the result of the evaluation changed. It is
launched by the `hydra-evaluator` command.
- The *queue runner* launches builds (using Nix) as they are queued by
the evaluator, scheduling them onto the configured Nix hosts. It is
launched using the `hydra-queue-runner` command.
|
||||
|
||||
All three processes must be running for Hydra to be fully functional,
|
||||
though it's possible to temporarily stop any one of them for
|
||||
maintenance purposes, for instance.
|
||||
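On NixOS the Hydra module takes care of running these as services; started
by hand for a quick test, the three processes might look like this (a
sketch, assuming `HYDRA_DBI` and `HYDRA_DATA` are set as described above):

```console
# Run each daemon in its own terminal (or under a process supervisor).
$ hydra-server
$ hydra-evaluator
$ hydra-queue-runner
```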
173
doc/manual/src/introduction.md
Normal file
173
doc/manual/src/introduction.md
Normal file
@@ -0,0 +1,173 @@
|
||||
Introduction
|
||||
============
|
||||
|
||||
About Hydra
|
||||
-----------
|
||||
|
||||
Hydra is a tool for continuous integration testing and software release
|
||||
that uses a purely functional language to describe build jobs and their
|
||||
dependencies. Continuous integration is a simple technique to improve
|
||||
the quality of the software development process. An automated system
|
||||
continuously or periodically checks out the source code of a project,
|
||||
builds it, runs tests, and produces reports for the developers. Thus,
|
||||
various errors that might accidentally be committed into the code base
|
||||
are automatically caught. Such a system allows more in-depth testing
|
||||
than what developers could feasibly do manually:
|
||||
|
||||
- *Portability testing*: The software may need to be built and tested on many different
|
||||
platforms. It is infeasible for each developer to do this before
|
||||
every commit.
|
||||
- Likewise, many projects have very large test sets (e.g., regression
|
||||
tests in a compiler, or stress tests in a DBMS) that can take hours
|
||||
or days to run to completion.
|
||||
- Many kinds of static and dynamic analyses can be performed as part
|
||||
of the tests, such as code coverage runs and static analyses.
|
||||
- It may also be necessary to build many different *variants* of the
software. For instance, it may be necessary to verify that
|
||||
the component builds with various versions of a compiler.
|
||||
- Developers typically use incremental building to test their changes
|
||||
(since a full build may take too long), but this is unreliable with
|
||||
many build management tools (such as Make), i.e., the result of the
|
||||
incremental build might differ from a full build.
|
||||
- It ensures that the software can be built from the sources under
|
||||
revision control. Users of version management systems such as CVS
|
||||
and Subversion often forget to place source files under revision
|
||||
control.
|
||||
- The machines on which the continuous integration system runs ideally
|
||||
provide a clean, well-defined build environment. If this
|
||||
environment is administered through proper SCM techniques, then
|
||||
builds produced by the system can be reproduced. In contrast,
|
||||
developer work environments are typically not under any kind of SCM
|
||||
control.
|
||||
- In large projects, developers often work on a particular component of
  the project, and do not build and test the composition of those
  components (again since this is likely to take too long). To prevent
  the phenomenon of "big bang integration", where components are only
  tested together near the end of the development process, it is
  important to test components together as soon as possible (hence
  *continuous integration*).
|
||||
- It allows software to be *released* by automatically creating packages
  that users can download and install. To do this manually represents an
  often prohibitive amount of work, as one may want to produce releases
  for many different platforms: e.g., installers for Windows and Mac OS
  X, RPM or Debian packages for certain Linux distributions, and so on.
|
||||
|
||||
In its simplest form, a continuous integration tool sits in a loop
|
||||
building and releasing software components from a version management
|
||||
system. For each component, it performs the following tasks:
|
||||
|
||||
- It obtains the latest version of the component\'s source code from
|
||||
the version management system.
|
||||
- It runs the component\'s build process (which presumably includes
|
||||
the execution of the component\'s test set).
|
||||
- It presents the results of the build (such as error logs and
|
||||
releases) to the developers, e.g., by producing a web page.
|
||||
|
||||
Examples of continuous integration tools include Jenkins, CruiseControl,
|
||||
Tinderbox, Sisyphus, Anthill and BuildBot. These tools have various
|
||||
limitations.
|
||||
|
||||
- They do not manage the *build environment*. The build environment
  consists of the dependencies necessary to
|
||||
perform a build action, e.g., compilers, libraries, etc. Setting up
|
||||
the environment is typically done manually, and without proper SCM
|
||||
control (so it may be hard to reproduce a build at a later time).
|
||||
Manual management of the environment scales poorly in the number of
|
||||
configurations that must be supported. For instance, suppose that we
|
||||
want to build a component that requires a certain compiler X. We
|
||||
then have to go to each machine and install X. If we later need a
|
||||
newer version of X, the process must be repeated all over again. An
|
||||
even worse problem occurs if there are conflicting, mutually
|
||||
exclusive versions of the dependencies. Thus, simply installing the
|
||||
latest version is not an option. Of course, we can install these
|
||||
components in different directories and manually pass the
|
||||
appropriate paths to the build processes of the various components.
|
||||
But this is a rather tiresome and error-prone process.
|
||||
- They do not easily support *variability in software systems*. A system
  may have a great deal of build-time variability: optional
|
||||
functionality, whether to build a debug or production version,
|
||||
different versions of dependencies, and so on. (For instance, the
|
||||
Linux kernel now has over 2,600 build-time configuration switches.)
|
||||
It is therefore important that a continuous integration tool can
|
||||
easily select and test different instances from the configuration
|
||||
space of the system to reveal problems, such as erroneous
|
||||
interactions between features. In a continuous integration setting,
|
||||
it is also useful to test different combinations of versions of
|
||||
subsystems, e.g., the head revision of a component against stable
|
||||
releases of its dependencies, and vice versa, as this can reveal
|
||||
various integration problems.
|
||||
|
||||
*Hydra* is a continuous integration tool that solves these problems. It
|
||||
is built on top of the [Nix package manager](http://nixos.org/nix/),
|
||||
which has a purely functional language for describing package build
|
||||
actions and their dependencies. This allows the build environment for
|
||||
projects to be produced automatically and deterministically, and
|
||||
variability in components to be expressed naturally using functions; and
|
||||
as such is an ideal fit for a continuous build system.
|
||||
|
||||
About Us
|
||||
--------
|
||||
|
||||
Hydra is the successor of the Nix Buildfarm, which was developed in
|
||||
tandem with the Nix software deployment system. Nix was originally
|
||||
developed at the Department of Information and Computing Sciences,
|
||||
Utrecht University by the TraCE project (2003-2008). The project was
|
||||
funded by the Software Engineering Research Program Jacquard to improve
|
||||
the support for variability in software systems. Funding for the
|
||||
development of Nix and Hydra is now provided by the NIRICT LaQuSo Build
|
||||
Farm project.
|
||||
|
||||
About this Manual
|
||||
-----------------
|
||||
|
||||
This manual tells you how to install the Hydra buildfarm software on
|
||||
your own server and how to operate that server using its web interface.
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Hydra is free software: you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free
|
||||
Software Foundation, either version 3 of the License, or (at your
|
||||
option) any later version.
|
||||
|
||||
Hydra is distributed in the hope that it will be useful, but WITHOUT ANY
|
||||
WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE. See the [GNU General Public
|
||||
License](http://www.gnu.org/licenses/) for more details.
|
||||
|
||||
Hydra at `nixos.org`
|
||||
--------------------
|
||||
|
||||
The `nixos.org` installation of Hydra runs at
|
||||
[`http://hydra.nixos.org/`](http://hydra.nixos.org/). That installation
|
||||
is used to build software components from the [Nix](http://nixos.org),
|
||||
[NixOS](http://nixos.org/nixos), [GNU](http://www.gnu.org/),
|
||||
[Stratego/XT](http://strategoxt.org), and related projects.
|
||||
|
||||
If you are one of the developers on those projects, it is likely that
|
||||
you will be using the NixOS Hydra server in some way. If you need to
|
||||
administer automatic builds for your project, you should pull the right
|
||||
strings to get an account on the server. This manual will tell you how
|
||||
to set up new projects and build jobs within those projects and write a
|
||||
release.nix file to describe the build process of your project to Hydra.
|
||||
You can skip the next chapter.
|
||||
|
||||
If your project does not yet have automatic builds within the NixOS
|
||||
Hydra server, it may actually be eligible. We are in the process of
|
||||
setting up a large buildfarm that should be able to support open source
|
||||
and academic software projects. Get in touch.
|
||||
|
||||
Hydra on your own buildfarm
|
||||
---------------------------
|
||||
|
||||
If you need to run your own Hydra installation,
|
||||
[installation chapter](installation.md) explains how to download and install the
|
||||
system on your own server.
|
||||
21
doc/manual/src/jobs.md
Normal file
21
doc/manual/src/jobs.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# Hydra Jobs
|
||||
|
||||
## Derivation Attributes
|
||||
|
||||
Hydra stores the following job attributes in its database:
|
||||
|
||||
* `nixName` - the Derivation's `name` attribute
|
||||
* `system` - the Derivation's `system` attribute
|
||||
* `drvPath` - the Derivation's path in the Nix store
|
||||
* `outputs` - A JSON dictionary of output names and their store path.
|
||||
|
||||
### Meta fields
|
||||
|
||||
* `description` - `meta.description`, a string
|
||||
* `license` - a comma-separated list of license names from `meta.license`, expected to be a list of attribute sets with an attribute named `shortName`, ex: `[ { shortName = "licensename"} ]`.
|
||||
* `homepage` - `meta.homepage`, a string
|
||||
* `maintainers` - a comma-separated list of maintainer email addresses from `meta.maintainers`, expected to be a list of attribute sets with an attribute named `email`, ex: `[ { email = "alice@example.com"; } ]`.
|
||||
* `schedulingPriority` - `meta.schedulingPriority`, an integer. Default: 100. Slightly prioritizes this job over other jobs within this jobset.
|
||||
* `timeout` - `meta.timeout`, an integer. Default: 36000. Number of seconds this job must complete within.
|
||||
* `maxSilent` - `meta.maxSilent`, an integer. Default: 7200. Number of seconds of no output on stderr / stdout before considering the job failed.
|
||||
* `isChannel` - `meta.isHydraChannel`, bool. Default: false. Deprecated.
|
||||
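For illustration, here is a minimal sketch of a job that sets several of these fields through its derivation's `meta` attribute. The job name, package, and values are made up; the attribute shapes follow the descriptions above:

```nix
{ pkgs ? import <nixpkgs> { } }:

{
  # A hypothetical job named `example`; its meta values map to the
  # attributes listed above.
  example = pkgs.runCommand "example-1.0"
    {
      meta = {
        description = "An example job";
        homepage = "https://example.org";
        license = [ { shortName = "mit"; } ];               # list of { shortName = ...; }
        maintainers = [ { email = "alice@example.com"; } ]; # list of { email = ...; }
        schedulingPriority = 200; # prefer this job over the default of 100
        timeout = 3600;           # abort if the job runs longer than an hour
        maxSilent = 600;          # abort after 10 minutes without output
      };
    } "touch $out";
}
```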
33
doc/manual/src/monitoring/README.md
Normal file
33
doc/manual/src/monitoring/README.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Monitoring Hydra
|
||||
|
||||
## Webserver
|
||||
|
||||
The webserver exposes Prometheus metrics for the webserver itself at `/metrics`.
|
||||
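For example, assuming the web server listens on the default `localhost:3000`, the metrics can be fetched with:

```console
$ curl http://localhost:3000/metrics
```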
|
||||
## Queue Runner
|
||||
|
||||
The queue runner's status is exposed at `/queue-runner-status`:
|
||||
|
||||
```console
|
||||
$ curl --header "Accept: application/json" http://localhost:63333/queue-runner-status
|
||||
... JSON payload ...
|
||||
```
|
||||
|
||||
## Notification Daemon
|
||||
|
||||
The `hydra-notify` process can expose Prometheus metrics for plugin execution. See
|
||||
[hydra-notify's Prometheus service](../configuration.md#hydra-notifys-prometheus-service)
|
||||
for details on enabling and configuring the exporter.
|
||||
|
||||
The notification exporter exposes metrics on a per-plugin, per-event-type basis: execution
|
||||
durations, frequency, successes, and failures.
|
||||
|
||||
### Diagnostic Dump
|
||||
|
||||
The notification daemon can also dump its metrics to stderr whether or not the exporter
|
||||
is configured. This is particularly useful for cases where metrics data is needed but the
|
||||
exporter was not enabled.
|
||||
|
||||
To trigger this diagnostic dump, send a Postgres notification with the
|
||||
`hydra_notify_dump_metrics` channel and no payload. See
|
||||
[Re-sending a notification](../notifications.md#re-sending-a-notification).
|
||||
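For instance, the dump can be triggered with `psql` (assuming the database is named `hydra`):

```console
$ psql hydra -c "NOTIFY hydra_notify_dump_metrics"
```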
87
doc/manual/src/notifications.md
Normal file
87
doc/manual/src/notifications.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# `hydra-notify` and Hydra's Notifications
|
||||
|
||||
Hydra uses a notification-based subsystem to implement some features and support plugin development. Notifications are sent to `hydra-notify`, which is responsible for dispatching each notification to each plugin.
|
||||
|
||||
Notifications are passed from `hydra-queue-runner` to `hydra-notify` through Postgres's `NOTIFY` and `LISTEN` feature.
|
||||
|
||||
## Notification Types
|
||||
|
||||
Note that the notification format is subject to change and should not be considered an API. Integrate with `hydra-notify` instead of listening directly.
|
||||
|
||||
### `cached_build_finished`
|
||||
|
||||
* **Payload:** Exactly two values, tab separated: The ID of the evaluation which contains the finished build, followed by the ID of the finished build.
|
||||
* **When:** Issued directly after an evaluation completes, when that evaluation includes this finished build.
|
||||
* **Delivery Semantics:** At most once per evaluation.
|
||||
|
||||
|
||||
### `cached_build_queued`
|
||||
|
||||
* **Payload:** Exactly two values, tab separated: The ID of the evaluation which contains the queued build, followed by the ID of the queued build.
|
||||
* **When:** Issued directly after an evaluation completes, when that evaluation includes this queued build.
|
||||
* **Delivery Semantics:** At most once per evaluation.
|
||||
|
||||
### `build_queued`
|
||||
|
||||
* **Payload:** Exactly one value, the ID of the build.
|
||||
* **When:** Issued after the transaction inserting the build into the database is committed. One notification is sent per new build.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
### `build_started`
|
||||
|
||||
* **Payload:** Exactly one value, the ID of the build.
|
||||
* **When:** Issued directly before building happens, and only if the derivation's outputs cannot be substituted.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
### `step_finished`
|
||||
|
||||
* **Payload:** Three values, tab separated: The ID of the build which the step is part of, the step number, and the path on disk to the log file.
|
||||
* **When:** Issued directly after a step completes, regardless of success. Is not issued if the step's derivation's outputs can be substituted.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
### `build_finished`
|
||||
|
||||
* **Payload:** At least one value, tab separated: The ID of the build which finished, followed by IDs of all of the builds which also depended upon this build.
|
||||
* **When:** Issued directly after a build completes, regardless of success and substitutability.
|
||||
* **Delivery Semantics:** At least once.
|
||||
|
||||
`hydra-notify` will call `buildFinished` for each plugin in two ways:
|
||||
|
||||
* The `builds` table's `notificationspendingsince` column stores when the build finished. On startup, `hydra-notify` will query all builds with a non-null `notificationspendingsince` value and treat each row as a received `build_finished` event.
|
||||
|
||||
* Additionally, `hydra-notify` subscribes to `build_finished` events and processes them in real time.
|
||||
|
||||
After processing, the row's `notificationspendingsince` column is set to null.
|
||||
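For instance, the builds still awaiting notification can be inspected manually with a query along these lines (a sketch, assuming the database is named `hydra`; the table and column names are the ones described above):

```console
$ psql hydra -c "SELECT id FROM builds WHERE notificationspendingsince IS NOT NULL"
```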
|
||||
It is possible for subsequent deliveries of the same `build_finished` data to imply different outcomes. For example, if the build fails, is restarted, and then succeeds. In this scenario the `build_finished` events will be delivered at least twice, once for the failure and then once for the success.
|
||||
|
||||
### `eval_started`
|
||||
|
||||
* **Payload:** Exactly two values, tab separated: an opaque trace ID representing this evaluation, and the ID of the jobset.
|
||||
* **When:** At the beginning of the evaluation phase for the jobset, before any work is done.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
### `eval_added`
|
||||
|
||||
* **Payload:** Exactly three values, tab separated: an opaque trace ID representing this evaluation, the ID of the jobset, and the ID of the JobsetEval record.
|
||||
* **When:** After the evaluator fetches inputs and completes the evaluation successfully.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
### `eval_cached`
|
||||
|
||||
* **Payload:** Exactly three values: an opaque trace ID representing this evaluation, the ID of the jobset, and the ID of the previous identical evaluation.
|
||||
* **When:** After the evaluator fetches inputs, if none of the inputs changed.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
### `eval_failed`
|
||||
|
||||
* **Payload:** Exactly two values: an opaque trace ID representing this evaluation, and the ID of the jobset.
|
||||
* **When:** After fetching any input fails, or any other evaluation error occurs.
|
||||
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.
|
||||
|
||||
## Development Notes
|
||||
|
||||
### Re-sending a notification
|
||||
|
||||
Notifications can be experimentally re-sent on the command line with `psql`, with `NOTIFY $notificationname, '$payload'`.
|
||||
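For example, to replay a `build_queued` event for a hypothetical build with ID 1234 (the database name `hydra` is assumed):

```console
$ psql hydra -c "NOTIFY build_queued, '1234'"
```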
|
||||
278
doc/manual/src/plugins/README.md
Normal file
278
doc/manual/src/plugins/README.md
Normal file
@@ -0,0 +1,278 @@
|
||||
# Plugins
|
||||
|
||||
This chapter describes all plugins present in Hydra.
|
||||
|
||||
### Inputs
|
||||
|
||||
Hydra supports the following inputs:
|
||||
|
||||
- Bazaar input
|
||||
- Darcs input
|
||||
- Git input
|
||||
- Mercurial input
|
||||
- Path input
|
||||
|
||||
## Bitbucket pull requests
|
||||
|
||||
Creates jobs based on open Bitbucket pull requests.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `bitbucket_authorization.<owner>`
|
||||
|
||||
## Bitbucket status
|
||||
|
||||
Sets Bitbucket CI status.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `enable_bitbucket_status`
|
||||
- `bitbucket.username`
|
||||
- `bitbucket.password`
|
||||
|
||||
## CircleCI Notification
|
||||
|
||||
Sets CircleCI status.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `circleci.[].jobs`
|
||||
- `circleci.[].vcstype`
|
||||
- `circleci.[].token`
|
||||
|
||||
## Compress build logs
|
||||
|
||||
Compresses build logs after a build with bzip2 or zstd.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `compress_build_logs`
|
||||
|
||||
Enable log compression
|
||||
|
||||
- `compress_build_logs_compression`
|
||||
|
||||
Which compression format to use. Valid values are bzip2 (default) and zstd.
|
||||
|
||||
- `compress_build_logs_silent`
|
||||
|
||||
Whether to compress logs silently.
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
compress_build_logs = 1
|
||||
```
|
||||
|
||||
## Coverity Scan
|
||||
|
||||
Uploads source code to [coverity scan](https://scan.coverity.com).
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `coverityscan.[].jobs`
|
||||
- `coverityscan.[].project`
|
||||
- `coverityscan.[].email`
|
||||
- `coverityscan.[].token`
|
||||
- `coverityscan.[].scanurl`
|
||||
|
||||
## Email notification
|
||||
|
||||
Sends email notification if build status changes.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `email_notification`
|
||||
|
||||
## Gitea status
|
||||
|
||||
Sets Gitea CI status.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `gitea_authorization.<repo-owner>`
|
||||
|
||||
## GitHub pulls
|
||||
|
||||
Creates jobs based on open GitHub pull requests.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `github_authorization.<repo-owner>`
|
||||
|
||||
## Github refs
|
||||
|
||||
Hydra plugin for retrieving the list of references (branches or tags) from
|
||||
GitHub following a certain naming scheme.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `github_endpoint` (defaults to https://api.github.com)
|
||||
- `github_authorization.<repo-owner>`
|
||||
|
||||
## Github status
|
||||
|
||||
Sets GitHub CI status.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `githubstatus.[].jobs`
|
||||
|
||||
Regular expression for jobs to match in the format `project:jobset:job`.
|
||||
This field is required and has no default value.
|
||||
|
||||
- `githubstatus.[].excludeBuildFromContext`
|
||||
|
||||
Don't include the build's ID in the status.
|
||||
|
||||
- `githubstatus.[].context`
|
||||
|
||||
Context shown in the status
|
||||
|
||||
- `githubstatus.[].useShortContext`
|
||||
|
||||
Renames `continuous-integration/hydra` to `ci/hydra` and removes the PR suffix
|
||||
from the name. Useful to see the full path in GitHub for long job names.
|
||||
|
||||
- `githubstatus.[].description`
|
||||
|
||||
Description shown in the status. Defaults to `Hydra build #<build-id> of
|
||||
<jobname>`
|
||||
|
||||
- `githubstatus.[].inputs`
|
||||
|
||||
The input which corresponds to the github repo/rev whose
|
||||
status we want to report. Can be repeated.
|
||||
|
||||
- `githubstatus.[].authorization`
|
||||
|
||||
Verbatim contents of the Authorization header. See
|
||||
[GitHub documentation](https://developer.github.com/v3/#authentication) for
|
||||
details. This field is only used if `github_authorization.<repo-owner>` is not set.
|
||||
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
<githubstatus>
|
||||
jobs = test:pr:build
|
||||
## This example will match all jobs
|
||||
#jobs = .*
|
||||
inputs = src
|
||||
authorization = Bearer gha-secret😱secret😱secret😱
|
||||
excludeBuildFromContext = 1
|
||||
</githubstatus>
|
||||
```
|
||||
|
||||
## GitLab pulls
|
||||
|
||||
Creates jobs based on open GitLab pull requests.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `gitlab_authorization.<projectId>`
|
||||
|
||||
## Gitlab status
|
||||
|
||||
Sets GitLab CI status.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `gitlab_authorization.<projectId>`
|
||||
|
||||
## InfluxDB notification
|
||||
|
||||
Writes InfluxDB events when a build finishes.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `influxdb.url`
|
||||
- `influxdb.db`
|
||||
|
||||
## RunCommand
|
||||
|
||||
Runs a shell command when the build is finished.
|
||||
|
||||
See [The RunCommand Plugin](./RunCommand.md) for more information.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `runcommand.[].job`
|
||||
|
||||
Matcher for jobs in the format `project:jobset:job` (not a regular expression).
|
||||
Defaults to `*:*:*`.
|
||||
|
||||
- `runcommand.[].command`
|
||||
|
||||
Command to run. Can use the `$HYDRA_JSON` environment variable to access
|
||||
information about the build.
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
<runcommand>
|
||||
job = myProject:*:*
|
||||
command = cat $HYDRA_JSON > /tmp/hydra-output
|
||||
</runcommand>
|
||||
```
|
||||
|
||||
## S3 backup
|
||||
|
||||
Uploads nars and narinfos to S3 storage.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `s3backup.[].jobs`
|
||||
- `s3backup.[].compression_type`
|
||||
- `s3backup.[].name`
|
||||
- `s3backup.[].prefix`
|
||||
|
||||
## Slack notification
|
||||
|
||||
Sends Slack notifications about build results.
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `slack.[].jobs`
|
||||
- `slack.[].force`
|
||||
- `slack.[].url`
|
||||
|
||||
|
||||
## SoTest
|
||||
|
||||
Schedules hardware tests on a SoTest controller.
|
||||
|
||||
This plugin submits tests to a SoTest controller for all builds that contain
|
||||
two products matching the subtypes "sotest-binaries" and "sotest-config".
|
||||
|
||||
Build products are declared by the file "nix-support/hydra-build-products"
|
||||
relative to the root of a build, in the following format:
|
||||
|
||||
```
|
||||
file sotest-binaries /nix/store/…/binaries.zip
|
||||
file sotest-config /nix/store/…/config.yaml
|
||||
```
|
||||
|
||||
### Configuration options
|
||||
|
||||
- `sotest.[].uri`
|
||||
|
||||
URL of the controller, defaults to `https://opensource.sotest.io`
|
||||
|
||||
- `sotest.[].authfile`
|
||||
|
||||
File containing `username:password`
|
||||
|
||||
- `sotest.[].priority`
|
||||
|
||||
Optional priority setting.
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
<sotest>
|
||||
uri = https://sotest.example
|
||||
authfile = /var/lib/hydra/sotest.auth
|
||||
priority = 1
|
||||
</sotest>
|
||||
```
|
||||
83
doc/manual/src/plugins/RunCommand.md
Normal file
83
doc/manual/src/plugins/RunCommand.md
Normal file
@@ -0,0 +1,83 @@
|
||||
## The RunCommand Plugin
|
||||
|
||||
Hydra supports executing a program after certain builds finish.
|
||||
This behavior is disabled by default.
|
||||
|
||||
Hydra executes these commands under the `hydra-notify` service.
|
||||
|
||||
### Static Commands
|
||||
|
||||
Configure specific commands to execute after the specified matching job finishes.
|
||||
|
||||
#### Configuration
|
||||
|
||||
- `runcommand.[].job`
|
||||
|
||||
A matcher for jobs to match in the format `project:jobset:job`. Defaults to `*:*:*`.
|
||||
|
||||
**Note:** This matcher format is not a regular expression.
|
||||
The `*` is a wildcard for that entire section; partial matches are not supported.
|
||||
|
||||
- `runcommand.[].command`
|
||||
|
||||
Command to run. Can use the `$HYDRA_JSON` environment variable to access information about the build.
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
<runcommand>
|
||||
job = myProject:*:*
|
||||
command = cat $HYDRA_JSON > /tmp/hydra-output
|
||||
</runcommand>
|
||||
```
|
||||
|
||||
### Dynamic Commands
|
||||
|
||||
Hydra can optionally run RunCommand hooks defined dynamically by the jobset. In
|
||||
order to enable dynamic commands, you must enable this feature in your
|
||||
`hydra.conf`, *as well as* in the parent project and jobset configuration.
|
||||
|
||||
#### Behavior
|
||||
|
||||
Hydra will execute any program defined under the `runCommandHook` attribute set. These jobs must have a single output named `out`, and that output must be an executable file located directly at `$out`.
|
||||
|
||||
#### Security Properties
|
||||
|
||||
Safely deploying dynamic commands requires careful design of your Hydra jobs. Allowing arbitrary users to define attributes in your top level attribute set will allow those users to execute code on your Hydra.
|
||||
|
||||
If a jobset has dynamic commands enabled, you must ensure only trusted users can define top level attributes.
|
||||
|
||||
|
||||
#### Configuration
|
||||
|
||||
- `dynamicruncommand.enable`
|
||||
|
||||
Set to 1 to enable dynamic RunCommand program execution.
|
||||
|
||||
#### Example
|
||||
|
||||
In your Hydra configuration, specify:
|
||||
|
||||
```xml
|
||||
<dynamicruncommand>
|
||||
enable = 1
|
||||
</dynamicruncommand>
|
||||
```
|
||||
|
||||
Then create a job named `runCommandHook.example` in your jobset:
|
||||
|
||||
``` nix
|
||||
{ pkgs, ... }: {
|
||||
runCommandHook = {
|
||||
recurseForDerivations = true;
|
||||
|
||||
example = pkgs.writeScript "run-me" ''
|
||||
#!${pkgs.runtimeShell}
|
||||
|
||||
${pkgs.jq}/bin/jq . "$HYDRA_JSON"
|
||||
'';
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
After the `runCommandHook.example` build finishes, that script will execute.
|
||||
146
doc/manual/src/plugins/declarative-projects.md
Normal file
146
doc/manual/src/plugins/declarative-projects.md
Normal file
@@ -0,0 +1,146 @@
|
||||
## Declarative Projects
|
||||
|
||||
Hydra supports declaratively configuring a project\'s jobsets. This
|
||||
configuration can be done statically, or generated by a build job.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Hydra will treat the project\'s declarative input as a static definition
|
||||
> if and only if the spec file contains a dictionary of dictionaries. If
|
||||
> the value of any key in the spec is not a dictionary, it will treat the
|
||||
> spec as a generated declarative spec.
|
||||
|
||||
### Static, Declarative Projects
|
||||
|
||||
Hydra supports declarative projects, where jobsets are configured from a
|
||||
static JSON document in a repository.
|
||||
|
||||
To configure a static declarative project, take the following steps:
|
||||
|
||||
1. Create a Hydra-fetchable source like a Git repository or local path.
|
||||
|
||||
2. In that source, create a file called `spec.json`, and add the
|
||||
specification for all of the jobsets. Each key is a jobset name and each
|
||||
value is a jobset\'s specification. For example:
|
||||
|
||||
``` {.json}
|
||||
{
|
||||
"nixpkgs": {
|
||||
"enabled": 1,
|
||||
"hidden": false,
|
||||
"description": "Nixpkgs",
|
||||
"nixexprinput": "nixpkgs",
|
||||
"nixexprpath": "pkgs/top-level/release.nix",
|
||||
"checkinterval": 300,
|
||||
"schedulingshares": 100,
|
||||
"enableemail": false,
|
||||
"enable_dynamic_run_command": false,
|
||||
"emailoverride": "",
|
||||
"keepnr": 3,
|
||||
"inputs": {
|
||||
"nixpkgs": {
|
||||
"type": "git",
|
||||
"value": "git://github.com/NixOS/nixpkgs.git master",
|
||||
"emailresponsible": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"nixos": {
|
||||
"enabled": 1,
|
||||
"hidden": false,
|
||||
"description": "NixOS: Small Evaluation",
|
||||
"nixexprinput": "nixpkgs",
|
||||
"nixexprpath": "nixos/release-small.nix",
|
||||
"checkinterval": 300,
|
||||
"schedulingshares": 100,
|
||||
"enableemail": false,
|
||||
"enable_dynamic_run_command": false,
|
||||
"emailoverride": "",
|
||||
"keepnr": 3,
|
||||
"inputs": {
|
||||
"nixpkgs": {
|
||||
"type": "git",
|
||||
"value": "git://github.com/NixOS/nixpkgs.git master",
|
||||
"emailresponsible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Create a new project, and set the project\'s declarative input type,
|
||||
declarative input value, and declarative spec file to point to the
|
||||
source and JSON file you created in step 2.
|
||||
|
||||
Hydra will create a special jobset named `.jobsets`. When the `.jobsets`
|
||||
jobset is evaluated, this static specification will be used for
|
||||
configuring the rest of the project\'s jobsets.
|
||||
|
||||
|
||||
### Generated, Declarative Projects
|
||||
|
||||
Hydra also supports generated declarative projects, where jobsets are
|
||||
configured automatically from specification files instead of being
|
||||
managed through the UI. A jobset specification is a JSON object
|
||||
containing the configuration of the jobset, for example:
|
||||
|
||||
``` {.json}
|
||||
{
|
||||
"enabled": 1,
|
||||
"hidden": false,
|
||||
"description": "js",
|
||||
"nixexprinput": "src",
|
||||
"nixexprpath": "release.nix",
|
||||
"checkinterval": 300,
|
||||
"schedulingshares": 100,
|
||||
"enableemail": false,
|
||||
"enable_dynamic_run_command": false,
|
||||
"emailoverride": "",
|
||||
"keepnr": 3,
|
||||
"inputs": {
|
||||
"src": { "type": "git", "value": "git://github.com/shlevy/declarative-hydra-example.git", "emailresponsible": false },
|
||||
"nixpkgs": { "type": "git", "value": "git://github.com/NixOS/nixpkgs.git release-16.03", "emailresponsible": false }
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
To configure a declarative project, take the following steps:
|
||||
|
||||
1. Create a jobset repository in the normal way (e.g. a git repo with a
|
||||
`release.nix` file, any other needed helper files, and taking any
|
||||
kind of hydra input), but without adding it to the UI. The nix
|
||||
expression of this repository should contain a single job, named
|
||||
`jobsets`. The output of the `jobsets` job should be a JSON file
|
||||
containing an object of jobset specifications. Each member of the
|
||||
object will become a jobset of the project, configured by the
|
||||
corresponding jobset specification (see the sketch after this list).
|
||||
|
||||
2. In some hydra-fetchable source (potentially, but not necessarily,
|
||||
the same repo you created in step 1), create a JSON file containing
|
||||
a jobset specification that points to the jobset repository you
|
||||
created in the first step, specifying any needed inputs
|
||||
(e.g. nixpkgs) as necessary.
|
||||
|
||||
3. In the project creation/edit page, set declarative input type,
|
||||
declarative input value, and declarative spec file to point to the
|
||||
source and JSON file you created in step 2.
|
||||
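For step 1, a minimal sketch of such a `jobsets` job might look like the following. The `declInput` and `projectName` arguments are the ones Hydra passes, as described below; `nixpkgs` is assumed to be declared as an input in the spec file, and all jobset values are purely illustrative:

```nix
{ nixpkgs, declInput, projectName, ... }:

let
  pkgs = import nixpkgs { };

  # One entry per jobset that should exist in the project.
  spec = {
    main = {
      enabled = 1;
      hidden = false;
      description = "${projectName} main branch";
      nixexprinput = "src";
      nixexprpath = "release.nix";
      checkinterval = 300;
      schedulingshares = 100;
      enableemail = false;
      emailoverride = "";
      keepnr = 3;
      inputs = {
        src = {
          type = "git";
          value = "https://example.org/project.git main";
          emailresponsible = false;
        };
      };
    };
  };
in
{
  # The single job evaluated for the `.jobsets` jobset; its output is the
  # JSON document describing the project's jobsets.
  jobsets = pkgs.writeText "jobsets.json" (builtins.toJSON spec);
}
```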
|
||||
Hydra will create a special jobset named `.jobsets`, which whenever
|
||||
evaluated will go through the steps above in reverse order:
|
||||
|
||||
1. Hydra will fetch the input specified by the declarative input type
|
||||
and value.
|
||||
|
||||
2. Hydra will use the configuration given in the declarative spec file
|
||||
as the jobset configuration for this evaluation. In addition to any
|
||||
inputs specified in the spec file, hydra will also pass the
|
||||
`declInput` argument corresponding to the input fetched in step 1 and
|
||||
the `projectName` argument containing the project\'s name.
|
||||
|
||||
3. As normal, hydra will build the jobs specified in the jobset
|
||||
repository, which in this case is the single `jobsets` job. When
|
||||
that job completes, hydra will read the created jobset
|
||||
specifications and create corresponding jobsets in the project,
|
||||
disabling any jobsets that used to exist but are not present in the
|
||||
current spec.
|
||||
413
doc/manual/src/projects.md
Normal file
413
doc/manual/src/projects.md
Normal file
@@ -0,0 +1,413 @@
|
||||
Creating and Managing Projects
|
||||
==============================
|
||||
|
||||
Once Hydra is installed and running, the next step is to add projects to
|
||||
the build farm. We follow the example of the [Patchelf
|
||||
project](http://nixos.org/patchelf.html), a software tool written in C
|
||||
and using the GNU Build System (GNU Autoconf and GNU Automake).
|
||||
|
||||
Log in to the web interface of your Hydra installation using the user
|
||||
name and password you inserted in the database (by default, Hydra\'s web
|
||||
server listens on [`localhost:3000`](http://localhost:3000/)). Then
|
||||
follow the \"Create Project\" link to create a new project.
|
||||
|
||||
Project Information
|
||||
-------------------
|
||||
|
||||
A project definition consists of some general information and a set of
|
||||
job sets. The general information identifies a project, its owner, and
|
||||
current state of activity. Here\'s what we fill in for the patchelf
|
||||
project:
|
||||
|
||||
Identifier: patchelf
|
||||
|
||||
The *identifier* is the identity of the project. It is used in URLs and
|
||||
in the names of build results.
|
||||
|
||||
The identifier should be a unique name (it is the primary database key
|
||||
for the project table in the database). If you try to create a project
|
||||
with an already existing identifier you\'d get an error message from the
|
||||
database. So try to create the project after entering just the general
|
||||
information to figure out if you have chosen a unique name. Job sets can
|
||||
be added once the project has been created.
|
||||
|
||||
Display name: Patchelf
|
||||
|
||||
The *display name* is used in menus.
|
||||
|
||||
Description: A tool for modifying ELF binaries
|
||||
|
||||
The *description* is used as short documentation of the nature of the
|
||||
project.
|
||||
|
||||
Owner: eelco
|
||||
|
||||
The *owner* of a project can create and edit job sets.
|
||||
|
||||
Enabled: Yes
|
||||
|
||||
Only if the project is *enabled* are builds performed.
|
||||
|
||||
Once created there should be an entry for the project in the sidebar. Go
|
||||
to the project page for the
|
||||
[Patchelf](http://localhost:3000/project/patchelf) project.
|
||||
|
||||
Job Sets
|
||||
--------
|
||||
|
||||
A project can consist of multiple *job sets* (hereafter *jobsets*),
|
||||
separate tasks that can be built separately, but may depend on each
|
||||
other (without cyclic dependencies, of course). Go to the
|
||||
[Edit](http://localhost:3000/project/patchelf/edit) page of the Patchelf
|
||||
project and \"Add a new jobset\" by providing the following
|
||||
\"Information\":
|
||||
|
||||
Identifier: trunk
|
||||
Description: Trunk
|
||||
Nix expression: release.nix in input patchelfSrc
|
||||
|
||||
This states that in order to build the `trunk` jobset, the Nix
|
||||
expression in the file `release.nix`, which can be obtained from input
|
||||
`patchelfSrc`, should be evaluated. (We\'ll have a look at `release.nix`
|
||||
later.)
|
||||
|
||||
To realize a job we probably need a number of inputs, which can be
|
||||
declared in the table below. As many inputs as required can be added.
|
||||
For patchelf we declare the following inputs.
|
||||
|
||||
| Name              | Type           | Value                               |
| ----------------- | -------------- | ----------------------------------- |
| `patchelfSrc`     | `Git checkout` | `https://github.com/NixOS/patchelf` |
| `nixpkgs`         | `Git checkout` | `https://github.com/NixOS/nixpkgs`  |
| `officialRelease` | `Boolean`      | `false`                             |
| `system`          | `String value` | `"i686-linux"`                      |
|
||||
|
||||
Building Jobs
|
||||
-------------
|
||||
|
||||
Build Recipes
|
||||
-------------
|
||||
|
||||
Build jobs and *build recipes* for a jobset are specified in a text file
|
||||
written in the [Nix language](http://nixos.org/nix/). The recipe is
|
||||
actually called a *Nix expression* in Nix parlance. By convention this
|
||||
file is often called `release.nix`.
|
||||
|
||||
The `release.nix` file is typically kept under version control, and the
|
||||
repository that contains it is one of the build inputs of the
|
||||
corresponding jobset, often called `hydraConfig` by convention. The repository
|
||||
for that file and the actual file name are specified on the web
|
||||
interface of Hydra under the `Setup` tab of the jobset\'s overview page,
|
||||
under the `Nix
|
||||
expression` heading. See, for example, the [jobset overview
|
||||
page](http://hydra.nixos.org/jobset/patchelf/trunk) of the PatchELF
|
||||
project, and [the corresponding Nix
|
||||
file](https://github.com/NixOS/patchelf/blob/master/release.nix).
|
||||
|
||||
Knowledge of the Nix language is recommended, but the example below
|
||||
should already give a good idea of how it works:
|
||||
|
||||
```
let
  pkgs = import <nixpkgs> {}; ①

  jobs = rec { ②

    tarball = ③
      pkgs.releaseTools.sourceTarball { ④
        name = "hello-tarball";
        src = <hello>; ⑤
        buildInputs = (with pkgs; [ gettext texLive texinfo ]);
      };

    build = ⑥
      { system ? builtins.currentSystem }: ⑦

      let pkgs = import <nixpkgs> { inherit system; }; in
      pkgs.releaseTools.nixBuild { ⑧
        name = "hello";
        src = jobs.tarball;
        configureFlags = [ "--disable-silent-rules" ];
      };
  };
in
jobs ⑨
```
|
||||
|
||||
|
||||
This file shows what a `release.nix` file for
|
||||
[GNU Hello](http://www.gnu.org/software/hello/) would look like.
|
||||
GNU Hello is representative of many GNU and non-GNU free software
|
||||
projects:
|
||||
|
||||
- it uses the GNU Build System, namely GNU Autoconf, and GNU Automake;
|
||||
for users, it means it can be installed using the
|
||||
usual
|
||||
./configure && make install
|
||||
procedure
|
||||
;
|
||||
- it uses Gettext for internationalization;
|
||||
- it has a Texinfo manual, which can be rendered as PDF with TeX.
|
||||
|
||||
The file defines a jobset consisting of two jobs: `tarball`, and
|
||||
`build`. It contains the following elements (referenced from the figure
|
||||
by numbers):
|
||||
|
||||
1. This defines a variable `pkgs` holding the set of packages provided
|
||||
by [Nixpkgs](http://nixos.org/nixpkgs/).
|
||||
|
||||
Since `nixpkgs` appears in angle brackets, there must be a build
|
||||
input of that name in the Nix search path. In this case, the web
|
||||
interface should show a `nixpkgs` build input, which is a checkout
|
||||
of the Nixpkgs source code repository; Hydra then adds this and
|
||||
other build inputs to the Nix search path when evaluating
|
||||
`release.nix`.
|
||||
|
||||
2. This defines a variable holding the two Hydra jobs--an *attribute
|
||||
set* in Nix.
|
||||
|
||||
3. This is the definition of the first job, named `tarball`. The
|
||||
purpose of this job is to produce a usable source code tarball.
|
||||
|
||||
4. The `tarball` job calls the `sourceTarball` function, which
|
||||
(roughly) runs `autoreconf && ./configure &&
|
||||
make dist` on the checkout. The `buildInputs` attribute
|
||||
specifies additional software dependencies for the job.
|
||||
|
||||
> The package names used in `buildInputs`--e.g., `texLive`--are the
|
||||
> names of the *attributes* corresponding to these packages in
|
||||
> Nixpkgs, specifically in the
|
||||
> [`all-packages.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/all-packages.nix)
|
||||
> file. See the section entitled "Package Naming" in the Nixpkgs
|
||||
> manual for more information.
|
||||
|
||||
5. The `tarball` job expects a `hello` build input to be available in
|
||||
the Nix search path. Again, this input is passed by Hydra and is
|
||||
meant to be a checkout of GNU Hello\'s source code repository.
|
||||
|
||||
6. This is the definition of the `build` job, whose purpose is to build
|
||||
Hello from the tarball produced above.
|
||||
|
||||
7. The `build` function takes one parameter, `system`, which should be
|
||||
a string defining the Nix system type--e.g., `"x86_64-linux"`.
|
||||
Additionally, it refers to `jobs.tarball`, seen above.
|
||||
|
||||
Hydra inspects the formal argument list of the function (here, the
|
||||
`system` argument) and passes it the corresponding parameter
|
||||
specified as a build input on Hydra\'s web interface. Here, `system`
|
||||
is passed by Hydra when it calls `build`. Thus, it must be defined
|
||||
as a build input of type string in Hydra, which could take one of
|
||||
several values.
|
||||
|
||||
The question mark after `system` defines the default value for this
|
||||
argument, and is only useful when debugging locally.
|
||||
|
||||
8. The `build` job calls the `nixBuild` function, which unpacks the
|
||||
tarball, then runs `./configure && make
|
||||
&& make check && make install`.
|
||||
|
||||
9. Finally, the set of jobs is returned to Hydra, as a Nix attribute
|
||||
set.
|
||||
|
||||
Building from the Command Line
|
||||
------------------------------
|
||||
|
||||
It is often useful to test a build recipe, for instance before it is
|
||||
actually used by Hydra, when testing changes, or when debugging a build
|
||||
issue. Since build recipes for Hydra jobsets are just plain Nix
|
||||
expressions, they can be evaluated using the standard Nix tools.
|
||||
|
||||
To evaluate the `tarball` job of the above example, just
|
||||
run:
|
||||
|
||||
```console
|
||||
$ nix-build release.nix -A tarball
|
||||
```
|
||||
|
||||
However, doing this with the example as is will probably
|
||||
yield an error like this:
|
||||
|
||||
error: user-thrown exception: file `hello' was not found in the Nix search path (add it using $NIX_PATH or -I)
|
||||
|
||||
The error is self-explanatory. Assuming `$HOME/src/hello` points to a
|
||||
checkout of Hello, this can be fixed as follows:
|
||||
|
||||
```console
|
||||
$ nix-build -I ~/src release.nix -A tarball
|
||||
```
|
||||
|
||||
Similarly, the `build` job can be evaluated:
|
||||
|
||||
```console
|
||||
$ nix-build -I ~/src release.nix -A build
|
||||
```
|
||||
|
||||
The `build` job reuses the result of the `tarball` job, rebuilding it
|
||||
only if it needs to.
|
||||
|
||||
Adding More Jobs
|
||||
----------------
|
||||
|
||||
The example illustrates how to write the most basic
|
||||
jobs, `tarball` and `build`. In practice, much more can be done by using
|
||||
features readily provided by Nixpkgs or by creating new jobs as
|
||||
customizations of existing jobs.
|
||||
|
||||
For instance, a test coverage report for projects compiled with GCC can be
|
||||
automatically generated using the `coverageAnalysis` function provided
|
||||
by Nixpkgs instead of `nixBuild`. Back to our GNU Hello example, we can
|
||||
define a `coverage` job that produces an HTML code coverage report
|
||||
directly readable from the corresponding Hydra build page:
|
||||
|
||||
```
coverage =
  { system ? builtins.currentSystem }:

  let pkgs = import nixpkgs { inherit system; }; in
  pkgs.releaseTools.coverageAnalysis {
    name = "hello";
    src = jobs.tarball;
    configureFlags = [ "--disable-silent-rules" ];
  };
```
|
||||
|
||||
As can be seen, the only difference compared to `build` is the use of
|
||||
`coverageAnalysis`.
|
||||
|
||||
Nixpkgs provides many more build tools, including the ability to run
|
||||
builds in virtual machines, which can themselves run another GNU/Linux
|
||||
distribution, which allows for the creation of packages for these
|
||||
distributions. Please see [the `pkgs/build-support/release`
|
||||
directory](https://github.com/NixOS/nixpkgs/tree/master/pkgs/build-support/release)
|
||||
of Nixpkgs for more. The NixOS manual also contains information about
|
||||
whole-system testing in virtual machines.
|
||||
|
||||
Now, assume we want to build Hello with an old version of GCC, and with
|
||||
different `configure` flags. A new `build_exotic` job can be written
|
||||
that simply *overrides* the relevant arguments passed to `nixBuild`:
|
||||
|
||||
```
build_exotic =
  { system ? builtins.currentSystem }:

  let
    pkgs = import nixpkgs { inherit system; };
    build = jobs.build { inherit system; };
  in
  pkgs.lib.overrideDerivation build (attrs: {
    buildInputs = [ pkgs.gcc33 ];
    preConfigure = "gcc --version";
    configureFlags =
      attrs.configureFlags ++ [ "--disable-nls" ];
  });
```
|
||||
|
||||
The `build_exotic` job reuses `build` and overrides some of its
|
||||
arguments: it adds a dependency on GCC 3.3, a pre-configure phase that
|
||||
runs `gcc --version`, and the `--disable-nls` configure flag.
|
||||
|
||||
This customization mechanism is very powerful. For instance, it can be
|
||||
used to change the way Hello and *all* its dependencies--including the C
|
||||
library and compiler used to build it--are built. See the Nixpkgs manual
|
||||
for more.
|
||||
|
||||
Declarative Projects
|
||||
--------------------
|
||||
|
||||
See the [Declarative Projects chapter](./plugins/declarative-projects.md).
|
||||
|
||||
Email Notifications
|
||||
-------------------
|
||||
|
||||
Hydra can send email notifications when the status of a build changes.
|
||||
This provides immediate feedback to maintainers or committers when a
|
||||
change causes build failures.
|
||||
|
||||
The feature can be turned on by adding the following line to `hydra.conf`
|
||||
|
||||
``` conf
|
||||
email_notification = 1
|
||||
```
|
||||
|
||||
By default, Hydra only sends email notifications if a previously successful
|
||||
build starts to fail. In order to force Hydra to send an email for each build
|
||||
(including e.g. successful or cancelled ones), the environment variable
|
||||
`HYDRA_FORCE_SEND_MAIL` can be declared:
|
||||
|
||||
``` nix
|
||||
services.hydra-dev.extraEnv.HYDRA_FORCE_SEND_MAIL = "1";
|
||||
```
|
||||
|
||||
SASL Authentication for the email address that's used to send notifications
|
||||
can be configured like this:
|
||||
|
||||
``` conf
|
||||
EMAIL_SENDER_TRANSPORT_sasl_username=hydra@example.org
|
||||
EMAIL_SENDER_TRANSPORT_sasl_password=verysecret
|
||||
EMAIL_SENDER_TRANSPORT_port=587
|
||||
EMAIL_SENDER_TRANSPORT_ssl=starttls
|
||||
```
|
||||
|
||||
Further information about these environment variables can be found at the
|
||||
[MetaCPAN documentation of `Email::Sender::Manual::QuickStart`](https://metacpan.org/pod/Email::Sender::Manual::QuickStart#specifying-transport-in-the-environment).
|
||||
|
||||
It's recommended to not put this in `services.hydra-dev.extraEnv` as this would
|
||||
leak the secrets into the Nix store. Instead, it should be written into an
|
||||
environment file and configured like this:
|
||||
|
||||
``` nix
|
||||
{ systemd.services.hydra-notify = {
|
||||
serviceConfig.EnvironmentFile = "/etc/secrets/hydra-mail-cfg";
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
The simplest approach to enable Email Notifications is to use the ssmtp
|
||||
package, which simply hands off the emails to another SMTP server. For
|
||||
details on how to configure ssmtp, see the documentation for the
|
||||
`networking.defaultMailServer` option. To use ssmtp for the Hydra email
|
||||
notifications, add it to the path option of the Hydra services in your
|
||||
`/etc/nixos/configuration.nix` file:
|
||||
|
||||
``` nix
systemd.services.hydra-queue-runner.path = [ pkgs.ssmtp ];
systemd.services.hydra-server.path = [ pkgs.ssmtp ];
```
|
||||
|
||||
Gitea Integration
|
||||
-----------------
|
||||
|
||||
Hydra can notify Git servers (such as [GitLab](https://gitlab.com/), [GitHub](https://github.com)
|
||||
or [Gitea](https://gitea.io/en-us/)) about the result of a build from a Git checkout.
|
||||
|
||||
This section describes how it can be implemented for `gitea`, but the approach for `gitlab` is
|
||||
analogous:
|
||||
|
||||
* [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication)
|
||||
* Add it to a file which only users in the hydra group can read, for example like this (see [including files](configuration.md#including-files) for more information):
|
||||
```
|
||||
<gitea_authorization>
|
||||
your_username=your_token
|
||||
</gitea_authorization>
|
||||
```
|
||||
|
||||
* Include the file in your `hydra.conf` like this:
|
||||
``` nix
|
||||
{
|
||||
services.hydra-dev.extraConfig = ''
|
||||
Include /path/to/secret/file
|
||||
'';
|
||||
}
|
||||
```
|
||||
|
||||
* For a jobset with a `Git`-input which points to a `gitea`-instance, add the following
|
||||
additional inputs:
|
||||
|
||||
| Type | Name | Value |
|
||||
| -------------- | ------------------- | ---------------------------------- |
|
||||
| `String value` | `gitea_repo_name` | *Name of the repository to build* |
|
||||
| `String value` | `gitea_repo_owner` | *Owner of the repository* |
|
||||
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
|
||||
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
|
||||
|
||||
Content-addressed derivations
|
||||
-----------------------------
|
||||
|
||||
Hydra can to a certain extent use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
|
||||
To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.
|
||||
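In addition, the experimental feature has to be enabled in the Nix configuration of the machines involved; a sketch of the relevant `nix.conf` line (your setup may differ, and any existing `experimental-features` list should be extended rather than replaced):

``` conf
# enable content-addressed derivations in Nix
experimental-features = ca-derivations
```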
|
||||
Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken.
|
||||
27
doc/manual/src/webhooks.md
Normal file
27
doc/manual/src/webhooks.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# Webhooks
|
||||
|
||||
Hydra can be notified by GitHub or Gitea via webhooks to trigger a new evaluation when a
|
||||
jobset has a GitHub or Gitea repo in its inputs.
|
||||
|
||||
## GitHub
|
||||
|
||||
To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
|
||||
and in the `Webhooks` tab click on `Add webhook`.
|
||||
|
||||
- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
|
||||
- In `Content type` switch to `application/json`.
|
||||
- The `Secret` field can stay empty.
|
||||
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
|
||||
|
||||
Then add the hook with `Add webhook`.
|
||||
|
||||
## Gitea
|
||||
|
||||
To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
|
||||
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
|
||||
|
||||
- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
|
||||
- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
|
||||
- Change the branch filter to match the git branch hydra builds.
|
||||
|
||||
Then add the hook with `Add webhook`.
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# jobset example file. This file canbe referenced as Nix expression
|
||||
# jobset example file. This file can be referenced as Nix expression
|
||||
# in a jobset configuration along with inputs for nixpkgs and the
|
||||
# repository containing this file.
|
||||
#
|
||||
|
||||
58
flake.lock
generated
58
flake.lock
generated
@@ -1,61 +1,59 @@
|
||||
{
|
||||
"nodes": {
|
||||
"lowdown-src": {
|
||||
"nix": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1598695561,
|
||||
"narHash": "sha256-gyH/5j+h/nWw0W8AcR2WKvNBUsiQ7QuxqSJNXAwV+8E=",
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"rev": "1705b4a26fbf065d9574dce47a94e8c7c79e052f",
|
||||
"lastModified": 1750777360,
|
||||
"narHash": "sha256-nDWFxwhT+fQNgi4rrr55EKjpxDyVKSl1KaNmSXtYj40=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nix",
|
||||
"rev": "7bb200199705eddd53cb34660a76567c6f1295d9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"owner": "NixOS",
|
||||
"ref": "2.29-maintenance",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"lowdown-src": "lowdown-src",
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"nix-eval-jobs": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1609520816,
|
||||
"narHash": "sha256-IGO7tfJXsv9u2wpW76VCzOsHYapRZqH9pHGVsoffPrI=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nix",
|
||||
"rev": "8a2ce0f455da32bc20978e68c0aad9efb4560abc",
|
||||
"lastModified": 1748680938,
|
||||
"narHash": "sha256-TQk6pEMD0mFw7jZXpg7+2qNKGbAluMQgc55OMgEO8bM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-eval-jobs",
|
||||
"rev": "974a4af3d4a8fd242d8d0e2608da4be87a62b83f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nix",
|
||||
"type": "indirect"
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-eval-jobs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1602702596,
|
||||
"narHash": "sha256-fqJ4UgOb4ZUnCDIapDb4gCrtAah5Rnr2/At3IzMitig=",
|
||||
"lastModified": 1750736827,
|
||||
"narHash": "sha256-UcNP7BR41xMTe0sfHBH8R79+HdCw0OwkC/ZKrQEuMeo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "ad0d20345219790533ebe06571f82ed6b034db31",
|
||||
"rev": "b4a30b08433ad7b6e1dfba0833fb0fe69d43dfec",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-20.09-small",
|
||||
"type": "indirect"
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nix": "nix",
|
||||
"nixpkgs": [
|
||||
"nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
"nix-eval-jobs": "nix-eval-jobs",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
622
flake.nix
622
flake.nix
@@ -1,556 +1,130 @@
|
||||
{
|
||||
description = "A Nix-based continuous build system";
|
||||
|
||||
inputs.nixpkgs.follows = "nix/nixpkgs";
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small";
|
||||
|
||||
outputs = { self, nixpkgs, nix }:
|
||||
inputs.nix = {
|
||||
url = "github:NixOS/nix/2.29-maintenance";
|
||||
# We want to control the deps precisely
|
||||
flake = false;
|
||||
};
|
||||
|
||||
inputs.nix-eval-jobs = {
|
||||
url = "github:nix-community/nix-eval-jobs";
|
||||
# We want to control the deps precisely
|
||||
flake = false;
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, nix, nix-eval-jobs, ... }:
|
||||
let
|
||||
|
||||
version = "${builtins.readFile ./version}.${builtins.substring 0 8 self.lastModifiedDate}.${self.shortRev or "DIRTY"}";
|
||||
|
||||
pkgs = import nixpkgs {
|
||||
system = "x86_64-linux";
|
||||
overlays = [ self.overlay nix.overlay ];
|
||||
};
|
||||
|
||||
# NixOS configuration used for VM tests.
|
||||
hydraServer =
|
||||
{ config, pkgs, ... }:
|
||||
{ imports = [ self.nixosModules.hydraTest ];
|
||||
|
||||
virtualisation.memorySize = 1024;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
|
||||
|
||||
nix = {
|
||||
# Without this nix tries to fetch packages from the default
|
||||
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
|
||||
binaryCaches = [];
|
||||
};
|
||||
};
|
||||
|
||||
in rec {
|
||||
systems = [ "x86_64-linux" "aarch64-linux" ];
|
||||
forEachSystem = nixpkgs.lib.genAttrs systems;
|
||||
in
|
||||
rec {
|
||||
|
||||
# A Nixpkgs overlay that provides a 'hydra' package.
|
||||
overlay = final: prev: {
|
||||
|
||||
# Add LDAP dependencies that aren't currently found within nixpkgs.
|
||||
perlPackages = prev.perlPackages // {
|
||||
NetLDAPServer = prev.perlPackages.buildPerlPackage {
|
||||
pname = "Net-LDAP-Server";
|
||||
version = "0.43";
|
||||
src = final.fetchurl {
|
||||
url = "mirror://cpan/authors/id/A/AA/AAR/Net-LDAP-Server-0.43.tar.gz";
|
||||
sha256 = "0qmh3cri3fpccmwz6bhwp78yskrb3qmalzvqn0a23hqbsfs4qv6x";
|
||||
};
|
||||
propagatedBuildInputs = with final.perlPackages; [ NetLDAP ConvertASN1 ];
|
||||
meta = {
|
||||
description = "LDAP server side protocol handling";
|
||||
license = with final.stdenv.lib.licenses; [ artistic1 ];
|
||||
};
|
||||
};
|
||||
|
||||
NetLDAPSID = prev.perlPackages.buildPerlPackage {
|
||||
pname = "Net-LDAP-SID";
|
||||
version = "0.0001";
|
||||
src = final.fetchurl {
|
||||
url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-SID-0.001.tar.gz";
|
||||
sha256 = "1mnnpkmj8kpb7qw50sm8h4sd8py37ssy2xi5hhxzr5whcx0cvhm8";
|
||||
};
|
||||
meta = {
|
||||
description= "Active Directory Security Identifier manipulation";
|
||||
license = with final.stdenv.lib.licenses; [ artistic2 ];
|
||||
};
|
||||
};
|
||||
|
||||
NetLDAPServerTest = prev.perlPackages.buildPerlPackage {
|
||||
pname = "Net-LDAP-Server-Test";
|
||||
version = "0.22";
|
||||
src = final.fetchurl {
|
||||
url = "mirror://cpan/authors/id/K/KA/KARMAN/Net-LDAP-Server-Test-0.22.tar.gz";
|
||||
sha256 = "13idip7jky92v4adw60jn2gcc3zf339gsdqlnc9nnvqzbxxp285i";
|
||||
};
|
||||
propagatedBuildInputs = with final.perlPackages; [ NetLDAP NetLDAPServer TestMore DataDump NetLDAPSID ];
|
||||
meta = {
|
||||
description= "test Net::LDAP code";
|
||||
license = with final.stdenv.lib.licenses; [ artistic1 ];
|
||||
};
|
||||
};
|
||||
|
||||
CatalystAuthenticationStoreLDAP = prev.perlPackages.buildPerlPackage {
|
||||
pname = "Catalyst-Authentication-Store-LDAP";
|
||||
version = "1.016";
|
||||
src = final.fetchurl {
|
||||
url = "mirror://cpan/authors/id/I/IL/ILMARI/Catalyst-Authentication-Store-LDAP-1.016.tar.gz";
|
||||
sha256 = "0cm399vxqqf05cjgs1j5v3sk4qc6nmws5nfhf52qvpbwc4m82mq8";
|
||||
};
|
||||
propagatedBuildInputs = with final.perlPackages; [ NetLDAP CatalystPluginAuthentication ClassAccessorFast ];
|
||||
buildInputs = with final.perlPackages; [ TestMore TestMockObject TestException NetLDAPServerTest ];
|
||||
meta = {
|
||||
description= "Authentication from an LDAP Directory";
|
||||
license = with final.stdenv.lib.licenses; [ artistic1 ];
|
||||
};
|
||||
};
|
||||
overlays.default = final: prev: {
|
||||
nixDependenciesForHydra = final.lib.makeScope final.newScope
|
||||
(import (nix + "/packaging/dependencies.nix") {
|
||||
pkgs = final;
|
||||
inherit (final) stdenv;
|
||||
inputs = {};
|
||||
});
|
||||
nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope
|
||||
(import (nix + "/packaging/components.nix") {
|
||||
officialRelease = true;
|
||||
inherit (final) lib;
|
||||
pkgs = final;
|
||||
src = nix;
|
||||
maintainers = [ ];
|
||||
});
|
||||
nix-eval-jobs = final.callPackage nix-eval-jobs {
|
||||
nixComponents = final.nixComponentsForHydra;
|
||||
};
|
||||
|
||||
hydra = with final; let
|
||||
perlDeps = buildEnv {
|
||||
name = "hydra-perl-deps";
|
||||
paths = with perlPackages; lib.closePropagation
|
||||
[ ModulePluggable
|
||||
CatalystActionREST
|
||||
CatalystAuthenticationStoreDBIxClass
|
||||
CatalystAuthenticationStoreLDAP
|
||||
CatalystDevel
|
||||
CatalystDispatchTypeRegex
|
||||
CatalystPluginAccessLog
|
||||
CatalystPluginAuthorizationRoles
|
||||
CatalystPluginCaptcha
|
||||
CatalystPluginSessionStateCookie
|
||||
CatalystPluginSessionStoreFastMmap
|
||||
CatalystPluginStackTrace
|
||||
CatalystPluginUnicodeEncoding
|
||||
CatalystTraitForRequestProxyBase
|
||||
CatalystViewDownload
|
||||
CatalystViewJSON
|
||||
CatalystViewTT
|
||||
CatalystXScriptServerStarman
|
||||
CatalystXRoleApplicator
|
||||
CryptRandPasswd
|
||||
DBDPg
|
||||
DBDSQLite
|
||||
DataDump
|
||||
DateTime
|
||||
DigestSHA1
|
||||
EmailMIME
|
||||
EmailSender
|
||||
FileSlurp
|
||||
IOCompress
|
||||
IPCRun
|
||||
JSON
|
||||
JSONAny
|
||||
JSONXS
|
||||
LWP
|
||||
LWPProtocolHttps
|
||||
NetAmazonS3
|
||||
NetPrometheus
|
||||
NetStatsd
|
||||
PadWalker
|
||||
Readonly
|
||||
SQLSplitStatement
|
||||
SetScalar
|
||||
Starman
|
||||
SysHostnameLong
|
||||
TermSizeAny
|
||||
TestMore
|
||||
TextDiff
|
||||
TextTable
|
||||
XMLSimple
|
||||
YAML
|
||||
final.nix.perl-bindings
|
||||
git
|
||||
];
|
||||
};
|
||||
|
||||
in stdenv.mkDerivation {
|
||||
|
||||
name = "hydra-${version}";
|
||||
|
||||
src = self;
|
||||
|
||||
buildInputs =
|
||||
[ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig libpqxx
|
||||
gitAndTools.topGit mercurial darcs subversion breezy openssl bzip2 libxslt
|
||||
final.nix perlDeps perl
|
||||
boost
|
||||
postgresql_11
|
||||
(if lib.versionAtLeast lib.version "20.03pre"
|
||||
then nlohmann_json
|
||||
else nlohmann_json.override { multipleHeaders = true; })
|
||||
];
|
||||
|
||||
checkInputs = [
|
||||
foreman
|
||||
];
|
||||
|
||||
hydraPath = lib.makeBinPath (
|
||||
[ subversion openssh final.nix coreutils findutils pixz
|
||||
gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial darcs gnused breezy
|
||||
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] );
|
||||
|
||||
configureFlags = [ "--with-docbook-xsl=${docbook_xsl}/xml/xsl/docbook" ];
|
||||
|
||||
shellHook = ''
|
||||
PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
|
||||
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
|
||||
export HYDRA_HOME="src/"
|
||||
mkdir -p .hydra-data
|
||||
export HYDRA_DATA="$(pwd)/.hydra-data"
|
||||
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
|
||||
'';
|
||||
|
||||
preConfigure = "autoreconf -vfi";
|
||||
|
||||
NIX_LDFLAGS = [ "-lpthread" ];
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
||||
doCheck = true;
|
||||
|
||||
preCheck = ''
|
||||
patchShebangs .
|
||||
export LOGNAME=''${LOGNAME:-foo}
|
||||
# set $HOME for bzr so it can create its trace file
|
||||
export HOME=$(mktemp -d)
|
||||
'';
|
||||
|
||||
postInstall = ''
|
||||
mkdir -p $out/nix-support
|
||||
|
||||
for i in $out/bin/*; do
|
||||
read -n 4 chars < $i
|
||||
if [[ $chars =~ ELF ]]; then continue; fi
|
||||
wrapProgram $i \
|
||||
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
|
||||
--prefix PATH ':' $out/bin:$hydraPath \
|
||||
--set HYDRA_RELEASE ${version} \
|
||||
--set HYDRA_HOME $out/libexec/hydra \
|
||||
--set NIX_RELEASE ${final.nix.name or "unknown"}
|
||||
done
|
||||
'';
|
||||
|
||||
dontStrip = true;
|
||||
|
||||
meta.description = "Build of Hydra on ${system}";
|
||||
passthru = { inherit perlDeps; inherit (final) nix; };
|
||||
hydra = final.callPackage ./package.nix {
|
||||
inherit (final.lib) fileset;
|
||||
rawSrc = self;
|
||||
nixComponents = final.nixComponentsForHydra;
|
||||
};
|
||||
};
|
||||
|
||||
hydraJobs = {
|
||||
build = forEachSystem (system: packages.${system}.hydra);
|
||||
|
||||
build.x86_64-linux = packages.x86_64-linux.hydra;
|
||||
buildNoTests = forEachSystem (system:
|
||||
packages.${system}.hydra.overrideAttrs (_: {
|
||||
doCheck = false;
|
||||
})
|
||||
);
|
||||
|
||||
manual =
|
||||
pkgs.runCommand "hydra-manual-${version}" {}
|
||||
''
|
||||
mkdir -p $out/share
|
||||
cp -prvd ${pkgs.hydra}/share/doc $out/share/
|
||||
manual = forEachSystem (system: let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
hydra = self.packages.${pkgs.hostPlatform.system}.hydra;
|
||||
in
|
||||
pkgs.runCommand "hydra-manual-${hydra.version}" { }
|
||||
''
|
||||
mkdir -p $out/share
|
||||
cp -prvd ${hydra.doc}/share/doc $out/share/
|
||||
|
||||
mkdir $out/nix-support
|
||||
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
mkdir $out/nix-support
|
||||
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
|
||||
'');
|
||||
|
||||
tests.install.x86_64-linux =
|
||||
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
|
||||
simpleTest {
|
||||
machine = hydraServer;
|
||||
testScript =
|
||||
''
|
||||
machine.wait_for_job("hydra-init")
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_job("hydra-evaluator")
|
||||
machine.wait_for_job("hydra-queue-runner")
|
||||
machine.wait_for_open_port("3000")
|
||||
machine.succeed("curl --fail http://localhost:3000/")
|
||||
'';
|
||||
};
|
||||
|
||||
tests.api.x86_64-linux =
|
||||
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
|
||||
simpleTest {
|
||||
machine = { pkgs, ... }: {
|
||||
imports = [ hydraServer ];
|
||||
# No caching for PathInput plugin, otherwise we get wrong values
|
||||
# (as it has a 30s window where no changes to the file are considered).
|
||||
services.hydra-dev.extraConfig = ''
|
||||
path_input_cache_validity_seconds = 0
|
||||
'';
|
||||
};
|
||||
testScript =
|
||||
let dbi = "dbi:Pg:dbname=hydra;user=root;"; in
|
||||
''
|
||||
machine.wait_for_job("hydra-init")
|
||||
|
||||
# Create an admin account and some other state.
|
||||
machine.succeed(
|
||||
"""
|
||||
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
|
||||
mkdir /run/jobset /tmp/nix
|
||||
chmod 755 /run/jobset /tmp/nix
|
||||
cp ${./tests/api-test.nix} /run/jobset/default.nix
|
||||
chmod 644 /run/jobset/default.nix
|
||||
chown -R hydra /run/jobset /tmp/nix
|
||||
"""
|
||||
)
|
||||
|
||||
machine.succeed("systemctl stop hydra-evaluator hydra-queue-runner")
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_open_port("3000")
|
||||
|
||||
# Run the API tests.
|
||||
machine.succeed(
|
||||
"su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./tests/api-test.pl}' >&2"
|
||||
)
|
||||
'';
|
||||
tests = import ./nixos-tests.nix {
|
||||
inherit forEachSystem nixpkgs nixosModules;
|
||||
};
|
||||
|
||||
tests.notifications.x86_64-linux =
|
||||
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
|
||||
simpleTest {
|
||||
machine = { pkgs, ... }: {
|
||||
imports = [ hydraServer ];
|
||||
services.hydra-dev.extraConfig = ''
|
||||
<influxdb>
|
||||
url = http://127.0.0.1:8086
|
||||
db = hydra
|
||||
</influxdb>
|
||||
'';
|
||||
services.influxdb.enable = true;
|
||||
};
|
||||
testScript = ''
|
||||
machine.wait_for_job("hydra-init")
|
||||
|
||||
# Create an admin account and some other state.
|
||||
machine.succeed(
|
||||
"""
|
||||
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
|
||||
mkdir /run/jobset
|
||||
chmod 755 /run/jobset
|
||||
cp ${./tests/api-test.nix} /run/jobset/default.nix
|
||||
chmod 644 /run/jobset/default.nix
|
||||
chown -R hydra /run/jobset
|
||||
"""
|
||||
)
|
||||
|
||||
# Wait until InfluxDB can receive web requests
|
||||
machine.wait_for_job("influxdb")
|
||||
machine.wait_for_open_port("8086")
|
||||
|
||||
# Create an InfluxDB database where hydra will write to
|
||||
machine.succeed(
|
||||
"curl -XPOST 'http://127.0.0.1:8086/query' "
|
||||
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
|
||||
)
|
||||
|
||||
# Wait until hydra-server can receive HTTP requests
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_open_port("3000")
|
||||
|
||||
# Setup the project and jobset
|
||||
machine.succeed(
|
||||
"su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./tests/setup-notifications-jobset.pl}' >&2"
|
||||
)
|
||||
|
||||
# Wait until hydra has built the job and
|
||||
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
|
||||
machine.wait_until_succeeds(
|
||||
"curl -s -H 'Accept: application/csv' "
|
||||
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
|
||||
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
||||
tests.ldap.x86_64-linux =
|
||||
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
|
||||
makeTest {
|
||||
machine = { pkgs, ... }: {
|
||||
imports = [ hydraServer ];
|
||||
|
||||
services.openldap = {
|
||||
enable = true;
|
||||
suffix = "dc=example";
|
||||
rootdn = "cn=root,dc=example";
|
||||
rootpw = "notapassword";
|
||||
database = "bdb";
|
||||
dataDir = "/var/lib/openldap";
|
||||
extraConfig = ''
|
||||
moduleload pw-sha2
|
||||
'';
|
||||
extraDatabaseConfig = ''
|
||||
'';
|
||||
|
||||
# userPassword generated via `slappasswd -o module-load=pw-sha2 -h '{SSHA256}'`
|
||||
# The admin user has the password `password` and `user` has the password `foobar`.
|
||||
declarativeContents = ''
|
||||
dn: dc=example
|
||||
dc: example
|
||||
o: Root
|
||||
objectClass: top
|
||||
objectClass: dcObject
|
||||
objectClass: organization
|
||||
|
||||
dn: ou=users,dc=example
|
||||
ou: users
|
||||
description: All users
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
|
||||
dn: ou=groups,dc=example
|
||||
ou: groups
|
||||
description: All groups
|
||||
objectClass: top
|
||||
objectClass: organizationalUnit
|
||||
|
||||
dn: cn=hydra_admin,ou=groups,dc=example
|
||||
cn: hydra_admin
|
||||
description: Hydra Admin user group
|
||||
objectClass: groupOfNames
|
||||
member: cn=admin,ou=users,dc=example
|
||||
|
||||
dn: cn=user,ou=users,dc=example
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
sn: user
|
||||
cn: user
|
||||
mail: user@example
|
||||
userPassword: {SSHA256}B9rfUbNgv8nIGn1Hm5qbVQdv6AIQb012ORJwegqELB0DWCzoMCY+4A==
|
||||
|
||||
dn: cn=admin,ou=users,dc=example
|
||||
objectClass: organizationalPerson
|
||||
objectClass: inetOrgPerson
|
||||
sn: admin
|
||||
cn: admin
|
||||
mail: admin@example
|
||||
userPassword: {SSHA256}meKP7fSWhkzXFC1f8RWRb8V8ssmN/VQJp7xJrUFFcNUDuwP1PbitMg==
|
||||
'';
|
||||
};
|
||||
systemd.services.hdyra-server.environment.CATALYST_DEBUG = "1";
|
||||
systemd.services.hydra-server.environment.HYDRA_LDAP_CONFIG = pkgs.writeText "config.yaml"
|
||||
# example config based on https://metacpan.org/source/ILMARI/Catalyst-Authentication-Store-LDAP-1.016/README#L103
|
||||
''
|
||||
credential:
|
||||
class: Password
|
||||
password_field: password
|
||||
password_type: self_check
|
||||
store:
|
||||
class: LDAP
|
||||
ldap_server: localhost
|
||||
ldap_server_options.timeout: 30
|
||||
binddn: "cn=root,dc=example"
|
||||
bindpw: notapassword
|
||||
start_tls: 0
|
||||
start_tls_options:
|
||||
verify: none
|
||||
user_basedn: "ou=users,dc=example"
|
||||
user_filter: "(&(objectClass=inetOrgPerson)(cn=%s))"
|
||||
user_scope: one
|
||||
user_field: cn
|
||||
user_search_options:
|
||||
deref: always
|
||||
use_roles: 1
|
||||
role_basedn: "ou=groups,dc=example"
|
||||
role_filter: "(&(objectClass=groupOfNames)(member=%s))"
|
||||
role_scope: one
|
||||
role_field: cn
|
||||
role_value: dn
|
||||
role_search_options:
|
||||
deref: always
|
||||
'';
|
||||
networking.firewall.enable = false;
|
||||
};
|
||||
testScript = ''
|
||||
import json
|
||||
|
||||
machine.wait_for_unit("openldap.service")
|
||||
machine.wait_for_job("hydra-init")
|
||||
machine.wait_for_open_port("3000")
|
||||
response = machine.succeed(
|
||||
"curl --fail http://localhost:3000/login -H 'Accept: application/json' -H 'Referer: http://localhost:3000' --data 'username=user&password=foobar'"
|
||||
)
|
||||
|
||||
response_json = json.loads(response)
|
||||
assert "user" == response_json["username"]
|
||||
assert "user@example" == response_json["emailaddress"]
|
||||
assert len(response_json["userroles"]) == 0
|
||||
|
||||
# logging on with wrong credentials shouldn't work
|
||||
machine.fail(
|
||||
"curl --fail http://localhost:3000/login -H 'Accept: application/json' -H 'Referer: http://localhost:3000' --data 'username=user&password=wrongpassword'"
|
||||
)
|
||||
|
||||
# the admin user should get the admin role from his group membership in `hydra_admin`
|
||||
response = machine.succeed(
|
||||
"curl --fail http://localhost:3000/login -H 'Accept: application/json' -H 'Referer: http://localhost:3000' --data 'username=admin&password=password'"
|
||||
)
|
||||
|
||||
response_json = json.loads(response)
|
||||
assert "admin" == response_json["username"]
|
||||
assert "admin@example" == response_json["emailaddress"]
|
||||
assert "admin" in response_json["userroles"]
|
||||
'';
|
||||
};
|
||||
|
||||
tests.validate-openapi = pkgs.runCommand "validate-openapi"
|
||||
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
|
||||
''
|
||||
openapi-generator-cli validate -i ${./hydra-api.yaml}
|
||||
touch $out
|
||||
'';
|
||||
|
||||
container = nixosConfigurations.container.config.system.build.toplevel;
|
||||
};
|
||||
|
||||
checks.x86_64-linux.build = hydraJobs.build.x86_64-linux;
|
||||
checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux;
|
||||
checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi;
|
||||
checks = forEachSystem (system: {
|
||||
build = hydraJobs.build.${system};
|
||||
install = hydraJobs.tests.install.${system};
|
||||
validate-openapi = hydraJobs.tests.validate-openapi.${system};
|
||||
});
|
||||
|
||||
packages.x86_64-linux.hydra = pkgs.hydra;
|
||||
defaultPackage.x86_64-linux = pkgs.hydra;
|
||||
|
||||
nixosModules.hydra = {
|
||||
imports = [ ./hydra-module.nix ];
|
||||
nixpkgs.overlays = [ self.overlay nix.overlay ];
|
||||
};
|
||||
|
||||
nixosModules.hydraTest = {
|
||||
imports = [ self.nixosModules.hydra ];
|
||||
|
||||
services.hydra-dev.enable = true;
|
||||
services.hydra-dev.hydraURL = "http://hydra.example.org";
|
||||
services.hydra-dev.notificationSender = "admin@hydra.example.org";
|
||||
|
||||
systemd.services.hydra-send-stats.enable = false;
|
||||
|
||||
services.postgresql.enable = true;
|
||||
services.postgresql.package = pkgs.postgresql_11;
|
||||
|
||||
# The following is to work around the following error from hydra-server:
|
||||
# [error] Caught exception in engine "Cannot determine local time zone"
|
||||
time.timeZone = "UTC";
|
||||
|
||||
nix.extraOptions = ''
|
||||
allowed-uris = https://github.com/
|
||||
'';
|
||||
};
|
||||
|
||||
nixosModules.hydraProxy = {
|
||||
services.httpd = {
|
||||
enable = true;
|
||||
adminAddr = "hydra-admin@example.org";
|
||||
extraConfig = ''
|
||||
<Proxy *>
|
||||
Order deny,allow
|
||||
Allow from all
|
||||
</Proxy>
|
||||
|
||||
ProxyRequests Off
|
||||
ProxyPreserveHost On
|
||||
ProxyPass /apache-errors !
|
||||
ErrorDocument 503 /apache-errors/503.html
|
||||
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
|
||||
ProxyPassReverse / http://127.0.0.1:3000/
|
||||
'';
|
||||
packages = forEachSystem (system: let
|
||||
inherit (nixpkgs) lib;
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
nixDependencies = lib.makeScope pkgs.newScope
|
||||
(import (nix + "/packaging/dependencies.nix") {
|
||||
inherit pkgs;
|
||||
inherit (pkgs) stdenv;
|
||||
inputs = {};
|
||||
});
|
||||
nixComponents = lib.makeScope nixDependencies.newScope
|
||||
(import (nix + "/packaging/components.nix") {
|
||||
officialRelease = true;
|
||||
inherit lib pkgs;
|
||||
src = nix;
|
||||
maintainers = [ ];
|
||||
});
|
||||
in {
|
||||
nix-eval-jobs = pkgs.callPackage nix-eval-jobs {
|
||||
inherit nixComponents;
|
||||
};
|
||||
hydra = pkgs.callPackage ./package.nix {
|
||||
inherit (nixpkgs.lib) fileset;
|
||||
inherit nixComponents;
|
||||
inherit (self.packages.${system}) nix-eval-jobs;
|
||||
rawSrc = self;
|
||||
};
|
||||
default = self.packages.${system}.hydra;
|
||||
});
|
||||
|
||||
nixosModules = import ./nixos-modules {
|
||||
inherit self;
|
||||
};
|
||||
|
||||
nixosConfigurations.container = nixpkgs.lib.nixosSystem {
|
||||
system = "x86_64-linux";
|
||||
modules =
|
||||
[ self.nixosModules.hydraTest
|
||||
[
|
||||
self.nixosModules.hydra
|
||||
self.nixosModules.hydraTest
|
||||
self.nixosModules.hydraProxy
|
||||
{ system.configurationRevision = self.rev;
|
||||
{
|
||||
system.configurationRevision = self.lastModifiedDate;
|
||||
|
||||
boot.isContainer = true;
|
||||
networking.useDHCP = false;
|
||||
|
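Taken together, the reworked outputs are per-system; from a checkout they can be exercised with the standard Nix CLI (a quick sketch, attribute names taken from the flake outputs above):

# build the Hydra package (packages.<system>.hydra, also exposed as the default package)
nix build .#hydra

# build the pinned nix-eval-jobs that Hydra now depends on
nix build .#nix-eval-jobs

# run the per-system checks declared above: build, install VM test, validate-openapi
nix flake check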
||||
@@ -5,6 +5,12 @@ while ! pg_isready -h $(pwd)/.hydra-data/postgres -p 64444; do sleep 1; done
|
||||
|
||||
createdb -h $(pwd)/.hydra-data/postgres -p 64444 hydra
|
||||
|
||||
# create a db for the default user. Not sure why, but
|
||||
# the terminal is otherwise spammed with:
|
||||
#
|
||||
# FATAL: database "USERNAME" does not exist
|
||||
createdb -h $(pwd)/.hydra-data/postgres -p 64444 "$(whoami)" || true
|
||||
|
||||
hydra-init
|
||||
hydra-create-user alice --password foobar --role admin
|
||||
|
||||
@@ -13,6 +19,13 @@ if [ ! -f ./.hydra-data/hydra.conf ]; then
|
||||
cat << EOF > .hydra-data/hydra.conf
|
||||
# test-time instances likely don't want to bootstrap nixpkgs from scratch
|
||||
use-substitutes = true
|
||||
|
||||
<hydra_notify>
|
||||
<prometheus>
|
||||
listen_address = 127.0.0.1
|
||||
port = 64445
|
||||
</prometheus>
|
||||
</hydra_notify>
|
||||
EOF
|
||||
fi
|
||||
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-dev-server --port 63333
|
||||
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-dev-server --port 63333 --restart --debug
|
||||
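With this dev setup running, the individual services can be poked at directly; a rough sketch (ports are taken from the scripts above, and the /metrics path for hydra-notify's Prometheus listener is an assumption based on the usual exporter convention):

# the development web UI started by hydra-dev-server
curl --fail http://localhost:63333/

# metrics exposed by hydra-notify on the address configured in .hydra-data/hydra.conf
curl http://127.0.0.1:64445/metrics

# connect to the development PostgreSQL instance over its local socket directory
psql -h "$(pwd)/.hydra-data/postgres" -p 64444 hydra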
|
||||
6
foreman/start-manual.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/bin/sh
|
||||
|
||||
mdbook serve \
|
||||
--port 63332 \
|
||||
--dest-dir ./.hydra-data/manual \
|
||||
./doc/manual/
|
||||
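The script serves the manual next to the dev server; a sketch of using it on its own (the port comes from the script itself, the foreman Procfile wiring is assumed):

# either run the whole dev process group...
foreman start

# ...or serve just the manual and check that it is up
./foreman/start-manual.sh &
curl --fail http://localhost:63332/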
411
hydra-api.yaml
@@ -70,7 +70,7 @@ paths:
|
||||
$ref: '#/components/examples/projects-success'
|
||||
|
||||
/api/push:
|
||||
put:
|
||||
post:
|
||||
summary: trigger jobsets
|
||||
parameters:
|
||||
- in: query
|
||||
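Because the method changed from PUT to POST, existing push hooks need updating; a minimal sketch (host, project and jobset names are placeholders, and the `jobsets` query parameter is assumed from Hydra's usual push API):

# trigger an evaluation of myproject:myjobset (this used to be a PUT)
curl --fail -X POST -H 'Accept: application/json' \
  'http://localhost:3000/api/push?jobsets=myproject:myjobset'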
@@ -160,16 +160,45 @@ paths:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
displayname:
|
||||
name:
|
||||
description: name of the project
|
||||
type: string
|
||||
displayname:
|
||||
description: display name of the project
|
||||
type: string
|
||||
description:
|
||||
description: description of the project
|
||||
type: string
|
||||
homepage:
|
||||
description: homepage of the project
|
||||
type: string
|
||||
owner:
|
||||
description: owner of the project
|
||||
type: string
|
||||
enabled:
|
||||
description: when set to true the project gets scheduled for evaluation
|
||||
type: boolean
|
||||
hidden:
|
||||
enable_dynamic_run_command:
|
||||
description: when true the project's jobsets support executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand.
|
||||
type: boolean
|
||||
visible:
|
||||
description: when set to true the project is displayed in the web interface
|
||||
type: boolean
|
||||
declarative:
|
||||
description: declarative input configured for this project
|
||||
type: object
|
||||
$ref: '#/components/schemas/DeclarativeInput'
|
||||
responses:
|
||||
'400':
|
||||
description: bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
description: error message
|
||||
type: string
|
||||
'403':
|
||||
description: request unauthorized
|
||||
content:
|
||||
@@ -236,6 +265,33 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
delete:
|
||||
summary: Deletes a project
|
||||
parameters:
|
||||
- name: id
|
||||
in: path
|
||||
description: project identifier
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
description: project deleted
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
redirect:
|
||||
type: string
|
||||
description: root of the Hydra instance
|
||||
'404':
|
||||
description: project could not be found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
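In practice, creating and deleting a project looks roughly like this (a sketch modelled on the curl helpers in the test scripts later in this diff; host, credentials and project name are placeholders):

URL=http://localhost:3000

# log in once and keep the session cookie
curl --referer $URL -H 'Accept: application/json' -H 'Content-Type: application/json' \
  -X POST -d '{"username":"alice","password":"foobar"}' -c hydra-cookie.txt $URL/login

# create (or update) the project
curl --referer $URL -H 'Accept: application/json' -H 'Content-Type: application/json' \
  -X PUT -d '{"displayname":"Example","enabled":"1","visible":"1"}' \
  -b hydra-cookie.txt $URL/project/example

# delete it again; the 200 response carries a redirect to the Hydra root
curl --referer $URL -H 'Accept: application/json' \
  -X DELETE -b hydra-cookie.txt $URL/project/example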
|
||||
/jobset/{project-id}/{jobset-id}:
|
||||
put:
|
||||
summary: Creates a jobset in an existing project
|
||||
@@ -257,35 +313,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
'description':
|
||||
description: a description of the jobset
|
||||
type: string
|
||||
checkinterval:
|
||||
description: interval (in seconds) in which to check for evaluation
|
||||
type: integer
|
||||
enabled:
|
||||
description: when true the jobset gets scheduled for evaluation
|
||||
type: boolean
|
||||
visible:
|
||||
description: when true the jobset is visible in the web frontend
|
||||
type: boolean
|
||||
keepnr:
|
||||
description: number of evaluations to keep
|
||||
type: integer
|
||||
nixexprinput:
|
||||
description: the name of the jobset input which contains the nixexprpath
|
||||
type: string
|
||||
nixexprpath:
|
||||
nullable: true
|
||||
description: the path to the file to evaluate
|
||||
type: string
|
||||
inputs:
|
||||
description: inputs for this jobset
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/JobsetInput'
|
||||
$ref: '#/components/schemas/Jobset'
|
||||
responses:
|
||||
'201':
|
||||
description: jobset creation response
|
||||
@@ -349,6 +377,39 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
delete:
|
||||
summary: Deletes a jobset designated by project and jobset id
|
||||
parameters:
|
||||
- name: project-id
|
||||
in: path
|
||||
description: name of the project the jobset belongs to
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
- name: jobset-id
|
||||
in: path
|
||||
description: name of the jobset to retrieve
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
description: jobset successfully deleted
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
redirect:
|
||||
type: string
|
||||
description: root of the Hydra instance
|
||||
'404':
|
||||
description: jobset couldn't be found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
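Jobset deletion works the same way; a one-line sketch reusing the placeholder session cookie from the project example above:

curl --referer $URL -H 'Accept: application/json' \
  -X DELETE -b hydra-cookie.txt $URL/jobset/example/hello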
|
||||
/jobset/{project-id}/{jobset-id}/evals:
|
||||
get:
|
||||
summary: Retrieves all evaluations of a jobset
|
||||
@@ -446,13 +507,39 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/eval/{build-id}:
|
||||
/build/{build-id}/constituents:
|
||||
get:
|
||||
summary: Retrieves evaluations identified by build id
|
||||
summary: Retrieves a build's constituent jobs
|
||||
parameters:
|
||||
- name: build-id
|
||||
- name: build-id
|
||||
in: path
|
||||
description: build identifier
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
responses:
|
||||
'200':
|
||||
description: build
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Build'
|
||||
'404':
|
||||
description: build couldn't be found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Error'
|
||||
|
||||
/eval/{eval-id}:
|
||||
get:
|
||||
summary: Retrieves evaluations identified by eval id
|
||||
parameters:
|
||||
- name: eval-id
|
||||
in: path
|
||||
description: build identifier
|
||||
description: eval identifier
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
@@ -464,6 +551,24 @@ paths:
|
||||
schema:
|
||||
$ref: '#/components/schemas/JobsetEval'
|
||||
|
||||
/eval/{eval-id}/builds:
|
||||
get:
|
||||
summary: Retrieves all builds belonging to an evaluation identified by eval id
|
||||
parameters:
|
||||
- name: eval-id
|
||||
in: path
|
||||
description: eval identifier
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
responses:
|
||||
'200':
|
||||
description: builds
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/JobsetEvalBuilds'
|
||||
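Both new read-only endpoints return plain JSON; a sketch of querying them (the IDs are placeholders):

# constituent jobs of an aggregate build
curl --fail -H 'Accept: application/json' http://localhost:3000/build/1234/constituents

# all builds produced by one evaluation
curl --fail -H 'Accept: application/json' http://localhost:3000/eval/5678/builds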
|
||||
components:
|
||||
schemas:
|
||||
|
||||
@@ -514,51 +619,134 @@ components:
|
||||
description:
|
||||
description: description of the project
|
||||
type: string
|
||||
homepage:
|
||||
description: homepage of the project
|
||||
type: string
|
||||
hidden:
|
||||
description: when set to true the project is not displayed in the web interface
|
||||
type: boolean
|
||||
enabled:
|
||||
description: when set to true the project gets scheduled for evaluation
|
||||
type: boolean
|
||||
enable_dynamic_run_command:
|
||||
description: when true the project's jobsets support executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand.
|
||||
type: boolean
|
||||
declarative:
|
||||
description: declarative input configured for this project
|
||||
type: object
|
||||
$ref: '#/components/schemas/DeclarativeInput'
|
||||
jobsets:
|
||||
description: list of jobsets belonging to this project
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
|
||||
DeclarativeInput:
|
||||
type: object
|
||||
properties:
|
||||
file:
|
||||
description: The file in `value` which contains the declarative spec file. Relative to the root of `value`.
|
||||
type: string
|
||||
type:
|
||||
description: The type of the declarative input.
|
||||
type: string
|
||||
value:
|
||||
description: The value of the declarative input.
|
||||
type: string
|
||||
|
||||
JobsetInput:
|
||||
type: object
|
||||
properties:
|
||||
jobsetinputalts:
|
||||
type: array
|
||||
description: ???
|
||||
items:
|
||||
type: string
|
||||
name:
|
||||
description: name of the input
|
||||
type: string
|
||||
value:
|
||||
description: value of the input
|
||||
type: string
|
||||
type:
|
||||
description: type of input
|
||||
type: string
|
||||
emailresponsible:
|
||||
description: whether or not to email responsible parties
|
||||
type: boolean
|
||||
|
||||
Jobset:
|
||||
type: object
|
||||
properties:
|
||||
fetcherrormsg:
|
||||
name:
|
||||
description: the name of the jobset
|
||||
type: string
|
||||
project:
|
||||
description: the project this jobset belongs to
|
||||
type: string
|
||||
description:
|
||||
nullable: true
|
||||
description: contains the error message when there was a problem fetching sources for a jobset
|
||||
description: a description of the jobset
|
||||
type: string
|
||||
nixexprinput:
|
||||
nullable: true
|
||||
description: the name of the jobset input which contains the nixexprpath
|
||||
type: string
|
||||
errormsg:
|
||||
description: contains the stderr output of the nix-instantiate command
|
||||
type: string
|
||||
emailoverride:
|
||||
description: email address to send notices to instead of the package maintainer (can be a comma separated list)
|
||||
type: string
|
||||
nixexprpath:
|
||||
nullable: true
|
||||
description: the path to the file to evaluate
|
||||
type: string
|
||||
errormsg:
|
||||
nullable: true
|
||||
description: contains the stderr output of the nix-instantiate command
|
||||
type: string
|
||||
errortime:
|
||||
nullable: true
|
||||
description: timestamp associated with errormsg
|
||||
type: integer
|
||||
lastcheckedtime:
|
||||
nullable: true
|
||||
description: the last time the evaluator looked at this jobset
|
||||
type: integer
|
||||
triggertime:
|
||||
nullable: true
|
||||
description: set to the time we were triggered by a push event
|
||||
type: integer
|
||||
enabled:
|
||||
description: when set to true the jobset gets scheduled for evaluation
|
||||
description: 0 is disabled, 1 is enabled, 2 is one-shot, and 3 is one-at-a-time
|
||||
type: integer
|
||||
enableemail:
|
||||
description: when true the jobset sends emails when previously-successful builds fail
|
||||
type: boolean
|
||||
jobsetinputs:
|
||||
enable_dynamic_run_command:
|
||||
description: when true the jobset supports executing dynamically defined RunCommand hooks. Requires the server and project's configuration to also enable dynamic RunCommand.
|
||||
type: boolean
|
||||
visible:
|
||||
description: when true the jobset is visible in the web frontend
|
||||
type: boolean
|
||||
emailoverride:
|
||||
description: email address to send notices to instead of the package maintainer (can be a comma separated list)
|
||||
type: string
|
||||
keepnr:
|
||||
description: number of evaluations to keep
|
||||
type: integer
|
||||
checkinterval:
|
||||
description: interval (in seconds) in which to check for evaluation
|
||||
type: integer
|
||||
schedulingshares:
|
||||
description: how many shares to be allocated to the jobset
|
||||
type: integer
|
||||
fetcherrormsg:
|
||||
nullable: true
|
||||
description: contains the error message when there was a problem fetching sources for a jobset
|
||||
type: string
|
||||
startime:
|
||||
nullable: true
|
||||
description: set to the time the latest evaluation started (if one is currently running)
|
||||
type: integer
|
||||
type:
|
||||
description: the type of the jobset
|
||||
type: integer
|
||||
flake:
|
||||
nullable: true
|
||||
description: the flake uri to evaluate
|
||||
type: string
|
||||
inputs:
|
||||
description: inputs configured for this jobset
|
||||
type: object
|
||||
additionalProperties:
|
||||
@@ -574,18 +762,6 @@ components:
|
||||
'type':
|
||||
description: The type of this input
|
||||
type: string
|
||||
enum:
|
||||
- bzr
|
||||
- bzr-checkout
|
||||
- bitbucketpulls
|
||||
- darcs
|
||||
- git
|
||||
- githubpulls
|
||||
- gitlabpulls
|
||||
- hg
|
||||
- path
|
||||
- svn
|
||||
- svn-checkout
|
||||
revision:
|
||||
nullable: true
|
||||
description: A Git/Mercurial commit hash or a Subversion revision number.
|
||||
@@ -612,11 +788,24 @@ components:
|
||||
properties:
|
||||
id:
|
||||
type: integer
|
||||
timestamp:
|
||||
description: Time in seconds since the Unix epoch when this evaluation was created.
|
||||
type: integer
|
||||
checkouttime:
|
||||
description: How long it took (in seconds) to fetch the jobset inputs.
|
||||
type: integer
|
||||
evaltime:
|
||||
description: How long it took (in seconds) to evaluate the jobset.
|
||||
type: integer
|
||||
hasnewbuilds:
|
||||
description: is true if the number of JobsetEval members is different from the prior evaluation. (will always be true on the first evaluation)
|
||||
description: Whether the number of JobsetEval members is different from the prior evaluation. This is always true on the first evaluation.
|
||||
type: boolean
|
||||
flake:
|
||||
description: For flake jobsets, the immutable flake reference allowing you to reproduce this evaluation. Null otherwise.
|
||||
nullable: true
|
||||
type: string
|
||||
builds:
|
||||
description: List of builds generated for this jobset evaluation
|
||||
description: List of builds generated for this jobset evaluation.
|
||||
type: array
|
||||
items:
|
||||
type: integer
|
||||
@@ -625,6 +814,13 @@ components:
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/JobsetEvalInput'
|
||||
|
||||
JobsetEvalBuilds:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/Build'
|
||||
|
||||
JobsetOverview:
|
||||
type: array
|
||||
items:
|
||||
@@ -699,7 +895,7 @@ components:
|
||||
description: Size of the produced file
|
||||
type: integer
|
||||
defaultpath:
|
||||
description: This is a Git/Mercurial commit hash or a Subversion revision number
|
||||
description: if path is a directory, the default file relative to path to be served
|
||||
type: string
|
||||
'type':
|
||||
description: Types of build product (user defined)
|
||||
@@ -819,49 +1015,72 @@ components:
|
||||
examples:
|
||||
projects-success:
|
||||
value:
|
||||
- enabled: 1
|
||||
name: example-hello
|
||||
hidden: 0
|
||||
description: hello
|
||||
owner: hydra-user
|
||||
- displayname: Foo Bar
|
||||
description: Foo Bar Baz Qux
|
||||
enabled: true
|
||||
owner: alice
|
||||
jobsets:
|
||||
- hello
|
||||
displayname: example-hello
|
||||
- displayname: foo
|
||||
jobsets:
|
||||
- foobar
|
||||
owner: hydra-user
|
||||
name: foo
|
||||
enabled: 1
|
||||
description: foo project
|
||||
hidden: 0
|
||||
- bar-jobset
|
||||
hidden: false
|
||||
homepage: https://example.com/
|
||||
name: foobar
|
||||
- jobsets:
|
||||
- test-jobset
|
||||
hidden: false
|
||||
name: hello
|
||||
homepage: https://example.com/
|
||||
description: Hi There
|
||||
displayname: Hello
|
||||
enabled: true
|
||||
owner: alice
|
||||
|
||||
project-success:
|
||||
value:
|
||||
name: foo
|
||||
enabled: 1
|
||||
hidden: 0
|
||||
description: foo project
|
||||
displayname: foo
|
||||
owner: gilligan
|
||||
jobsets:
|
||||
- foobar
|
||||
- bar-jobset
|
||||
homepage: https://example.com/
|
||||
name: foobar
|
||||
hidden: false
|
||||
enabled: true
|
||||
displayname: Foo Bar
|
||||
description: Foo Bar Baz Qux
|
||||
owner: alice
|
||||
|
||||
jobset-success:
|
||||
value:
|
||||
nixexprpath: examples/hello.nix
|
||||
enabled: 1
|
||||
triggertime: null
|
||||
enableemail: false
|
||||
jobsetinputs:
|
||||
hydra:
|
||||
jobsetinputalts:
|
||||
- 'https://github.com/gilligan/hydra extend-readme'
|
||||
nixpkgs:
|
||||
type: git
|
||||
name: nixpkgs
|
||||
emailresponsible: false
|
||||
jobsetinputalts:
|
||||
- 'https://github.com/nixos/nixpkgs-channels nixos-20.03'
|
||||
- https://github.com/NixOS/nixpkgs.git
|
||||
officialRelease:
|
||||
jobsetinputalts:
|
||||
- 'false'
|
||||
emailresponsible: false
|
||||
name: officialRelease
|
||||
type: boolean
|
||||
fetcherrormsg: ''
|
||||
hidden: false
|
||||
schedulingshares: 1
|
||||
emailoverride: ''
|
||||
starttime: null
|
||||
description: ''
|
||||
errormsg: ''
|
||||
nixexprinput: hydra
|
||||
fetcherrormsg: null
|
||||
lastcheckedtime: null
|
||||
nixexprinput: nixpkgs
|
||||
checkinterval: 0
|
||||
project: foobar
|
||||
flake: ''
|
||||
type: 0
|
||||
enabled: 1
|
||||
name: bar-jobset
|
||||
keepnr: 0
|
||||
nixexprpath: pkgs/top-level/release.nix
|
||||
errortime: null
|
||||
|
||||
evals-success:
|
||||
value:
|
||||
|
||||
26
meson.build
Normal file
@@ -0,0 +1,26 @@
|
||||
project('hydra', 'cpp',
|
||||
version: files('version.txt'),
|
||||
license: 'GPL-3.0',
|
||||
default_options: [
|
||||
'debug=true',
|
||||
'optimization=2',
|
||||
'cpp_std=c++20',
|
||||
],
|
||||
)
|
||||
|
||||
nix_util_dep = dependency('nix-util', required: true)
|
||||
nix_store_dep = dependency('nix-store', required: true)
|
||||
nix_main_dep = dependency('nix-main', required: true)
|
||||
|
||||
pqxx_dep = dependency('libpqxx', required: true)
|
||||
|
||||
prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
|
||||
prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)
|
||||
|
||||
mdbook = find_program('mdbook', native: true)
|
||||
perl = find_program('perl', native: true)
|
||||
|
||||
subdir('doc/manual')
|
||||
subdir('nixos-modules')
|
||||
subdir('src')
|
||||
subdir('t')
|
||||
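With these dependencies available (most conveniently from the flake's development shell), the Meson build follows the usual steps; a sketch:

# configure and build out of tree (assumes nix-util, libpqxx, prometheus-cpp etc. are discoverable via pkg-config)
meson setup build
ninja -C build

# run the test suite declared under t/
meson test -C build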
47
nixos-modules/default.nix
Normal file
@@ -0,0 +1,47 @@
|
||||
{ self }:
|
||||
|
||||
{
|
||||
hydra = { pkgs, lib,... }: {
|
||||
_file = ./default.nix;
|
||||
imports = [ ./hydra.nix ];
|
||||
services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
|
||||
};
|
||||
|
||||
hydraTest = { pkgs, ... }: {
|
||||
services.hydra-dev.enable = true;
|
||||
services.hydra-dev.hydraURL = "http://hydra.example.org";
|
||||
services.hydra-dev.notificationSender = "admin@hydra.example.org";
|
||||
|
||||
systemd.services.hydra-send-stats.enable = false;
|
||||
|
||||
services.postgresql.enable = true;
|
||||
|
||||
# The following is to work around the following error from hydra-server:
|
||||
# [error] Caught exception in engine "Cannot determine local time zone"
|
||||
time.timeZone = "UTC";
|
||||
|
||||
nix.extraOptions = ''
|
||||
allowed-uris = https://github.com/
|
||||
'';
|
||||
};
|
||||
|
||||
hydraProxy = {
|
||||
services.httpd = {
|
||||
enable = true;
|
||||
adminAddr = "hydra-admin@example.org";
|
||||
extraConfig = ''
|
||||
<Proxy *>
|
||||
Order deny,allow
|
||||
Allow from all
|
||||
</Proxy>
|
||||
|
||||
ProxyRequests Off
|
||||
ProxyPreserveHost On
|
||||
ProxyPass /apache-errors !
|
||||
ErrorDocument 503 /apache-errors/503.html
|
||||
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
|
||||
ProxyPassReverse / http://127.0.0.1:3000/
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -26,7 +26,7 @@ let
|
||||
} // hydraEnv // cfg.extraEnv;
|
||||
|
||||
serverEnv = env //
|
||||
{ HYDRA_TRACKER = cfg.tracker;
|
||||
{
|
||||
COLUMNS = "80";
|
||||
PGPASSFILE = "${baseDir}/pgpass-www"; # grrr
|
||||
XDG_CACHE_HOME = "${baseDir}/www/.cache";
|
||||
@@ -58,12 +58,16 @@ in
|
||||
example = "dbi:Pg:dbname=hydra;host=postgres.example.org;user=foo;";
|
||||
description = ''
|
||||
The DBI string for Hydra database connection.
|
||||
|
||||
NOTE: Attempts to set `application_name` will be overridden by
|
||||
`hydra-TYPE` (where TYPE is e.g. `evaluator`, `queue-runner`,
|
||||
etc.) in all hydra services to more easily distinguish where
|
||||
queries are coming from.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.path;
|
||||
default = pkgs.hydra;
|
||||
description = "The Hydra package.";
|
||||
};
|
||||
|
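Since every Hydra daemon now tags its connections this way, database activity can be attributed per service; for example, on a host with the default local PostgreSQL (a sketch assuming superuser shell access):

sudo -u postgres psql hydra -c \
  "SELECT application_name, count(*) FROM pg_stat_activity WHERE datname = 'hydra' GROUP BY 1;"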
||||
@@ -166,6 +170,7 @@ in
|
||||
buildMachinesFiles = mkOption {
|
||||
type = types.listOf types.path;
|
||||
default = optional (config.nix.buildMachines != []) "/etc/nix/machines";
|
||||
defaultText = literalExpression ''optional (config.nix.buildMachines != []) "/etc/nix/machines"'';
|
||||
example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
|
||||
description = "List of files containing build machines.";
|
||||
};
|
||||
@@ -192,13 +197,17 @@ in
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${baseDir} 0750 hydra hydra"
|
||||
];
|
||||
|
||||
users.extraGroups.hydra = { };
|
||||
|
||||
users.extraUsers.hydra =
|
||||
{ description = "Hydra";
|
||||
group = "hydra";
|
||||
createHome = true;
|
||||
home = baseDir;
|
||||
isSystemUser = true;
|
||||
useDefaultShell = true;
|
||||
};
|
||||
|
||||
@@ -206,16 +215,22 @@ in
|
||||
{ description = "Hydra queue runner";
|
||||
group = "hydra";
|
||||
useDefaultShell = true;
|
||||
isSystemUser = true;
|
||||
home = "${baseDir}/queue-runner"; # really only to keep SSH happy
|
||||
};
|
||||
|
||||
users.extraUsers.hydra-www =
|
||||
{ description = "Hydra web server";
|
||||
group = "hydra";
|
||||
isSystemUser = true;
|
||||
useDefaultShell = true;
|
||||
};
|
||||
|
||||
nix.trustedUsers = [ "hydra-queue-runner" ];
|
||||
nix.settings = {
|
||||
trusted-users = [ "hydra-queue-runner" ];
|
||||
keep-outputs = true;
|
||||
keep-derivations = true;
|
||||
};
|
||||
|
||||
services.hydra-dev.extraConfig =
|
||||
''
|
||||
@@ -229,40 +244,41 @@ in
|
||||
''}
|
||||
gc_roots_dir = ${cfg.gcRootsDir}
|
||||
use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}
|
||||
|
||||
${optionalString (cfg.tracker != null) (let
|
||||
indentedTrackerData = lib.concatMapStringsSep "\n" (line: " ${line}") (lib.splitString "\n" cfg.tracker);
|
||||
in ''
|
||||
tracker = <<TRACKER
|
||||
${indentedTrackerData}
|
||||
TRACKER
|
||||
'')}
|
||||
'';
|
||||
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
environment.variables = hydraEnv;
|
||||
|
||||
nix.extraOptions = ''
|
||||
gc-keep-outputs = true
|
||||
gc-keep-derivations = true
|
||||
|
||||
# The default (`true') slows Nix down a lot since the build farm
|
||||
# has so many GC roots.
|
||||
gc-check-reachability = false
|
||||
'';
|
||||
|
||||
systemd.services.hydra-init =
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
requires = optional haveLocalDB "postgresql.service";
|
||||
after = optional haveLocalDB "postgresql.service";
|
||||
environment = env;
|
||||
path = [ pkgs.utillinux ];
|
||||
environment = env // {
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
|
||||
};
|
||||
path = [ pkgs.util-linux ];
|
||||
preStart = ''
|
||||
mkdir -p ${baseDir}
|
||||
chown hydra.hydra ${baseDir}
|
||||
chmod 0750 ${baseDir}
|
||||
|
||||
ln -sf ${hydraConf} ${baseDir}/hydra.conf
|
||||
|
||||
mkdir -m 0700 -p ${baseDir}/www
|
||||
chown hydra-www.hydra ${baseDir}/www
|
||||
chown hydra-www:hydra ${baseDir}/www
|
||||
|
||||
mkdir -m 0700 -p ${baseDir}/queue-runner
|
||||
mkdir -m 0750 -p ${baseDir}/build-logs
|
||||
chown hydra-queue-runner.hydra ${baseDir}/queue-runner ${baseDir}/build-logs
|
||||
mkdir -m 0750 -p ${baseDir}/runcommand-logs
|
||||
chown hydra-queue-runner:hydra \
|
||||
${baseDir}/queue-runner \
|
||||
${baseDir}/build-logs \
|
||||
${baseDir}/runcommand-logs
|
||||
|
||||
${optionalString haveLocalDB ''
|
||||
if ! [ -e ${baseDir}/.db-created ]; then
|
||||
@@ -290,7 +306,7 @@ in
|
||||
rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
|
||||
fi
|
||||
|
||||
chown hydra.hydra ${cfg.gcRootsDir}
|
||||
chown hydra:hydra ${cfg.gcRootsDir}
|
||||
chmod 2775 ${cfg.gcRootsDir}
|
||||
'';
|
||||
serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init";
|
||||
@@ -304,7 +320,9 @@ in
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" ];
|
||||
environment = serverEnv;
|
||||
environment = serverEnv // {
|
||||
HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server";
|
||||
};
|
||||
restartTriggers = [ hydraConf ];
|
||||
serviceConfig =
|
||||
{ ExecStart =
|
||||
@@ -320,12 +338,14 @@ in
|
||||
systemd.services.hydra-queue-runner =
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" "network.target" ];
|
||||
path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "hydra-init.service" "network.target" "network-online.target" ];
|
||||
path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ];
|
||||
restartTriggers = [ hydraConf ];
|
||||
environment = env // {
|
||||
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
|
||||
IN_SYSTEMD = "1"; # to get log severity levels
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner";
|
||||
};
|
||||
serviceConfig =
|
||||
{ ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v";
|
||||
@@ -344,8 +364,10 @@ in
|
||||
requires = [ "hydra-init.service" ];
|
||||
restartTriggers = [ hydraConf ];
|
||||
after = [ "hydra-init.service" "network.target" ];
|
||||
path = with pkgs; [ nettools cfg.package jq ];
|
||||
environment = env;
|
||||
path = with pkgs; [ hostname-debian cfg.package jq ];
|
||||
environment = env // {
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
|
||||
};
|
||||
serviceConfig =
|
||||
{ ExecStart = "@${cfg.package}/bin/hydra-evaluator hydra-evaluator";
|
||||
ExecStopPost = "${cfg.package}/bin/hydra-evaluator --unlock";
|
||||
@@ -358,7 +380,9 @@ in
|
||||
systemd.services.hydra-update-gc-roots =
|
||||
{ requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" ];
|
||||
environment = env;
|
||||
environment = env // {
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots";
|
||||
};
|
||||
serviceConfig =
|
||||
{ ExecStart = "@${cfg.package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
|
||||
User = "hydra";
|
||||
@@ -369,7 +393,9 @@ in
|
||||
systemd.services.hydra-send-stats =
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
after = [ "hydra-init.service" ];
|
||||
environment = env;
|
||||
environment = env // {
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats";
|
||||
};
|
||||
serviceConfig =
|
||||
{ ExecStart = "@${cfg.package}/bin/hydra-send-stats hydra-send-stats";
|
||||
User = "hydra";
|
||||
@@ -381,8 +407,10 @@ in
|
||||
requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" ];
|
||||
restartTriggers = [ hydraConf ];
|
||||
path = [ pkgs.zstd ];
|
||||
environment = env // {
|
||||
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
|
||||
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
|
||||
};
|
||||
serviceConfig =
|
||||
{ ExecStart = "@${cfg.package}/bin/hydra-notify hydra-notify";
|
||||
@@ -409,12 +437,12 @@ in
|
||||
if [ $(systemctl is-active $service) == active ]; then
|
||||
echo "stopping $service due to lack of free space..."
|
||||
systemctl stop $service
|
||||
date > /var/lib/hydra/.$service-stopped-minspace
|
||||
date > ${baseDir}/.$service-stopped-minspace
|
||||
fi
|
||||
else
|
||||
if [ $spaceleft -gt $(( ($minFreeGB + 10) * 1024**3)) -a \
|
||||
-r /var/lib/hydra/.$service-stopped-minspace ] ; then
|
||||
rm /var/lib/hydra/.$service-stopped-minspace
|
||||
-r ${baseDir}/.$service-stopped-minspace ] ; then
|
||||
rm ${baseDir}/.$service-stopped-minspace
|
||||
echo "restarting $service due to newly available free space..."
|
||||
systemctl start $service
|
||||
fi
|
||||
@@ -430,10 +458,17 @@ in
|
||||
# logs automatically after a step finishes, but this doesn't work
|
||||
# if the queue runner is stopped prematurely.
|
||||
systemd.services.hydra-compress-logs =
|
||||
{ path = [ pkgs.bzip2 ];
|
||||
{ path = [ pkgs.bzip2 pkgs.zstd ];
|
||||
script =
|
||||
''
|
||||
find /var/lib/hydra/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
|
||||
set -eou pipefail
|
||||
compression=$(sed -nr 's/compress_build_logs_compression = (.*)/\1/p' ${baseDir}/hydra.conf)
|
||||
if [[ $compression == "" || $compression == bzip2 ]]; then
|
||||
compressionCmd=(bzip2)
|
||||
elif [[ $compression == zstd ]]; then
|
||||
compressionCmd=(zstd --rm)
|
||||
fi
|
||||
find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c -print0 | xargs -0 -r "''${compressionCmd[@]}" --force --quiet
|
||||
'';
|
||||
startAt = "Sun 01:45";
|
||||
};
|
||||
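The compressor is chosen from compress_build_logs_compression in hydra.conf: empty or bzip2 keeps the old behaviour, zstd switches to zstd --rm. Besides the weekly timer, the pass can be exercised by hand, e.g.:

# run the compression pass immediately and watch what it picks up
systemctl start hydra-compress-logs.service
journalctl -u hydra-compress-logs.service --no-pager -n 20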
4
nixos-modules/meson.build
Normal file
@@ -0,0 +1,4 @@
|
||||
install_data('hydra.nix',
|
||||
install_dir: get_option('datadir') / 'nix',
|
||||
rename: ['hydra-module.nix'],
|
||||
)
|
||||
306
nixos-tests.nix
Normal file
@@ -0,0 +1,306 @@
|
||||
{ forEachSystem, nixpkgs, nixosModules }:
|
||||
|
||||
let
|
||||
# NixOS configuration used for VM tests.
|
||||
hydraServer =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
nixosModules.hydra
|
||||
nixosModules.hydraTest
|
||||
];
|
||||
|
||||
virtualisation.memorySize = 1024;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
|
||||
|
||||
nix = {
|
||||
# Without this nix tries to fetch packages from the default
|
||||
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
|
||||
settings.substituters = [ ];
|
||||
};
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
install = forEachSystem (system:
|
||||
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
|
||||
name = "hydra-install";
|
||||
nodes.machine = hydraServer;
|
||||
testScript =
|
||||
''
|
||||
machine.wait_for_job("hydra-init")
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_job("hydra-evaluator")
|
||||
machine.wait_for_job("hydra-queue-runner")
|
||||
machine.wait_for_open_port(3000)
|
||||
machine.succeed("curl --fail http://localhost:3000/")
|
||||
'';
|
||||
});
|
||||
|
||||
notifications = forEachSystem (system:
|
||||
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
|
||||
name = "hydra-notifications";
|
||||
nodes.machine = {
|
||||
imports = [ hydraServer ];
|
||||
services.hydra-dev.extraConfig = ''
|
||||
<influxdb>
|
||||
url = http://127.0.0.1:8086
|
||||
db = hydra
|
||||
</influxdb>
|
||||
'';
|
||||
services.influxdb.enable = true;
|
||||
};
|
||||
testScript = { nodes, ... }: ''
|
||||
machine.wait_for_job("hydra-init")
|
||||
|
||||
# Create an admin account and some other state.
|
||||
machine.succeed(
|
||||
"""
|
||||
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
|
||||
mkdir /run/jobset
|
||||
chmod 755 /run/jobset
|
||||
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
|
||||
chmod 644 /run/jobset/default.nix
|
||||
chown -R hydra /run/jobset
|
||||
"""
|
||||
)
|
||||
|
||||
# Wait until InfluxDB can receive web requests
|
||||
machine.wait_for_job("influxdb")
|
||||
machine.wait_for_open_port(8086)
|
||||
|
||||
# Create an InfluxDB database where hydra will write to
|
||||
machine.succeed(
|
||||
"curl -XPOST 'http://127.0.0.1:8086/query' "
|
||||
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
|
||||
)
|
||||
|
||||
# Wait until hydra-server can receive HTTP requests
|
||||
machine.wait_for_job("hydra-server")
|
||||
machine.wait_for_open_port(3000)
|
||||
|
||||
# Setup the project and jobset
|
||||
machine.succeed(
|
||||
"su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
|
||||
)
|
||||
|
||||
# Wait until hydra has built the job and
|
||||
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
|
||||
machine.wait_until_succeeds(
|
||||
"curl -s -H 'Accept: application/csv' "
|
||||
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
|
||||
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
|
||||
)
|
||||
'';
|
||||
});
|
||||
|
||||
gitea = forEachSystem (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
in
|
||||
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
|
||||
name = "hydra-gitea";
|
||||
nodes.machine = { pkgs, ... }: {
|
||||
imports = [ hydraServer ];
|
||||
services.hydra-dev.extraConfig = ''
|
||||
<gitea_authorization>
|
||||
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
|
||||
</gitea_authorization>
|
||||
'';
|
||||
nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
|
||||
nix = {
|
||||
settings.substituters = [ ];
|
||||
};
|
||||
services.gitea = {
|
||||
enable = true;
|
||||
database.type = "postgres";
|
||||
settings = {
|
||||
service.DISABLE_REGISTRATION = true;
|
||||
server.HTTP_PORT = 3001;
|
||||
};
|
||||
};
|
||||
services.openssh.enable = true;
|
||||
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
|
||||
networking.firewall.allowedTCPPorts = [ 3000 ];
|
||||
};
|
||||
skipLint = true;
|
||||
testScript =
|
||||
let
|
||||
scripts.mktoken = pkgs.writeText "token.sql" ''
|
||||
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
|
||||
'';
|
||||
|
||||
scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
|
||||
set -x
|
||||
mkdir -p /tmp/repo $HOME/.ssh
|
||||
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
|
||||
chmod 0400 $HOME/.ssh/privk
|
||||
git -C /tmp/repo init
|
||||
cp ${smallDrv} /tmp/repo/jobset.nix
|
||||
git -C /tmp/repo add .
|
||||
git config --global user.email test@localhost
|
||||
git config --global user.name test
|
||||
git -C /tmp/repo commit -m 'Initial import'
|
||||
git -C /tmp/repo remote add origin gitea@machine:root/repo
|
||||
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
|
||||
git -C /tmp/repo push origin master
|
||||
git -C /tmp/repo log >&2
|
||||
'';
|
||||
|
||||
scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
|
||||
set -x
|
||||
su -l hydra -c "hydra-create-user root --email-address \
|
||||
'alice@example.org' --password foobar --role admin"
|
||||
|
||||
URL=http://localhost:3000
|
||||
USERNAME="root"
|
||||
PASSWORD="foobar"
|
||||
PROJECT_NAME="trivial"
|
||||
JOBSET_NAME="trivial"
|
||||
mycurl() {
|
||||
curl --referer $URL -H "Accept: application/json" \
|
||||
-H "Content-Type: application/json" $@
|
||||
}
|
||||
|
||||
cat >data.json <<EOF
|
||||
{ "username": "$USERNAME", "password": "$PASSWORD" }
|
||||
EOF
|
||||
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
|
||||
|
||||
cat >data.json <<EOF
|
||||
{
|
||||
"displayname":"Trivial",
|
||||
"enabled":"1",
|
||||
"visible":"1"
|
||||
}
|
||||
EOF
|
||||
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
|
||||
-d @data.json -b hydra-cookie.txt
|
||||
|
||||
cat >data.json <<EOF
|
||||
{
|
||||
"description": "Trivial",
|
||||
"checkinterval": "60",
|
||||
"enabled": "1",
|
||||
"visible": "1",
|
||||
"keepnr": "1",
|
||||
"enableemail": true,
|
||||
"emailoverride": "hydra@localhost",
|
||||
"type": 0,
|
||||
"nixexprinput": "git",
|
||||
"nixexprpath": "jobset.nix",
|
||||
"inputs": {
|
||||
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
|
||||
"gitea_repo_name": {"value": "repo", "type": "string"},
|
||||
"gitea_repo_owner": {"value": "root", "type": "string"},
|
||||
"gitea_status_repo": {"value": "git", "type": "string"},
|
||||
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
|
||||
-d @data.json -b hydra-cookie.txt
|
||||
'';
|
||||
|
||||
api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";

snakeoilKeypair = {
privkey = pkgs.writeText "privkey.snakeoil" ''
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-----END EC PRIVATE KEY-----
'';

pubkey = pkgs.lib.concatStrings [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= snakeoil"
];
};

smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
args = ["-c" "echo success > $out; exit 0"];
};
}
'';
in
''
import json

machine.start()
machine.wait_for_unit("multi-user.target")
machine.wait_for_open_port(3000)
machine.wait_for_open_port(3001)

machine.succeed(
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+ "--username root --password root --email test@localhost'"
)
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")

machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
)

machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
)

machine.succeed(
"${scripts.git-setup}"
)

machine.succeed(
"${scripts.hydra-setup}"
)

machine.wait_until_succeeds(
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+ '| jq .buildstatus | xargs test 0 -eq'
)

data = machine.succeed(
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
)

response = json.loads(data)

assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
assert response[0]['status'] == "success", "Expected finished status to be success!"
assert response[1]['status'] == "pending", "Expected queued status to be pending!"

machine.shutdown()
'';
});

validate-openapi = forEachSystem (system:
let pkgs = nixpkgs.legacyPackages.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'');

}
284
package.nix
Normal file
@@ -0,0 +1,284 @@
|
||||
{ stdenv
|
||||
, lib
|
||||
, fileset
|
||||
|
||||
, rawSrc
|
||||
|
||||
, buildEnv
|
||||
|
||||
, perlPackages
|
||||
|
||||
, nixComponents
|
||||
, git
|
||||
|
||||
, makeWrapper
|
||||
, meson
|
||||
, ninja
|
||||
, nukeReferences
|
||||
, pkg-config
|
||||
, mdbook
|
||||
|
||||
, unzip
|
||||
, libpqxx
|
||||
, top-git
|
||||
, mercurial
|
||||
, darcs
|
||||
, subversion
|
||||
, breezy
|
||||
, openssl
|
||||
, bzip2
|
||||
, libxslt
|
||||
, perl
|
||||
, pixz
|
||||
, boost
|
||||
, postgresql_13
|
||||
, nlohmann_json
|
||||
, prometheus-cpp
|
||||
|
||||
, cacert
|
||||
, foreman
|
||||
, glibcLocales
|
||||
, libressl
|
||||
, openldap
|
||||
, python3
|
||||
|
||||
, openssh
|
||||
, coreutils
|
||||
, findutils
|
||||
, gzip
|
||||
, xz
|
||||
, gnutar
|
||||
, gnused
|
||||
, nix-eval-jobs
|
||||
|
||||
, rpm
|
||||
, dpkg
|
||||
, cdrkit
|
||||
}:
|
||||
|
||||
let
|
||||
perlDeps = buildEnv {
|
||||
name = "hydra-perl-deps";
|
||||
paths = lib.closePropagation
|
||||
([
|
||||
nixComponents.nix-perl-bindings
|
||||
git
|
||||
] ++ (with perlPackages; [
|
||||
AuthenSASL
|
||||
CatalystActionREST
|
||||
CatalystAuthenticationStoreDBIxClass
|
||||
CatalystAuthenticationStoreLDAP
|
||||
CatalystDevel
|
||||
CatalystPluginAccessLog
|
||||
CatalystPluginAuthorizationRoles
|
||||
CatalystPluginCaptcha
|
||||
CatalystPluginPrometheusTiny
|
||||
CatalystPluginSessionStateCookie
|
||||
CatalystPluginSessionStoreFastMmap
|
||||
CatalystPluginStackTrace
|
||||
CatalystTraitForRequestProxyBase
|
||||
CatalystViewDownload
|
||||
CatalystViewJSON
|
||||
CatalystViewTT
|
||||
CatalystXRoleApplicator
|
||||
CatalystXScriptServerStarman
|
||||
CryptPassphrase
|
||||
CryptPassphraseArgon2
|
||||
CryptRandPasswd
|
||||
DataDump
|
||||
DateTime
|
||||
DBDPg
|
||||
DBDSQLite
|
||||
DBIxClassHelpers
|
||||
DigestSHA1
|
||||
EmailMIME
|
||||
EmailSender
|
||||
FileCopyRecursive
|
||||
FileLibMagic
|
||||
FileSlurper
|
||||
FileWhich
|
||||
IOCompress
|
||||
IPCRun
|
||||
IPCRun3
|
||||
JSON
|
||||
JSONMaybeXS
|
||||
JSONXS
|
||||
ListSomeUtils
|
||||
LWP
|
||||
LWPProtocolHttps
|
||||
ModulePluggable
|
||||
NetAmazonS3
|
||||
NetPrometheus
|
||||
NetStatsd
|
||||
PadWalker
|
||||
ParallelForkManager
|
||||
PerlCriticCommunity
|
||||
PrometheusTinyShared
|
||||
ReadonlyX
|
||||
SetScalar
|
||||
SQLSplitStatement
|
||||
Starman
|
||||
StringCompareConstantTime
|
||||
SysHostnameLong
|
||||
TermSizeAny
|
||||
TermReadKey
|
||||
Test2Harness
|
||||
TestPostgreSQL
|
||||
TextDiff
|
||||
TextTable
|
||||
UUID4Tiny
|
||||
YAML
|
||||
XMLSimple
|
||||
]));
|
||||
};
|
||||
|
||||
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
|
||||
in
|
||||
stdenv.mkDerivation (finalAttrs: {
|
||||
pname = "hydra";
|
||||
inherit version;
|
||||
|
||||
src = fileset.toSource {
|
||||
root = ./.;
|
||||
fileset = fileset.unions ([
|
||||
./doc
|
||||
./meson.build
|
||||
./nixos-modules
|
||||
./src
|
||||
./t
|
||||
./version.txt
|
||||
./.perlcriticrc
|
||||
]);
|
||||
};
|
||||
|
||||
outputs = [ "out" "doc" ];
|
||||
|
||||
strictDeps = true;
|
||||
|
||||
nativeBuildInputs = [
|
||||
makeWrapper
|
||||
meson
|
||||
ninja
|
||||
nukeReferences
|
||||
pkg-config
|
||||
mdbook
|
||||
nixComponents.nix-cli
|
||||
perlDeps
|
||||
perl
|
||||
unzip
|
||||
];
|
||||
|
||||
buildInputs = [
|
||||
libpqxx
|
||||
openssl
|
||||
libxslt
|
||||
nixComponents.nix-util
|
||||
nixComponents.nix-store
|
||||
nixComponents.nix-main
|
||||
perlDeps
|
||||
perl
|
||||
boost
|
||||
nlohmann_json
|
||||
prometheus-cpp
|
||||
];
|
||||
|
||||
nativeCheckInputs = [
|
||||
bzip2
|
||||
darcs
|
||||
foreman
|
||||
top-git
|
||||
mercurial
|
||||
subversion
|
||||
breezy
|
||||
openldap
|
||||
postgresql_13
|
||||
pixz
|
||||
nix-eval-jobs
|
||||
];
|
||||
|
||||
checkInputs = [
|
||||
cacert
|
||||
glibcLocales
|
||||
libressl.nc
|
||||
python3
|
||||
nixComponents.nix-cli
|
||||
];
|
||||
|
||||
hydraPath = lib.makeBinPath (
|
||||
[
|
||||
subversion
|
||||
openssh
|
||||
nixComponents.nix-cli
|
||||
coreutils
|
||||
findutils
|
||||
pixz
|
||||
gzip
|
||||
bzip2
|
||||
xz
|
||||
gnutar
|
||||
unzip
|
||||
git
|
||||
top-git
|
||||
mercurial
|
||||
darcs
|
||||
gnused
|
||||
breezy
|
||||
nix-eval-jobs
|
||||
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
|
||||
);
|
||||
|
||||
OPENLDAP_ROOT = openldap;
|
||||
|
||||
mesonBuildType = "release";
|
||||
|
||||
postPatch = ''
|
||||
patchShebangs .
|
||||
'';
|
||||
|
||||
shellHook = ''
|
||||
pushd $(git rev-parse --show-toplevel) >/dev/null
|
||||
|
||||
PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
|
||||
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
|
||||
export HYDRA_HOME="$(pwd)/src/"
|
||||
mkdir -p .hydra-data
|
||||
export HYDRA_DATA="$(pwd)/.hydra-data"
|
||||
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
|
||||
|
||||
popd >/dev/null
|
||||
'';
|
||||
|
||||
doCheck = true;
|
||||
|
||||
mesonCheckFlags = [ "--verbose" ];
|
||||
|
||||
preCheck = ''
|
||||
export LOGNAME=''${LOGNAME:-foo}
|
||||
# set $HOME for bzr so it can create its trace file
|
||||
export HOME=$(mktemp -d)
|
||||
'';
|
||||
|
||||
postInstall = ''
|
||||
mkdir -p $out/nix-support
|
||||
|
||||
for i in $out/bin/*; do
|
||||
read -n 4 chars < $i
|
||||
if [[ $chars =~ ELF ]]; then continue; fi
|
||||
wrapProgram $i \
|
||||
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
|
||||
--prefix PATH ':' $out/bin:$hydraPath \
|
||||
--set HYDRA_RELEASE ${version} \
|
||||
--set HYDRA_HOME $out/libexec/hydra \
|
||||
--set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
|
||||
--set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
|
||||
done
|
||||
'';
|
||||
|
||||
dontStrip = true;
|
||||
|
||||
meta.description = "Build of Hydra on ${stdenv.system}";
|
||||
passthru = {
|
||||
inherit perlDeps;
|
||||
nix = nixComponents.nix-cli;
|
||||
};
|
||||
})
|
||||
@@ -1,6 +1,6 @@
|
||||
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
|
||||
# returns an attribute set of the shape `{ defaultNix, shellNix }`
|
||||
|
||||
(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
|
||||
(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
|
||||
src = ./.;
|
||||
}).shellNix
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# IMPORTANT: if you delete this file your app will not work as
|
||||
# expected. you have been warned
|
||||
use strict;
|
||||
use warnings;
|
||||
use inc::Module::Install;
|
||||
|
||||
name 'Hydra';
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
|
||||
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
|
||||
DIST_SUBDIRS = $(SUBDIRS)
|
||||
@@ -1,5 +0,0 @@
|
||||
bin_PROGRAMS = hydra-eval-jobs
|
||||
|
||||
hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
|
||||
hydra_eval_jobs_LDADD = $(NIX_LIBS)
|
||||
hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra
|
||||
@@ -1,491 +0,0 @@
|
||||
#include <map>
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
|
||||
#include "shared.hh"
|
||||
#include "store-api.hh"
|
||||
#include "eval.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "util.hh"
|
||||
#include "get-drvs.hh"
|
||||
#include "globals.hh"
|
||||
#include "common-eval-args.hh"
|
||||
#include "flake/flakeref.hh"
|
||||
#include "flake/flake.hh"
|
||||
#include "attr-path.hh"
|
||||
#include "derivations.hh"
|
||||
#include "local-fs-store.hh"
|
||||
|
||||
#include "hydra-config.hh"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/resource.h>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
static Path gcRootsDir;
|
||||
static size_t maxMemorySize;
|
||||
|
||||
struct MyArgs : MixEvalArgs, MixCommonArgs
|
||||
{
|
||||
Path releaseExpr;
|
||||
bool flake = false;
|
||||
bool dryRun = false;
|
||||
|
||||
MyArgs() : MixCommonArgs("hydra-eval-jobs")
|
||||
{
|
||||
addFlag({
|
||||
.longName = "help",
|
||||
.description = "show usage information",
|
||||
.handler = {[&]() {
|
||||
printHelp(programName, std::cout);
|
||||
throw Exit();
|
||||
}}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "gc-roots-dir",
|
||||
.description = "garbage collector roots directory",
|
||||
.labels = {"path"},
|
||||
.handler = {&gcRootsDir}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "dry-run",
|
||||
.description = "don't create store derivations",
|
||||
.handler = {&dryRun, true}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "flake",
|
||||
.description = "build a flake",
|
||||
.handler = {&flake, true}
|
||||
});
|
||||
|
||||
expectArg("expr", &releaseExpr);
|
||||
}
|
||||
};
|
||||
|
||||
static MyArgs myArgs;
|
||||
|
||||
static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute)
|
||||
{
|
||||
Strings res;
|
||||
std::function<void(Value & v)> rec;
|
||||
|
||||
rec = [&](Value & v) {
|
||||
state.forceValue(v);
|
||||
if (v.type() == nString)
|
||||
res.push_back(v.string.s);
|
||||
else if (v.isList())
|
||||
for (unsigned int n = 0; n < v.listSize(); ++n)
|
||||
rec(*v.listElems()[n]);
|
||||
else if (v.type() == nAttrs) {
|
||||
auto a = v.attrs->find(state.symbols.create(subAttribute));
|
||||
if (a != v.attrs->end())
|
||||
res.push_back(state.forceString(*a->value));
|
||||
}
|
||||
};
|
||||
|
||||
Value * v = drv.queryMeta(name);
|
||||
if (v) rec(*v);
|
||||
|
||||
return concatStringsSep(", ", res);
|
||||
}
|
||||
|
||||
static void worker(
|
||||
EvalState & state,
|
||||
Bindings & autoArgs,
|
||||
AutoCloseFD & to,
|
||||
AutoCloseFD & from)
|
||||
{
|
||||
Value vTop;
|
||||
|
||||
if (myArgs.flake) {
|
||||
using namespace flake;
|
||||
|
||||
auto flakeRef = parseFlakeRef(myArgs.releaseExpr);
|
||||
|
||||
auto vFlake = state.allocValue();
|
||||
|
||||
auto lockedFlake = lockFlake(state, flakeRef,
|
||||
LockFlags {
|
||||
.updateLockFile = false,
|
||||
.useRegistries = false,
|
||||
.allowMutable = false,
|
||||
});
|
||||
|
||||
callFlake(state, lockedFlake, *vFlake);
|
||||
|
||||
auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
|
||||
state.forceValue(*vOutputs);
|
||||
|
||||
auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
|
||||
if (!aHydraJobs)
|
||||
aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
|
||||
if (!aHydraJobs)
|
||||
throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
|
||||
|
||||
vTop = *aHydraJobs->value;
|
||||
|
||||
} else {
|
||||
state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
|
||||
}
|
||||
|
||||
auto vRoot = state.allocValue();
|
||||
state.autoCallFunction(autoArgs, vTop, *vRoot);
|
||||
|
||||
while (true) {
|
||||
/* Wait for the master to send us a job name. */
|
||||
writeLine(to.get(), "next");
|
||||
|
||||
auto s = readLine(from.get());
|
||||
if (s == "exit") break;
|
||||
if (!hasPrefix(s, "do ")) abort();
|
||||
std::string attrPath(s, 3);
|
||||
|
||||
debug("worker process %d at '%s'", getpid(), attrPath);
|
||||
|
||||
/* Evaluate it and send info back to the master. */
|
||||
nlohmann::json reply;
|
||||
|
||||
try {
|
||||
auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;
|
||||
|
||||
auto v = state.allocValue();
|
||||
state.autoCallFunction(autoArgs, *vTmp, *v);
|
||||
|
||||
if (auto drv = getDerivation(state, *v, false)) {
|
||||
|
||||
DrvInfo::Outputs outputs = drv->queryOutputs();
|
||||
|
||||
if (drv->querySystem() == "unknown")
|
||||
throw EvalError("derivation must have a 'system' attribute");
|
||||
|
||||
auto drvPath = drv->queryDrvPath();
|
||||
|
||||
nlohmann::json job;
|
||||
|
||||
job["nixName"] = drv->queryName();
|
||||
job["system"] =drv->querySystem();
|
||||
job["drvPath"] = drvPath;
|
||||
job["description"] = drv->queryMetaString("description");
|
||||
job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
|
||||
job["homepage"] = drv->queryMetaString("homepage");
|
||||
job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
|
||||
job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
|
||||
job["timeout"] = drv->queryMetaInt("timeout", 36000);
|
||||
job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
|
||||
job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
|
||||
|
||||
/* If this is an aggregate, then get its constituents. */
|
||||
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
|
||||
if (a && state.forceBool(*a->value, *a->pos)) {
|
||||
auto a = v->attrs->get(state.symbols.create("constituents"));
|
||||
if (!a)
|
||||
throw EvalError("derivation must have a ‘constituents’ attribute");
|
||||
|
||||
|
||||
PathSet context;
|
||||
state.coerceToString(*a->pos, *a->value, context, true, false);
|
||||
for (auto & i : context)
|
||||
if (i.at(0) == '!') {
|
||||
size_t index = i.find("!", 1);
|
||||
job["constituents"].push_back(string(i, index + 1));
|
||||
}
|
||||
|
||||
state.forceList(*a->value, *a->pos);
|
||||
for (unsigned int n = 0; n < a->value->listSize(); ++n) {
|
||||
auto v = a->value->listElems()[n];
|
||||
state.forceValue(*v);
|
||||
if (v->type() == nString)
|
||||
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
|
||||
}
|
||||
}
|
||||
|
||||
/* Register the derivation as a GC root. !!! This
|
||||
registers roots for jobs that we may have already
|
||||
done. */
|
||||
auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
|
||||
if (gcRootsDir != "" && localStore) {
|
||||
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
|
||||
if (!pathExists(root))
|
||||
localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
|
||||
}
|
||||
|
||||
nlohmann::json out;
|
||||
for (auto & j : outputs)
|
||||
out[j.first] = j.second;
|
||||
job["outputs"] = std::move(out);
|
||||
|
||||
reply["job"] = std::move(job);
|
||||
}
|
||||
|
||||
else if (v->type() == nAttrs) {
|
||||
auto attrs = nlohmann::json::array();
|
||||
StringSet ss;
|
||||
for (auto & i : v->attrs->lexicographicOrder()) {
|
||||
std::string name(i->name);
|
||||
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
|
||||
printError("skipping job with illegal name '%s'", name);
|
||||
continue;
|
||||
}
|
||||
attrs.push_back(name);
|
||||
}
|
||||
reply["attrs"] = std::move(attrs);
|
||||
}
|
||||
|
||||
else if (v->type() == nNull)
|
||||
;
|
||||
|
||||
else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
|
||||
|
||||
} catch (EvalError & e) {
|
||||
// Transmits the error we got from the previous evaluation
|
||||
// in the JSON output.
|
||||
reply["error"] = filterANSIEscapes(e.msg(), true);
|
||||
// Don't forget to print it into the STDERR log, this is
|
||||
// what's shown in the Hydra UI.
|
||||
printError("error: %s", reply["error"]);
|
||||
}
|
||||
|
||||
writeLine(to.get(), reply.dump());
|
||||
|
||||
/* If our RSS exceeds the maximum, exit. The master will
|
||||
start a new process. */
|
||||
struct rusage r;
|
||||
getrusage(RUSAGE_SELF, &r);
|
||||
if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
|
||||
}
|
||||
|
||||
writeLine(to.get(), "restart");
|
||||
}
|
||||
|
||||
int main(int argc, char * * argv)
|
||||
{
|
||||
/* Prevent undeclared dependencies in the evaluation via
|
||||
$NIX_PATH. */
|
||||
unsetenv("NIX_PATH");
|
||||
|
||||
return handleExceptions(argv[0], [&]() {
|
||||
|
||||
auto config = std::make_unique<HydraConfig>();
|
||||
|
||||
auto nrWorkers = config->getIntOption("evaluator_workers", 1);
|
||||
maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
|
||||
|
||||
initNix();
|
||||
initGC();
|
||||
|
||||
myArgs.parseCmdline(argvToStrings(argc, argv));
|
||||
|
||||
/* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
|
||||
settings.builders = "";
|
||||
|
||||
/* Prevent access to paths outside of the Nix search path and
|
||||
to the environment. */
|
||||
evalSettings.restrictEval = true;
|
||||
|
||||
/* When building a flake, use pure evaluation (no access to
|
||||
'getEnv', 'currentSystem' etc. */
|
||||
evalSettings.pureEval = myArgs.flake;
|
||||
|
||||
if (myArgs.dryRun) settings.readOnlyMode = true;
|
||||
|
||||
if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
|
||||
|
||||
if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
|
||||
|
||||
struct State
|
||||
{
|
||||
std::set<std::string> todo{""};
|
||||
std::set<std::string> active;
|
||||
nlohmann::json jobs;
|
||||
std::exception_ptr exc;
|
||||
};
|
||||
|
||||
std::condition_variable wakeup;
|
||||
|
||||
Sync<State> state_;
|
||||
|
||||
/* Start a handler thread per worker process. */
|
||||
auto handler = [&]()
|
||||
{
|
||||
try {
|
||||
pid_t pid = -1;
|
||||
AutoCloseFD from, to;
|
||||
|
||||
while (true) {
|
||||
|
||||
/* Start a new worker process if necessary. */
|
||||
if (pid == -1) {
|
||||
Pipe toPipe, fromPipe;
|
||||
toPipe.create();
|
||||
fromPipe.create();
|
||||
pid = startProcess(
|
||||
[&,
|
||||
to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
|
||||
from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
|
||||
]()
|
||||
{
|
||||
try {
|
||||
EvalState state(myArgs.searchPath, openStore());
|
||||
Bindings & autoArgs = *myArgs.getAutoArgs(state);
|
||||
worker(state, autoArgs, *to, *from);
|
||||
} catch (std::exception & e) {
|
||||
nlohmann::json err;
|
||||
err["error"] = e.what();
|
||||
writeLine(to->get(), err.dump());
|
||||
// Don't forget to print it into the STDERR log, this is
|
||||
// what's shown in the Hydra UI.
|
||||
printError("error: %s", err["error"]);
|
||||
}
|
||||
},
|
||||
ProcessOptions { .allowVfork = false });
|
||||
from = std::move(fromPipe.readSide);
|
||||
to = std::move(toPipe.writeSide);
|
||||
debug("created worker process %d", pid);
|
||||
}
|
||||
|
||||
/* Check whether the existing worker process is still there. */
|
||||
auto s = readLine(from.get());
|
||||
if (s == "restart") {
|
||||
pid = -1;
|
||||
continue;
|
||||
} else if (s != "next") {
|
||||
auto json = nlohmann::json::parse(s);
|
||||
throw Error("worker error: %s", (std::string) json["error"]);
|
||||
}
|
||||
|
||||
/* Wait for a job name to become available. */
|
||||
std::string attrPath;
|
||||
|
||||
while (true) {
|
||||
checkInterrupt();
|
||||
auto state(state_.lock());
|
||||
if ((state->todo.empty() && state->active.empty()) || state->exc) {
|
||||
writeLine(to.get(), "exit");
|
||||
return;
|
||||
}
|
||||
if (!state->todo.empty()) {
|
||||
attrPath = *state->todo.begin();
|
||||
state->todo.erase(state->todo.begin());
|
||||
state->active.insert(attrPath);
|
||||
break;
|
||||
} else
|
||||
state.wait(wakeup);
|
||||
}
|
||||
|
||||
/* Tell the worker to evaluate it. */
|
||||
writeLine(to.get(), "do " + attrPath);
|
||||
|
||||
/* Wait for the response. */
|
||||
auto response = nlohmann::json::parse(readLine(from.get()));
|
||||
|
||||
/* Handle the response. */
|
||||
StringSet newAttrs;
|
||||
|
||||
if (response.find("job") != response.end()) {
|
||||
auto state(state_.lock());
|
||||
state->jobs[attrPath] = response["job"];
|
||||
}
|
||||
|
||||
if (response.find("attrs") != response.end()) {
|
||||
for (auto & i : response["attrs"]) {
|
||||
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
|
||||
newAttrs.insert(s);
|
||||
}
|
||||
}
|
||||
|
||||
if (response.find("error") != response.end()) {
|
||||
auto state(state_.lock());
|
||||
state->jobs[attrPath]["error"] = response["error"];
|
||||
}
|
||||
|
||||
/* Add newly discovered job names to the queue. */
|
||||
{
|
||||
auto state(state_.lock());
|
||||
state->active.erase(attrPath);
|
||||
for (auto & s : newAttrs)
|
||||
state->todo.insert(s);
|
||||
wakeup.notify_all();
|
||||
}
|
||||
}
|
||||
} catch (...) {
|
||||
auto state(state_.lock());
|
||||
state->exc = std::current_exception();
|
||||
wakeup.notify_all();
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<std::thread> threads;
|
||||
for (size_t i = 0; i < nrWorkers; i++)
|
||||
threads.emplace_back(std::thread(handler));
|
||||
|
||||
for (auto & thread : threads)
|
||||
thread.join();
|
||||
|
||||
auto state(state_.lock());
|
||||
|
||||
if (state->exc)
|
||||
std::rethrow_exception(state->exc);
|
||||
|
||||
/* For aggregate jobs that have named constituents
|
||||
(i.e. constituents that are a job name rather than a
|
||||
derivation), look up the referenced job and add it to the
|
||||
dependencies of the aggregate derivation. */
|
||||
auto store = openStore();
|
||||
|
||||
for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
|
||||
auto jobName = i.key();
|
||||
auto & job = i.value();
|
||||
|
||||
auto named = job.find("namedConstituents");
|
||||
if (named == job.end()) continue;
|
||||
|
||||
if (myArgs.dryRun) {
|
||||
for (std::string jobName2 : *named) {
|
||||
auto job2 = state->jobs.find(jobName2);
|
||||
if (job2 == state->jobs.end())
|
||||
throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2);
|
||||
std::string drvPath2 = (*job2)["drvPath"];
|
||||
job["constituents"].push_back(drvPath2);
|
||||
}
|
||||
} else {
|
||||
auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
|
||||
auto drv = store->readDerivation(drvPath);
|
||||
|
||||
for (std::string jobName2 : *named) {
|
||||
auto job2 = state->jobs.find(jobName2);
|
||||
if (job2 == state->jobs.end())
|
||||
throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2);
|
||||
auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
|
||||
auto drv2 = store->readDerivation(drvPath2);
|
||||
job["constituents"].push_back(store->printStorePath(drvPath2));
|
||||
drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
|
||||
}
|
||||
|
||||
std::string drvName(drvPath.name());
|
||||
assert(hasSuffix(drvName, drvExtension));
|
||||
drvName.resize(drvName.size() - drvExtension.size());
|
||||
auto h = std::get<Hash>(hashDerivationModulo(*store, drv, true));
|
||||
auto outPath = store->makeOutputPath("out", h, drvName);
|
||||
drv.env["out"] = store->printStorePath(outPath);
|
||||
drv.outputs.insert_or_assign("out", DerivationOutput { .output = DerivationOutputInputAddressed { .path = outPath } });
|
||||
auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));
|
||||
|
||||
debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);
|
||||
|
||||
job["drvPath"] = newDrvPath;
|
||||
job["outputs"]["out"] = store->printStorePath(outPath);
|
||||
}
|
||||
|
||||
job.erase("namedConstituents");
|
||||
}
|
||||
|
||||
std::cout << state->jobs.dump(2) << "\n";
|
||||
});
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
bin_PROGRAMS = hydra-evaluator
|
||||
|
||||
hydra_evaluator_SOURCES = hydra-evaluator.cc
|
||||
hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
|
||||
hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
|
||||
@@ -1,7 +1,8 @@
|
||||
#include "db.hh"
|
||||
#include "hydra-config.hh"
|
||||
#include "pool.hh"
|
||||
#include "shared.hh"
|
||||
#include <nix/util/pool.hh>
|
||||
#include <nix/main/shared.hh>
|
||||
#include <nix/util/signals.hh>
|
||||
|
||||
#include <algorithm>
|
||||
#include <thread>
|
||||
@@ -37,7 +38,7 @@ class JobsetId {
|
||||
friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
|
||||
|
||||
std::string display() const {
|
||||
return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
|
||||
return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
|
||||
}
|
||||
};
|
||||
bool operator==(const JobsetId & lhs, const JobsetId & rhs)
|
||||
@@ -233,12 +234,12 @@ struct Evaluator
|
||||
pqxx::work txn(*conn);
|
||||
|
||||
if (jobset.evaluation_style == EvaluationStyle::ONE_AT_A_TIME) {
|
||||
auto evaluation_res = txn.parameterized
|
||||
auto evaluation_res = txn.exec_params
|
||||
("select id from JobsetEvals "
|
||||
"where jobset_id = $1 "
|
||||
"order by id desc limit 1")
|
||||
(jobset.name.id)
|
||||
.exec();
|
||||
"order by id desc limit 1"
|
||||
,jobset.name.id
|
||||
);
|
||||
|
||||
if (evaluation_res.empty()) {
|
||||
// First evaluation, so allow scheduling.
|
||||
@@ -249,15 +250,15 @@ struct Evaluator
|
||||
|
||||
auto evaluation_id = evaluation_res[0][0].as<int>();
|
||||
|
||||
auto unfinished_build_res = txn.parameterized
|
||||
auto unfinished_build_res = txn.exec_params
|
||||
("select id from Builds "
|
||||
"join JobsetEvalMembers "
|
||||
" on (JobsetEvalMembers.build = Builds.id) "
|
||||
"where JobsetEvalMembers.eval = $1 "
|
||||
" and builds.finished = 0 "
|
||||
" limit 1")
|
||||
(evaluation_id)
|
||||
.exec();
|
||||
" limit 1"
|
||||
,evaluation_id
|
||||
);
|
||||
|
||||
// If the previous evaluation has no unfinished builds
|
||||
// schedule!
|
||||
@@ -366,6 +367,9 @@ struct Evaluator
|
||||
printInfo("received jobset event");
|
||||
}
|
||||
|
||||
} catch (pqxx::broken_connection & e) {
|
||||
printError("Database connection broken: %s", e.what());
|
||||
std::_Exit(1);
|
||||
} catch (std::exception & e) {
|
||||
printError("exception in database monitor thread: %s", e.what());
|
||||
sleep(30);
|
||||
@@ -473,6 +477,9 @@ struct Evaluator
|
||||
while (true) {
|
||||
try {
|
||||
loop();
|
||||
} catch (pqxx::broken_connection & e) {
|
||||
printError("Database connection broken: %s", e.what());
|
||||
std::_Exit(1);
|
||||
} catch (std::exception & e) {
|
||||
printError("exception in main loop: %s", e.what());
|
||||
sleep(30);
|
||||
|
||||
10
src/hydra-evaluator/meson.build
Normal file
@@ -0,0 +1,10 @@
hydra_evaluator = executable('hydra-evaluator',
'hydra-evaluator.cc',
dependencies: [
libhydra_dep,
nix_util_dep,
nix_main_dep,
pqxx_dep,
],
install: true,
)
@@ -1,8 +0,0 @@
|
||||
bin_PROGRAMS = hydra-queue-runner
|
||||
|
||||
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
|
||||
builder.cc build-result.cc build-remote.cc \
|
||||
build-result.hh counter.hh state.hh db.hh \
|
||||
nar-extractor.cc nar-extractor.hh
|
||||
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
|
||||
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
|
||||
@@ -5,106 +5,94 @@
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "serve-protocol.hh"
|
||||
#include <nix/store/build-result.hh>
|
||||
#include <nix/store/path.hh>
|
||||
#include <nix/store/legacy-ssh-store.hh>
|
||||
#include <nix/store/serve-protocol.hh>
|
||||
#include <nix/store/serve-protocol-impl.hh>
|
||||
#include "state.hh"
|
||||
#include "util.hh"
|
||||
#include "worker-protocol.hh"
|
||||
#include "finally.hh"
|
||||
#include <nix/util/current-process.hh>
|
||||
#include <nix/util/processes.hh>
|
||||
#include <nix/util/util.hh>
|
||||
#include <nix/store/serve-protocol.hh>
|
||||
#include <nix/store/serve-protocol-impl.hh>
|
||||
#include <nix/store/ssh.hh>
|
||||
#include <nix/util/finally.hh>
|
||||
#include <nix/util/url.hh>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
|
||||
struct Child
|
||||
bool ::Machine::isLocalhost() const
|
||||
{
|
||||
Pid pid;
|
||||
AutoCloseFD to, from;
|
||||
};
|
||||
|
||||
|
||||
static void append(Strings & dst, const Strings & src)
|
||||
{
|
||||
dst.insert(dst.end(), src.begin(), src.end());
|
||||
return storeUri.params.empty() && std::visit(overloaded {
|
||||
[](const StoreReference::Auto &) {
|
||||
return true;
|
||||
},
|
||||
[](const StoreReference::Specified & s) {
|
||||
return
|
||||
(s.scheme == "local" || s.scheme == "unix") ||
|
||||
((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
|
||||
s.authority == "localhost");
|
||||
},
|
||||
}, storeUri.variant);
|
||||
}
|
||||
|
||||
namespace nix::build_remote {
|
||||
|
||||
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
|
||||
static std::unique_ptr<SSHMaster::Connection> openConnection(
|
||||
::Machine::ptr machine, SSHMaster & master)
|
||||
{
|
||||
string pgmName;
|
||||
Pipe to, from;
|
||||
to.create();
|
||||
from.create();
|
||||
|
||||
child.pid = startProcess([&]() {
|
||||
|
||||
restoreSignals();
|
||||
|
||||
if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
|
||||
throw SysError("cannot dup input pipe to stdin");
|
||||
|
||||
if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
|
||||
throw SysError("cannot dup output pipe to stdout");
|
||||
|
||||
if (dup2(stderrFD, STDERR_FILENO) == -1)
|
||||
throw SysError("cannot dup stderr");
|
||||
|
||||
Strings argv;
|
||||
if (machine->isLocalhost()) {
|
||||
pgmName = "nix-store";
|
||||
argv = {"nix-store", "--builders", "", "--serve", "--write"};
|
||||
}
|
||||
else {
|
||||
pgmName = "ssh";
|
||||
argv = {"ssh", machine->sshName};
|
||||
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
|
||||
if (machine->sshPublicHostKey != "") {
|
||||
Path fileName = tmpDir + "/host-key";
|
||||
auto p = machine->sshName.find("@");
|
||||
string host = p != string::npos ? string(machine->sshName, p + 1) : machine->sshName;
|
||||
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
|
||||
append(argv, {"-oUserKnownHostsFile=" + fileName});
|
||||
}
|
||||
append(argv,
|
||||
{ "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||
, "--", "nix-store", "--serve", "--write" });
|
||||
Strings command = {"nix-store", "--serve", "--write"};
|
||||
if (machine->isLocalhost()) {
|
||||
command.push_back("--builders");
|
||||
command.push_back("");
|
||||
} else {
|
||||
auto remoteStore = machine->storeUri.params.find("remote-store");
|
||||
if (remoteStore != machine->storeUri.params.end()) {
|
||||
command.push_back("--store");
|
||||
command.push_back(escapeShellArgAlways(remoteStore->second));
|
||||
}
|
||||
}
|
||||
|
||||
execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
|
||||
|
||||
throw SysError("cannot start %s", pgmName);
|
||||
auto ret = master.startCommand(std::move(command), {
|
||||
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||
});
|
||||
|
||||
to.readSide = -1;
|
||||
from.writeSide = -1;
|
||||
// XXX: determine the actual max value we can use from /proc.
|
||||
|
||||
child.to = to.writeSide.release();
|
||||
child.from = from.readSide.release();
|
||||
// FIXME: Should this be upstreamed into `startCommand` in Nix?
|
||||
|
||||
int pipesize = 1024 * 1024;
|
||||
|
||||
fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
|
||||
fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
|
||||
FdSource & from, FdSink & to, const StorePathSet & paths,
|
||||
bool useSubstitutes = false)
|
||||
static void copyClosureTo(
|
||||
::Machine::Connection & conn,
|
||||
Store & destStore,
|
||||
const StorePathSet & paths,
|
||||
SubstituteFlag useSubstitutes = NoSubstitute)
|
||||
{
|
||||
StorePathSet closure;
|
||||
for (auto & path : paths)
|
||||
destStore->computeFSClosure(path, closure);
|
||||
destStore.computeFSClosure(paths, closure);
|
||||
|
||||
/* Send the "query valid paths" command with the "lock" option
|
||||
enabled. This prevents a race where the remote host
|
||||
garbage-collect paths that are already there. Optionally, ask
|
||||
the remote host to substitute missing paths. */
|
||||
// FIXME: substitute output pollutes our build log
|
||||
to << cmdQueryValidPaths << 1 << useSubstitutes;
|
||||
worker_proto::write(*destStore, to, closure);
|
||||
to.flush();
|
||||
|
||||
/* Get back the set of paths that are already valid on the remote
|
||||
host. */
|
||||
auto present = worker_proto::read(*destStore, from, Phantom<StorePathSet> {});
|
||||
auto present = conn.queryValidPaths(
|
||||
destStore, true, closure, useSubstitutes);
|
||||
|
||||
if (present.size() == closure.size()) return;
|
||||
|
||||
auto sorted = destStore->topoSortPaths(closure);
|
||||
auto sorted = destStore.topoSortPaths(closure);
|
||||
|
||||
StorePathSet missing;
|
||||
for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
|
||||
@@ -112,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
|
||||
|
||||
printMsg(lvlDebug, "sending %d missing paths", missing.size());
|
||||
|
||||
std::unique_lock<std::timed_mutex> sendLock(sendMutex,
|
||||
std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
|
||||
std::chrono::seconds(600));
|
||||
|
||||
to << cmdImportPaths;
|
||||
destStore->exportPaths(missing, to);
|
||||
to.flush();
|
||||
conn.to << ServeProto::Command::ImportPaths;
|
||||
destStore.exportPaths(missing, conn.to);
|
||||
conn.to.flush();
|
||||
|
||||
if (readInt(from) != 1)
|
||||
if (readInt(conn.from) != 1)
|
||||
throw Error("remote machine failed to import closure");
|
||||
}
|
||||
|
||||
|
||||
// FIXME: use Store::topoSortPaths().
|
||||
StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
|
||||
static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
|
||||
{
|
||||
StorePaths sorted;
|
||||
StorePathSet visited;
|
||||
@@ -153,40 +141,304 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
|
||||
return sorted;
|
||||
}
|
||||
|
||||
static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
|
||||
{
|
||||
std::string base(drvPath.to_string());
|
||||
auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
|
||||
|
||||
createDirs(dirOf(logFile));
|
||||
|
||||
AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
||||
if (!logFD) throw SysError("creating log file ‘%s’", logFile);
|
||||
|
||||
return {std::move(logFile), std::move(logFD)};
|
||||
}
|
||||
|
||||
static BasicDerivation sendInputs(
|
||||
State & state,
|
||||
Step & step,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
::Machine::Connection & conn,
|
||||
unsigned int & overhead,
|
||||
counter & nrStepsWaiting,
|
||||
counter & nrStepsCopyingTo
|
||||
)
|
||||
{
|
||||
/* Replace the input derivations by their output paths to send a
|
||||
minimal closure to the builder.
|
||||
|
||||
`tryResolve` currently does *not* rewrite input addresses, so it
|
||||
is safe to do this in all cases. (It should probably have a mode
|
||||
to do that, however, but we would not use it here.)
|
||||
*/
|
||||
BasicDerivation basicDrv = ({
|
||||
auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
|
||||
if (!maybeBasicDrv)
|
||||
throw Error(
|
||||
"the derivation '%s' can’t be resolved. It’s probably "
|
||||
"missing some outputs",
|
||||
localStore.printStorePath(step.drvPath));
|
||||
*maybeBasicDrv;
|
||||
});
|
||||
|
||||
/* Ensure that the inputs exist in the destination store. This is
|
||||
a no-op for regular stores, but for the binary cache store,
|
||||
this will copy the inputs to the binary cache from the local
|
||||
store. */
|
||||
if (&localStore != &destStore) {
|
||||
copyClosure(localStore, destStore,
|
||||
step.drv->inputSrcs,
|
||||
NoRepair, NoCheckSigs, NoSubstitute);
|
||||
}
|
||||
|
||||
{
|
||||
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
||||
mc1.reset();
|
||||
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
||||
|
||||
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
||||
localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
/* Copy the input closure. */
|
||||
if (conn.machine->isLocalhost()) {
|
||||
StorePathSet closure;
|
||||
destStore.computeFSClosure(basicDrv.inputSrcs, closure);
|
||||
copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
|
||||
} else {
|
||||
copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
|
||||
}
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
return basicDrv;
|
||||
}
|
||||
|
||||
static BuildResult performBuild(
|
||||
::Machine::Connection & conn,
|
||||
Store & localStore,
|
||||
StorePath drvPath,
|
||||
const BasicDerivation & drv,
|
||||
const ServeProto::BuildOptions & options,
|
||||
counter & nrStepsBuilding
|
||||
)
|
||||
{
|
||||
conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
|
||||
|
||||
BuildResult result;
|
||||
|
||||
time_t startTime, stopTime;
|
||||
|
||||
startTime = time(0);
|
||||
{
|
||||
MaintainCount<counter> mc(nrStepsBuilding);
|
||||
result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
|
||||
}
|
||||
stopTime = time(0);
|
||||
|
||||
if (!result.startTime) {
|
||||
// If the builder gave `startTime = 0`, use our measurements
|
||||
// instead of the builder's.
|
||||
//
|
||||
// Note: this represents the duration of a single round, rather
|
||||
// than all rounds.
|
||||
result.startTime = startTime;
|
||||
result.stopTime = stopTime;
|
||||
}
|
||||
|
||||
// If the protocol was too old to give us `builtOutputs`, initialize
|
||||
// it manually by introspecting the derivation.
|
||||
if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
|
||||
{
|
||||
// If the remote is too old to handle CA derivations, we can’t get this
|
||||
// far anyways
|
||||
assert(drv.type().hasKnownOutputPaths());
|
||||
DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
|
||||
// Since this a `BasicDerivation`, `staticOutputHashes` will not
|
||||
// do any real work.
|
||||
auto outputHashes = staticOutputHashes(localStore, drv);
|
||||
for (auto & [outputName, output] : drvOutputs) {
|
||||
auto outputPath = output.second;
|
||||
// We’ve just asserted that the output paths of the derivation
|
||||
// were known
|
||||
assert(outputPath);
|
||||
auto outputHash = outputHashes.at(outputName);
|
||||
auto drvOutput = DrvOutput { outputHash, outputName };
|
||||
result.builtOutputs.insert_or_assign(
|
||||
std::move(outputName),
|
||||
Realisation { drvOutput, *outputPath });
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static void copyPathFromRemote(
|
||||
::Machine::Connection & conn,
|
||||
NarMemberDatas & narMembers,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
const ValidPathInfo & info
|
||||
)
|
||||
{
|
||||
/* Receive the NAR from the remote and add it to the
|
||||
destination store. Meanwhile, extract all the info from the
|
||||
NAR that getBuildOutput() needs. */
|
||||
auto source2 = sinkToSource([&](Sink & sink)
|
||||
{
|
||||
/* Note: we should only send the command to dump the store
|
||||
path to the remote if the NAR is actually going to get read
|
||||
by the destination store, which won't happen if this path
|
||||
is already valid on the destination store. Since this
|
||||
lambda function only gets executed if someone tries to read
|
||||
from source2, we will send the command from here rather
|
||||
than outside the lambda. */
|
||||
conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
|
||||
conn.to.flush();
|
||||
|
||||
TeeSource tee(conn.from, sink);
|
||||
extractNarData(tee, localStore.printStorePath(info.path), narMembers);
|
||||
});
|
||||
|
||||
destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
|
||||
}
|
||||
|
||||
static void copyPathsFromRemote(
|
||||
::Machine::Connection & conn,
|
||||
NarMemberDatas & narMembers,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
const std::map<StorePath, UnkeyedValidPathInfo> & infos
|
||||
)
|
||||
{
|
||||
auto pathsSorted = reverseTopoSortPaths(infos);
|
||||
|
||||
for (auto & path : pathsSorted) {
|
||||
auto & info = infos.find(path)->second;
|
||||
copyPathFromRemote(
|
||||
conn, narMembers, localStore, destStore,
|
||||
ValidPathInfo { path, info });
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* using namespace nix::build_remote; */
|
||||
|
||||
void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
|
||||
{
|
||||
startTime = buildResult.startTime;
|
||||
stopTime = buildResult.stopTime;
|
||||
timesBuilt = buildResult.timesBuilt;
|
||||
errorMsg = buildResult.errorMsg;
|
||||
isNonDeterministic = buildResult.isNonDeterministic;
|
||||
|
||||
switch ((BuildResult::Status) buildResult.status) {
|
||||
case BuildResult::Built:
|
||||
stepStatus = bsSuccess;
|
||||
break;
|
||||
case BuildResult::Substituted:
|
||||
case BuildResult::AlreadyValid:
|
||||
stepStatus = bsSuccess;
|
||||
isCached = true;
|
||||
break;
|
||||
case BuildResult::PermanentFailure:
|
||||
stepStatus = bsFailed;
|
||||
canCache = true;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::InputRejected:
|
||||
case BuildResult::OutputRejected:
|
||||
stepStatus = bsFailed;
|
||||
canCache = true;
|
||||
break;
|
||||
case BuildResult::TransientFailure:
|
||||
stepStatus = bsFailed;
|
||||
canRetry = true;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::TimedOut:
|
||||
stepStatus = bsTimedOut;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::MiscFailure:
|
||||
stepStatus = bsAborted;
|
||||
canRetry = true;
|
||||
break;
|
||||
case BuildResult::LogLimitExceeded:
|
||||
stepStatus = bsLogLimitExceeded;
|
||||
break;
|
||||
case BuildResult::NotDeterministic:
|
||||
stepStatus = bsNotDeterministic;
|
||||
canRetry = false;
|
||||
canCache = true;
|
||||
break;
|
||||
default:
|
||||
stepStatus = bsAborted;
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Utility guard object to auto-release a semaphore on destruction. */
|
||||
template <typename T>
|
||||
class SemaphoreReleaser {
|
||||
public:
|
||||
SemaphoreReleaser(T* s) : sem(s) {}
|
||||
~SemaphoreReleaser() { sem->release(); }
|
||||
|
||||
private:
|
||||
T* sem;
|
||||
};
|
||||
|
||||
void State::buildRemote(ref<Store> destStore,
|
||||
Machine::ptr machine, Step::ptr step,
|
||||
unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
|
||||
std::unique_ptr<MachineReservation> reservation,
|
||||
::Machine::ptr machine, Step::ptr step,
|
||||
const ServeProto::BuildOptions & buildOptions,
|
||||
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
||||
std::function<void(StepState)> updateStep,
|
||||
NarMemberDatas & narMembers)
|
||||
{
|
||||
assert(BuildResult::TimedOut == 8);
|
||||
|
||||
string base(step->drvPath.to_string());
|
||||
result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
|
||||
AutoDelete autoDelete(result.logFile, false);
|
||||
|
||||
createDirs(dirOf(result.logFile));
|
||||
|
||||
AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
||||
if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
|
||||
|
||||
nix::Path tmpDir = createTempDir();
|
||||
AutoDelete tmpDirDel(tmpDir, true);
|
||||
auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
|
||||
AutoDelete logFileDel(logFile, false);
|
||||
result.logFile = logFile;
|
||||
|
||||
try {
|
||||
|
||||
updateStep(ssConnecting);
|
||||
|
||||
auto storeRef = machine->completeStoreReference();
|
||||
|
||||
auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
|
||||
if (!pSpecified || pSpecified->scheme != "ssh") {
|
||||
throw Error("Currently, only (legacy-)ssh stores are supported!");
|
||||
}
|
||||
|
||||
LegacySSHStoreConfig storeConfig {
|
||||
pSpecified->scheme,
|
||||
pSpecified->authority,
|
||||
storeRef.params
|
||||
};
|
||||
|
||||
auto master = storeConfig.createSSHMaster(
|
||||
false, // no SSH master yet
|
||||
logFD.get());
|
||||
|
||||
// FIXME: rewrite to use Store.
|
||||
Child child;
|
||||
openConnection(machine, tmpDir, logFD.get(), child);
|
||||
auto child = build_remote::openConnection(machine, master);
|
||||
|
||||
{
|
||||
auto activeStepState(activeStep->state_.lock());
|
||||
if (activeStepState->cancelled) throw Error("step cancelled");
|
||||
activeStepState->pid = child.pid;
|
||||
activeStepState->pid = child->sshPid;
|
||||
}
|
||||
|
||||
Finally clearPid([&]() {
|
||||
@@ -201,41 +453,33 @@ void State::buildRemote(ref<Store> destStore,
|
||||
process. Meh. */
|
||||
});
|
||||
|
||||
FdSource from(child.from.get());
|
||||
FdSink to(child.to.get());
|
||||
::Machine::Connection conn {
|
||||
{
|
||||
.to = child->in.get(),
|
||||
.from = child->out.get(),
|
||||
/* Handshake. */
|
||||
.remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
|
||||
},
|
||||
/*.machine =*/ machine,
|
||||
};
|
||||
|
||||
Finally updateStats([&]() {
|
||||
bytesReceived += from.read;
|
||||
bytesSent += to.written;
|
||||
bytesReceived += conn.from.read;
|
||||
bytesSent += conn.to.written;
|
||||
});
|
||||
|
||||
/* Handshake. */
|
||||
bool sendDerivation = true;
|
||||
unsigned int remoteVersion;
|
||||
constexpr ServeProto::Version our_version = 0x206;
|
||||
|
||||
try {
|
||||
to << SERVE_MAGIC_1 << 0x204;
|
||||
to.flush();
|
||||
|
||||
unsigned int magic = readInt(from);
|
||||
if (magic != SERVE_MAGIC_2)
|
||||
throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
|
||||
remoteVersion = readInt(from);
|
||||
if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
|
||||
throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
|
||||
// Always send the derivation to localhost, since it's a
|
||||
// no-op anyway but we might not be privileged to use
|
||||
// cmdBuildDerivation (e.g. if we're running in a NixOS
|
||||
// container).
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 1 && !machine->isLocalhost())
|
||||
sendDerivation = false;
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
|
||||
throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
|
||||
|
||||
conn.remoteVersion = decltype(conn)::handshake(
|
||||
conn.to,
|
||||
conn.from,
|
||||
our_version,
|
||||
machine->storeUri.render());
|
||||
} catch (EndOfFile & e) {
|
||||
child.pid.wait();
|
||||
string s = chomp(readFile(result.logFile));
|
||||
throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
|
||||
child->sshPid.wait();
|
||||
std::string s = chomp(readFile(result.logFile));
|
||||
throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
|
||||
}
|
||||
|
||||
{
|
||||
@@ -249,55 +493,12 @@ void State::buildRemote(ref<Store> destStore,
|
||||
copy the immediate sources of the derivation and the required
|
||||
outputs of the input derivations. */
|
||||
updateStep(ssSendingInputs);
|
||||
BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
|
||||
|
||||
StorePathSet inputs;
|
||||
BasicDerivation basicDrv(*step->drv);
|
||||
|
||||
if (sendDerivation)
|
||||
inputs.insert(step->drvPath);
|
||||
else
|
||||
for (auto & p : step->drv->inputSrcs)
|
||||
inputs.insert(p);
|
||||
|
||||
for (auto & input : step->drv->inputDrvs) {
|
||||
auto drv2 = localStore->readDerivation(input.first);
|
||||
for (auto & name : input.second) {
|
||||
if (auto i = get(drv2.outputs, name)) {
|
||||
auto outPath = i->path(*localStore, drv2.name, name);
|
||||
inputs.insert(*outPath);
|
||||
basicDrv.inputSrcs.insert(*outPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Ensure that the inputs exist in the destination store. This is
|
||||
a no-op for regular stores, but for the binary cache store,
|
||||
this will copy the inputs to the binary cache from the local
|
||||
store. */
|
||||
if (localStore != std::shared_ptr<Store>(destStore))
|
||||
copyClosure(ref<Store>(localStore), destStore, step->drv->inputSrcs, NoRepair, NoCheckSigs);
|
||||
|
||||
/* Copy the input closure. */
|
||||
if (!machine->isLocalhost()) {
|
||||
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
||||
mc1.reset();
|
||||
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
||||
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName);
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
copyClosureTo(machine->state->sendLock, destStore, from, to, inputs, true);
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
autoDelete.cancel();
|
||||
logFileDel.cancel();
|
||||
|
||||
/* Truncate the log to get rid of messages about substitutions
|
||||
etc. on the remote system. */
|
||||
etc. on the remote system. */
|
||||
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
|
||||
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
|
||||
|
||||
@@ -309,111 +510,22 @@ void State::buildRemote(ref<Store> destStore,
|
||||
/* Do the build. */
|
||||
printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
|
||||
localStore->printStorePath(step->drvPath),
|
||||
machine->sshName);
|
||||
machine->storeUri.render());
|
||||
|
||||
updateStep(ssBuilding);
|
||||
|
||||
if (sendDerivation) {
|
||||
to << cmdBuildPaths;
|
||||
worker_proto::write(*localStore, to, StorePathSet{step->drvPath});
|
||||
} else {
|
||||
to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
|
||||
writeDerivation(to, *localStore, basicDrv);
|
||||
}
|
||||
to << maxSilentTime << buildTimeout;
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
|
||||
to << maxLogSize;
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
||||
to << repeats // == build-repeat
|
||||
<< step->isDeterministic; // == enforce-determinism
|
||||
}
|
||||
to.flush();
|
||||
BuildResult buildResult = build_remote::performBuild(
|
||||
conn,
|
||||
*localStore,
|
||||
step->drvPath,
|
||||
resolvedDrv,
|
||||
buildOptions,
|
||||
nrStepsBuilding
|
||||
);
|
||||
|
||||
result.startTime = time(0);
|
||||
int res;
|
||||
{
|
||||
MaintainCount<counter> mc(nrStepsBuilding);
|
||||
res = readInt(from);
|
||||
}
|
||||
result.stopTime = time(0);
|
||||
result.updateWithBuildResult(buildResult);
|
||||
|
||||
if (sendDerivation) {
|
||||
if (res) {
|
||||
result.errorMsg = fmt("%s on ‘%s’", readString(from), machine->sshName);
|
||||
if (res == 100) {
|
||||
result.stepStatus = bsFailed;
|
||||
result.canCache = true;
|
||||
}
|
||||
else if (res == 101) {
|
||||
result.stepStatus = bsTimedOut;
|
||||
}
|
||||
else {
|
||||
result.stepStatus = bsAborted;
|
||||
result.canRetry = true;
|
||||
}
|
||||
return;
|
||||
}
|
||||
result.stepStatus = bsSuccess;
|
||||
} else {
|
||||
result.errorMsg = readString(from);
|
||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
||||
result.timesBuilt = readInt(from);
|
||||
result.isNonDeterministic = readInt(from);
|
||||
auto start = readInt(from);
|
||||
auto stop = readInt(from);
|
||||
if (start && start) {
|
||||
/* Note: this represents the duration of a single
|
||||
round, rather than all rounds. */
|
||||
result.startTime = start;
|
||||
result.stopTime = stop;
|
||||
}
|
||||
}
|
||||
switch ((BuildResult::Status) res) {
|
||||
case BuildResult::Built:
|
||||
result.stepStatus = bsSuccess;
|
||||
break;
|
||||
case BuildResult::Substituted:
|
||||
case BuildResult::AlreadyValid:
|
||||
result.stepStatus = bsSuccess;
|
||||
result.isCached = true;
|
||||
break;
|
||||
case BuildResult::PermanentFailure:
|
||||
result.stepStatus = bsFailed;
|
||||
result.canCache = true;
|
||||
result.errorMsg = "";
|
||||
break;
|
||||
case BuildResult::InputRejected:
|
||||
case BuildResult::OutputRejected:
|
||||
result.stepStatus = bsFailed;
|
||||
result.canCache = true;
|
||||
break;
|
||||
case BuildResult::TransientFailure:
|
||||
result.stepStatus = bsFailed;
|
||||
result.canRetry = true;
|
||||
result.errorMsg = "";
|
||||
break;
|
||||
case BuildResult::TimedOut:
|
||||
result.stepStatus = bsTimedOut;
|
||||
result.errorMsg = "";
|
||||
break;
|
||||
case BuildResult::MiscFailure:
|
||||
result.stepStatus = bsAborted;
|
||||
result.canRetry = true;
|
||||
break;
|
||||
case BuildResult::LogLimitExceeded:
|
||||
result.stepStatus = bsLogLimitExceeded;
|
||||
break;
|
||||
case BuildResult::NotDeterministic:
|
||||
result.stepStatus = bsNotDeterministic;
|
||||
result.canRetry = false;
|
||||
result.canCache = true;
|
||||
break;
|
||||
default:
|
||||
result.stepStatus = bsAborted;
|
||||
break;
|
||||
}
|
||||
if (result.stepStatus != bsSuccess) return;
|
||||
}
|
||||
if (result.stepStatus != bsSuccess) return;
|
||||
|
||||
result.errorMsg = "";
|
||||
|
||||
@@ -421,11 +533,32 @@ void State::buildRemote(ref<Store> destStore,
get a build log. */
if (result.isCached) {
printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
localStore->printStorePath(step->drvPath), machine->sshName);
localStore->printStorePath(step->drvPath), machine->storeUri.render());
unlink(result.logFile.c_str());
result.logFile = "";
}

/* Throttle CPU-bound work. Opportunistically skip updating the current
* step, since this requires a DB roundtrip. */
if (!localWorkThrottler.try_acquire()) {
MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
updateStep(ssWaitingForLocalSlot);
localWorkThrottler.acquire();
}
SemaphoreReleaser releaser(&localWorkThrottler);

/* Once we've started copying outputs, release the machine reservation
* so further builds can happen. We do not release the machine earlier
* to avoid situations where the queue runner is bottlenecked on
* copying outputs and we end up building too many things that we
* haven't been able to allow copy slots for. */
reservation.reset();
wakeDispatcher();

StorePathSet outputs;
for (auto & [_, realisation] : buildResult.builtOutputs)
outputs.insert(realisation.outPath);

/* Copy the output paths. */
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
updateStep(ssReceivingOutputs);
@@ -434,39 +567,10 @@ void State::buildRemote(ref<Store> destStore,
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
StorePathSet outputs;
|
||||
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
|
||||
if (i.second.second)
|
||||
outputs.insert(*i.second.second);
|
||||
}
|
||||
auto infos = conn.queryPathInfos(*localStore, outputs);
|
||||
|
||||
/* Get info about each output path. */
|
||||
std::map<StorePath, ValidPathInfo> infos;
|
||||
size_t totalNarSize = 0;
|
||||
to << cmdQueryPathInfos;
|
||||
worker_proto::write(*localStore, to, outputs);
|
||||
to.flush();
|
||||
while (true) {
|
||||
auto storePathS = readString(from);
|
||||
if (storePathS == "") break;
|
||||
auto deriver = readString(from); // deriver
|
||||
auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
|
||||
readLongLong(from); // download size
|
||||
auto narSize = readLongLong(from);
|
||||
auto narHash = Hash::parseAny(readString(from), htSHA256);
|
||||
auto ca = parseContentAddressOpt(readString(from));
|
||||
readStrings<StringSet>(from); // sigs
|
||||
ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
|
||||
assert(outputs.count(info.path));
|
||||
info.references = references;
|
||||
info.narSize = narSize;
|
||||
totalNarSize += info.narSize;
|
||||
info.narHash = narHash;
|
||||
info.ca = ca;
|
||||
if (deriver != "")
|
||||
info.deriver = localStore->parseStorePath(deriver);
|
||||
infos.insert_or_assign(info.path, info);
|
||||
}
|
||||
for (auto & [_, info] : infos) totalNarSize += info.narSize;
|
||||
|
||||
if (totalNarSize > maxOutputSize) {
|
||||
result.stepStatus = bsNarSizeLimitExceeded;
|
||||
@@ -475,35 +579,32 @@ void State::buildRemote(ref<Store> destStore,
|
||||
|
||||
/* Copy each path. */
|
||||
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
|
||||
|
||||
auto pathsSorted = reverseTopoSortPaths(infos);
|
||||
|
||||
for (auto & path : pathsSorted) {
|
||||
auto & info = infos.find(path)->second;
|
||||
to << cmdDumpStorePath << localStore->printStorePath(path);
|
||||
to.flush();
|
||||
|
||||
/* Receive the NAR from the remote and add it to the
|
||||
destination store. Meanwhile, extract all the info from the
|
||||
NAR that getBuildOutput() needs. */
|
||||
auto source2 = sinkToSource([&](Sink & sink)
|
||||
{
|
||||
TeeSource tee(from, sink);
|
||||
extractNarData(tee, localStore->printStorePath(path), narMembers);
|
||||
});
|
||||
|
||||
destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
|
||||
}
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);
|
||||
|
||||
build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
/* Register the outputs of the newly built drv */
|
||||
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
|
||||
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
|
||||
for (auto & [outputName, realisation] : buildResult.builtOutputs) {
|
||||
// Register the resolved drv output
|
||||
destStore->registerDrvOutput(realisation);
|
||||
|
||||
// Also register the unresolved one
|
||||
auto unresolvedRealisation = realisation;
|
||||
unresolvedRealisation.signatures.clear();
|
||||
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
|
||||
destStore->registerDrvOutput(unresolvedRealisation);
|
||||
}
|
||||
}
|
||||
|
||||
/* Shut down the connection. */
|
||||
child.to = -1;
|
||||
child.pid.wait();
|
||||
child->in = -1;
|
||||
child->sshPid.wait();
|
||||
|
||||
} catch (Error & e) {
|
||||
/* Disable this machine until a certain period of time has
|
||||
@@ -517,7 +618,7 @@ void State::buildRemote(ref<Store> destStore,
|
||||
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
|
||||
info->lastFailure = now;
|
||||
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
|
||||
printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
|
||||
printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
|
||||
info->disabledUntil = now + std::chrono::seconds(delta);
|
||||
}
|
||||
throw;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#include "build-result.hh"
|
||||
#include "store-api.hh"
|
||||
#include "util.hh"
|
||||
#include "fs-accessor.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include <nix/store/store-api.hh>
|
||||
#include <nix/util/util.hh>
|
||||
#include <nix/util/source-accessor.hh>
|
||||
|
||||
#include <regex>
|
||||
|
||||
@@ -11,18 +11,18 @@ using namespace nix;
|
||||
BuildOutput getBuildOutput(
|
||||
nix::ref<Store> store,
|
||||
NarMemberDatas & narMembers,
|
||||
const Derivation & drv)
|
||||
const OutputPathMap derivationOutputs)
|
||||
{
|
||||
BuildOutput res;
|
||||
|
||||
/* Compute the closure size. */
|
||||
StorePathSet outputs;
|
||||
StorePathSet closure;
|
||||
for (auto & i : drv.outputsAndOptPaths(*store))
|
||||
if (i.second.second) {
|
||||
store->computeFSClosure(*i.second.second, closure);
|
||||
outputs.insert(*i.second.second);
|
||||
}
|
||||
for (auto& [outputName, outputPath] : derivationOutputs) {
|
||||
store->computeFSClosure(outputPath, closure);
|
||||
outputs.insert(outputPath);
|
||||
res.outputs.insert({outputName, outputPath});
|
||||
}
|
||||
for (auto & path : closure) {
|
||||
auto info = store->queryPathInfo(path);
|
||||
res.closureSize += info->narSize;
|
||||
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
|
||||
|
||||
auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
|
||||
if (productsFile == narMembers.end() ||
|
||||
productsFile->second.type != FSAccessor::Type::tRegular)
|
||||
productsFile->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
assert(productsFile->second.contents);
|
||||
|
||||
@@ -78,7 +78,7 @@ BuildOutput getBuildOutput(
|
||||
product.type = match[1];
|
||||
product.subtype = match[2];
|
||||
std::string s(match[3]);
|
||||
product.path = s[0] == '"' ? string(s, 1, s.size() - 2) : s;
|
||||
product.path = s[0] == '"' ? std::string(s, 1, s.size() - 2) : s;
|
||||
product.defaultPath = match[5];
|
||||
|
||||
/* Ensure that the path exists and points into the Nix
|
||||
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
|
||||
|
||||
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
|
||||
|
||||
if (file->second.type == FSAccessor::Type::tRegular) {
|
||||
if (file->second.type == SourceAccessor::Type::tRegular) {
|
||||
product.isRegular = true;
|
||||
product.fileSize = file->second.fileSize.value();
|
||||
product.sha256hash = file->second.sha256.value();
|
||||
@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(
|
||||
/* If no build products were explicitly declared, then add all
|
||||
outputs as a product of type "nix-build". */
|
||||
if (!explicitProducts) {
|
||||
for (auto & [name, output] : drv.outputs) {
|
||||
for (auto & [name, output] : derivationOutputs) {
|
||||
BuildProduct product;
|
||||
auto outPath = output.path(*store, drv.name, name);
|
||||
product.path = store->printStorePath(*outPath);
|
||||
product.path = store->printStorePath(output);
|
||||
product.type = "nix-build";
|
||||
product.subtype = name == "out" ? "" : name;
|
||||
product.name = outPath->name();
|
||||
product.name = output.name();
|
||||
|
||||
auto file = narMembers.find(product.path);
|
||||
assert(file != narMembers.end());
|
||||
if (file->second.type == FSAccessor::Type::tDirectory)
|
||||
if (file->second.type == SourceAccessor::Type::tDirectory)
|
||||
res.products.push_back(product);
|
||||
}
|
||||
}
|
||||
@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(
|
||||
for (auto & output : outputs) {
|
||||
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
|
||||
if (file == narMembers.end() ||
|
||||
file->second.type != FSAccessor::Type::tRegular)
|
||||
file->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
res.releaseName = trim(file->second.contents.value());
|
||||
// FIXME: validate release name
|
||||
@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(
|
||||
for (auto & output : outputs) {
|
||||
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
|
||||
if (file == narMembers.end() ||
|
||||
file->second.type != FSAccessor::Type::tRegular)
|
||||
file->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
|
||||
auto fields = tokenizeString<std::vector<std::string>>(line);
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#include <cmath>
|
||||
|
||||
#include "state.hh"
|
||||
#include "build-result.hh"
|
||||
#include "finally.hh"
|
||||
#include "binary-cache-store.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include <nix/util/finally.hh>
|
||||
#include <nix/store/binary-cache-store.hh>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
|
||||
}
|
||||
|
||||
|
||||
void State::builder(MachineReservation::ptr reservation)
|
||||
void State::builder(std::unique_ptr<MachineReservation> reservation)
|
||||
{
|
||||
setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
|
||||
|
||||
@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)
|
||||
activeSteps_.lock()->erase(activeStep);
|
||||
});
|
||||
|
||||
std::string machine = reservation->machine->storeUri.render();
|
||||
|
||||
try {
|
||||
auto destStore = getDestStore();
|
||||
res = doBuildStep(destStore, reservation, activeStep);
|
||||
// Might release the reservation.
|
||||
res = doBuildStep(destStore, std::move(reservation), activeStep);
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
|
||||
localStore->printStorePath(reservation->step->drvPath),
|
||||
reservation->machine->sshName,
|
||||
localStore->printStorePath(activeStep->step->drvPath),
|
||||
machine,
|
||||
e.what());
|
||||
}
|
||||
}
|
||||
|
||||
/* Release the machine and wake up the dispatcher. */
|
||||
assert(reservation.unique());
|
||||
reservation = 0;
|
||||
wakeDispatcher();
|
||||
|
||||
/* If there was a temporary failure, retry the step after an
|
||||
exponentially increasing interval. */
|
||||
Step::ptr step = wstep.lock();
|
||||
@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)
|
||||
|
||||
|
||||
State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
MachineReservation::ptr reservation,
|
||||
std::unique_ptr<MachineReservation> reservation,
|
||||
std::shared_ptr<ActiveStep> activeStep)
|
||||
{
|
||||
auto & step(reservation->step);
|
||||
auto & machine(reservation->machine);
|
||||
auto step(reservation->step);
|
||||
auto machine(reservation->machine);
|
||||
|
||||
{
|
||||
auto step_(step->state.lock());
|
||||
@@ -98,8 +96,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
it). */
|
||||
BuildID buildId;
|
||||
std::optional<StorePath> buildDrvPath;
|
||||
unsigned int maxSilentTime, buildTimeout;
|
||||
unsigned int repeats = step->isDeterministic ? 1 : 0;
|
||||
// Other fields set below
|
||||
nix::ServeProto::BuildOptions buildOptions {
|
||||
.maxLogSize = maxLogSize,
|
||||
.nrRepeats = step->isDeterministic ? 1u : 0u,
|
||||
.enforceDeterminism = step->isDeterministic,
|
||||
.keepFailed = false,
|
||||
};
|
||||
|
||||
auto conn(dbPool.get());
|
||||
|
||||
@@ -134,21 +137,22 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
{
|
||||
auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
|
||||
if (i != jobsetRepeats.end())
|
||||
repeats = std::max(repeats, i->second);
|
||||
buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
|
||||
}
|
||||
}
|
||||
if (!build) build = *dependents.begin();
|
||||
|
||||
buildId = build->id;
|
||||
buildDrvPath = build->drvPath;
|
||||
maxSilentTime = build->maxSilentTime;
|
||||
buildTimeout = build->buildTimeout;
|
||||
buildOptions.maxSilentTime = build->maxSilentTime;
|
||||
buildOptions.buildTimeout = build->buildTimeout;
|
||||
|
||||
printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
|
||||
localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
|
||||
localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
|
||||
}
|
||||
|
||||
bool quit = buildId == buildOne && step->drvPath == *buildDrvPath;
|
||||
if (!buildOneDone)
|
||||
buildOneDone = buildId == buildOne && step->drvPath == *buildDrvPath;
|
||||
|
||||
RemoteResult result;
|
||||
BuildOutput res;
|
||||
@@ -172,7 +176,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
unlink(result.logFile.c_str());
|
||||
}
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
ignoreExceptionInDestructor();
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -190,7 +194,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
{
|
||||
auto mc = startDbUpdate();
|
||||
pqxx::work txn(*conn);
|
||||
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
|
||||
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
@@ -205,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
|
||||
try {
|
||||
/* FIXME: referring builds may have conflicting timeouts. */
|
||||
buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
|
||||
buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
|
||||
} catch (Error & e) {
|
||||
if (activeStep->state_.lock()->cancelled) {
|
||||
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
|
||||
@@ -220,7 +224,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
|
||||
if (result.stepStatus == bsSuccess) {
|
||||
updateStep(ssPostProcessing);
|
||||
res = getBuildOutput(destStore, narMembers, *step->drv);
|
||||
res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -247,7 +251,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
/* Finish the step in the database. */
|
||||
if (stepNr) {
|
||||
pqxx::work txn(*conn);
|
||||
finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
|
||||
finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
@@ -255,7 +259,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
issue). Retry a number of times. */
|
||||
if (result.canRetry) {
|
||||
printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
|
||||
assert(stepNr);
|
||||
bool retry;
|
||||
{
|
||||
@@ -265,7 +269,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
if (retry) {
|
||||
auto mc = startDbUpdate();
|
||||
stepFinished = true;
|
||||
if (quit) exit(1);
|
||||
if (buildOneDone) exit(1);
|
||||
return sRetry;
|
||||
}
|
||||
}
|
||||
@@ -274,9 +278,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
|
||||
assert(stepNr);
|
||||
|
||||
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
|
||||
if (i.second.second)
|
||||
addRoot(*i.second.second);
|
||||
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
|
||||
if (!optOutputPath)
|
||||
throw Error(
|
||||
"Missing output %s for derivation %d which was supposed to have succeeded",
|
||||
outputName, localStore->printStorePath(step->drvPath));
|
||||
addRoot(*optOutputPath);
|
||||
}
|
||||
|
||||
/* Register success in the database for all Build objects that
|
||||
@@ -322,7 +329,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
pqxx::work txn(*conn);
|
||||
|
||||
for (auto & b : direct) {
|
||||
printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
|
||||
printInfo("marking build %1% as succeeded", b->id);
|
||||
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
|
||||
result.startTime, result.stopTime);
|
||||
}
|
||||
@@ -376,7 +383,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
}
|
||||
|
||||
} else
|
||||
failStep(*conn, step, buildId, result, machine, stepFinished, quit);
|
||||
failStep(*conn, step, buildId, result, machine, stepFinished);
|
||||
|
||||
// FIXME: keep stats about aborted steps?
|
||||
nrStepsDone++;
|
||||
@@ -386,7 +393,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
machine->state->totalStepTime += stepStopTime - stepStartTime;
|
||||
machine->state->totalStepBuildTime += result.stopTime - result.startTime;
|
||||
|
||||
if (quit) exit(0); // testing hack; FIXME: this won't run plugins
|
||||
if (buildOneDone) exit(0); // testing hack; FIXME: this won't run plugins
|
||||
|
||||
return sDone;
|
||||
}
|
||||
@@ -397,9 +404,8 @@ void State::failStep(
|
||||
Step::ptr step,
|
||||
BuildID buildId,
|
||||
const RemoteResult & result,
|
||||
Machine::ptr machine,
|
||||
bool & stepFinished,
|
||||
bool & quit)
|
||||
::Machine::ptr machine,
|
||||
bool & stepFinished)
|
||||
{
|
||||
/* Register failure in the database for all Build objects that
|
||||
directly or indirectly depend on this step. */
|
||||
@@ -444,14 +450,14 @@ void State::failStep(
|
||||
build->finishedInDB)
|
||||
continue;
|
||||
createBuildStep(txn,
|
||||
0, build->id, step, machine ? machine->sshName : "",
|
||||
0, build->id, step, machine ? machine->storeUri.render() : "",
|
||||
result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
|
||||
}
|
||||
|
||||
/* Mark all builds that depend on this derivation as failed. */
|
||||
for (auto & build : indirect) {
|
||||
if (build->finishedInDB) continue;
|
||||
printMsg(lvlError, format("marking build %1% as failed") % build->id);
|
||||
printError("marking build %1% as failed", build->id);
|
||||
txn.exec_params0
|
||||
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
|
||||
build->id,
|
||||
@@ -481,7 +487,7 @@ void State::failStep(
|
||||
b->finishedInDB = true;
|
||||
builds_->erase(b->id);
|
||||
dependentIDs.push_back(b->id);
|
||||
if (buildOne == b->id) quit = true;
|
||||
if (!buildOneDone && buildOne == b->id) buildOneDone = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
#include <cmath>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "state.hh"
|
||||
|
||||
@@ -31,34 +32,42 @@ void State::makeRunnable(Step::ptr step)
|
||||
|
||||
void State::dispatcher()
|
||||
{
|
||||
while (true) {
|
||||
printMsg(lvlDebug, "Waiting for the machines parsing to have completed at least once");
|
||||
machinesReadyLock.lock();
|
||||
|
||||
while (true) {
|
||||
try {
|
||||
printMsg(lvlDebug, "dispatcher woken up");
|
||||
nrDispatcherWakeups++;
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
auto t_before_work = std::chrono::steady_clock::now();
|
||||
|
||||
auto sleepUntil = doDispatch();
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
auto t_after_work = std::chrono::steady_clock::now();
|
||||
|
||||
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
prom.dispatcher_time_spent_running.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
|
||||
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
|
||||
|
||||
/* Sleep until we're woken up (either because a runnable build
|
||||
is added, or because a build finishes). */
|
||||
{
|
||||
auto dispatcherWakeup_(dispatcherWakeup.lock());
|
||||
if (!*dispatcherWakeup_) {
|
||||
printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
|
||||
debug("dispatcher sleeping for %1%s",
|
||||
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
|
||||
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
|
||||
}
|
||||
*dispatcherWakeup_ = false;
|
||||
}
|
||||
|
||||
auto t_after_sleep = std::chrono::steady_clock::now();
|
||||
prom.dispatcher_time_spent_waiting.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
|
||||
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, format("dispatcher: %1%") % e.what());
|
||||
printError("dispatcher: %s", e.what());
|
||||
sleep(1);
|
||||
}
|
||||
|
||||
@@ -78,17 +87,124 @@ system_time State::doDispatch()
|
||||
jobset.second->pruneSteps();
|
||||
auto s2 = jobset.second->shareUsed();
|
||||
if (s1 != s2)
|
||||
printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
|
||||
% jobset.first.first % jobset.first.second % s1 % s2);
|
||||
debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
|
||||
jobset.first.first, jobset.first.second, s1, s2);
|
||||
}
|
||||
}
|
||||
|
||||
system_time now = std::chrono::system_clock::now();
|
||||
|
||||
/* Start steps until we're out of steps or slots. */
|
||||
auto sleepUntil = system_time::max();
|
||||
bool keepGoing;
|
||||
|
||||
/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):
|
||||
|
||||
- The global priority of the builds that depend on the
|
||||
step. This allows admins to bump a build to the front of
|
||||
the queue.
|
||||
|
||||
- The lowest used scheduling share of the jobsets depending
|
||||
on the step.
|
||||
|
||||
- The local priority of the build, as set via the build's
|
||||
meta.schedulingPriority field. Note that this is not
|
||||
quite correct: the local priority should only be used to
|
||||
establish priority between builds in the same jobset, but
|
||||
here it's used between steps in different jobsets if they
|
||||
happen to have the same lowest used scheduling share. But
|
||||
that's not very likely.
|
||||
|
||||
- The lowest ID of the builds depending on the step;
|
||||
i.e. older builds take priority over new ones.
|
||||
|
||||
FIXME: O(n lg n); obviously, it would be better to keep a
|
||||
runnable queue sorted by priority. */
|
||||
struct StepInfo
|
||||
{
|
||||
Step::ptr step;
|
||||
bool alreadyScheduled = false;
|
||||
|
||||
/* The lowest share used of any jobset depending on this
|
||||
step. */
|
||||
double lowestShareUsed = 1e9;
|
||||
|
||||
/* Info copied from step->state to ensure that the
|
||||
comparator is a partial ordering (see MachineInfo). */
|
||||
int highestGlobalPriority;
|
||||
int highestLocalPriority;
|
||||
size_t numRequiredSystemFeatures;
|
||||
size_t numRevDeps;
|
||||
BuildID lowestBuildID;
|
||||
|
||||
StepInfo(Step::ptr step, Step::State & step_) : step(step)
|
||||
{
|
||||
for (auto & jobset : step_.jobsets)
|
||||
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
|
||||
highestGlobalPriority = step_.highestGlobalPriority;
|
||||
highestLocalPriority = step_.highestLocalPriority;
|
||||
numRequiredSystemFeatures = step->requiredSystemFeatures.size();
|
||||
numRevDeps = step_.rdeps.size();
|
||||
lowestBuildID = step_.lowestBuildID;
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<StepInfo> runnableSorted;
|
||||
|
||||
struct RunnablePerType
|
||||
{
|
||||
unsigned int count{0};
|
||||
std::chrono::seconds waitTime{0};
|
||||
};
|
||||
|
||||
std::unordered_map<std::string, RunnablePerType> runnablePerType;
|
||||
|
||||
{
|
||||
auto runnable_(runnable.lock());
|
||||
runnableSorted.reserve(runnable_->size());
|
||||
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
|
||||
auto step = i->lock();
|
||||
|
||||
/* Remove dead steps. */
|
||||
if (!step) {
|
||||
i = runnable_->erase(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
++i;
|
||||
|
||||
auto & r = runnablePerType[step->systemType];
|
||||
r.count++;
|
||||
|
||||
/* Skip previously failed steps that aren't ready
|
||||
to be retried. */
|
||||
auto step_(step->state.lock());
|
||||
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
|
||||
if (step_->tries > 0 && step_->after > now) {
|
||||
if (step_->after < sleepUntil)
|
||||
sleepUntil = step_->after;
|
||||
continue;
|
||||
}
|
||||
|
||||
runnableSorted.emplace_back(step, *step_);
|
||||
}
|
||||
}
|
||||
|
||||
sort(runnableSorted.begin(), runnableSorted.end(),
|
||||
[](const StepInfo & a, const StepInfo & b)
|
||||
{
|
||||
return
|
||||
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
|
||||
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
|
||||
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
|
||||
a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
|
||||
a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
|
||||
a.lowestBuildID < b.lowestBuildID;
|
||||
});
|
||||
|
||||
do {
|
||||
system_time now = std::chrono::system_clock::now();
|
||||
now = std::chrono::system_clock::now();
|
||||
|
||||
/* Copy the currentJobs field of each machine. This is
|
||||
necessary to ensure that the sort comparator below is
|
||||
@@ -96,7 +212,7 @@ system_time State::doDispatch()
|
||||
filter out temporarily disabled machines. */
|
||||
struct MachineInfo
|
||||
{
|
||||
Machine::ptr machine;
|
||||
::Machine::ptr machine;
|
||||
unsigned long currentJobs;
|
||||
};
|
||||
std::vector<MachineInfo> machinesSorted;
|
||||
@@ -136,104 +252,6 @@ system_time State::doDispatch()
|
||||
a.currentJobs > b.currentJobs;
|
||||
});
|
||||
|
||||
/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):
|
||||
|
||||
- The global priority of the builds that depend on the
|
||||
step. This allows admins to bump a build to the front of
|
||||
the queue.
|
||||
|
||||
- The lowest used scheduling share of the jobsets depending
|
||||
on the step.
|
||||
|
||||
- The local priority of the build, as set via the build's
|
||||
meta.schedulingPriority field. Note that this is not
|
||||
quite correct: the local priority should only be used to
|
||||
establish priority between builds in the same jobset, but
|
||||
here it's used between steps in different jobsets if they
|
||||
happen to have the same lowest used scheduling share. But
|
||||
that's not very likely.
|
||||
|
||||
- The lowest ID of the builds depending on the step;
|
||||
i.e. older builds take priority over new ones.
|
||||
|
||||
FIXME: O(n lg n); obviously, it would be better to keep a
|
||||
runnable queue sorted by priority. */
|
||||
struct StepInfo
|
||||
{
|
||||
Step::ptr step;
|
||||
|
||||
/* The lowest share used of any jobset depending on this
|
||||
step. */
|
||||
double lowestShareUsed = 1e9;
|
||||
|
||||
/* Info copied from step->state to ensure that the
|
||||
comparator is a partial ordering (see MachineInfo). */
|
||||
int highestGlobalPriority;
|
||||
int highestLocalPriority;
|
||||
BuildID lowestBuildID;
|
||||
|
||||
StepInfo(Step::ptr step, Step::State & step_) : step(step)
|
||||
{
|
||||
for (auto & jobset : step_.jobsets)
|
||||
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
|
||||
highestGlobalPriority = step_.highestGlobalPriority;
|
||||
highestLocalPriority = step_.highestLocalPriority;
|
||||
lowestBuildID = step_.lowestBuildID;
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<StepInfo> runnableSorted;
|
||||
|
||||
struct RunnablePerType
|
||||
{
|
||||
unsigned int count{0};
|
||||
std::chrono::seconds waitTime{0};
|
||||
};
|
||||
|
||||
std::unordered_map<std::string, RunnablePerType> runnablePerType;
|
||||
|
||||
{
|
||||
auto runnable_(runnable.lock());
|
||||
runnableSorted.reserve(runnable_->size());
|
||||
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
|
||||
auto step = i->lock();
|
||||
|
||||
/* Remove dead steps. */
|
||||
if (!step) {
|
||||
i = runnable_->erase(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
++i;
|
||||
|
||||
auto & r = runnablePerType[step->systemType];
|
||||
r.count++;
|
||||
|
||||
/* Skip previously failed steps that aren't ready
|
||||
to be retried. */
|
||||
auto step_(step->state.lock());
|
||||
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
|
||||
if (step_->tries > 0 && step_->after > now) {
|
||||
if (step_->after < sleepUntil)
|
||||
sleepUntil = step_->after;
|
||||
continue;
|
||||
}
|
||||
|
||||
runnableSorted.emplace_back(step, *step_);
|
||||
}
|
||||
}
|
||||
|
||||
sort(runnableSorted.begin(), runnableSorted.end(),
|
||||
[](const StepInfo & a, const StepInfo & b)
|
||||
{
|
||||
return
|
||||
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
|
||||
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
|
||||
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
|
||||
a.lowestBuildID < b.lowestBuildID;
|
||||
});
|
||||
|
||||
/* Find a machine with a free slot and find a step to run
|
||||
on it. Once we find such a pair, we restart the outer
|
||||
loop because the machine sorting will have changed. */
|
||||
@@ -243,12 +261,14 @@ system_time State::doDispatch()
|
||||
if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
|
||||
|
||||
for (auto & stepInfo : runnableSorted) {
|
||||
if (stepInfo.alreadyScheduled) continue;
|
||||
|
||||
auto & step(stepInfo.step);
|
||||
|
||||
/* Can this machine do this step? */
|
||||
if (!mi.machine->supportsStep(step)) {
|
||||
debug("machine '%s' does not support step '%s' (system type '%s')",
|
||||
mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
|
||||
mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -269,10 +289,12 @@ system_time State::doDispatch()
|
||||
r.count--;
|
||||
}
|
||||
|
||||
stepInfo.alreadyScheduled = true;
|
||||
|
||||
/* Make a slot reservation and start a thread to
|
||||
do the build. */
|
||||
auto builderThread = std::thread(&State::builder, this,
|
||||
std::make_shared<MachineReservation>(*this, step, mi.machine));
|
||||
std::make_unique<MachineReservation>(*this, step, mi.machine));
|
||||
builderThread.detach(); // FIXME?
|
||||
|
||||
keepGoing = true;
|
||||
@@ -374,7 +396,6 @@ void State::abortUnsupported()
|
||||
if (!build) build = *dependents.begin();
|
||||
|
||||
bool stepFinished = false;
|
||||
bool quit = false;
|
||||
|
||||
failStep(
|
||||
*conn, step, build->id,
|
||||
@@ -385,9 +406,9 @@ void State::abortUnsupported()
|
||||
.startTime = now2,
|
||||
.stopTime = now2,
|
||||
},
|
||||
nullptr, stepFinished, quit);
|
||||
nullptr, stepFinished);
|
||||
|
||||
if (quit) exit(1);
|
||||
if (buildOneDone) exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -427,7 +448,7 @@ void Jobset::pruneSteps()
|
||||
}
|
||||
|
||||
|
||||
State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
|
||||
State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
|
||||
: state(state), step(step), machine(machine)
|
||||
{
|
||||
machine->state->currentJobs++;
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "hash.hh"
|
||||
#include "derivations.hh"
|
||||
#include "store-api.hh"
|
||||
#include <nix/util/hash.hh>
|
||||
#include <nix/store/derivations.hh>
|
||||
#include <nix/store/store-api.hh>
|
||||
#include "nar-extractor.hh"
|
||||
|
||||
struct BuildProduct
|
||||
@@ -36,10 +36,12 @@ struct BuildOutput
|
||||
|
||||
std::list<BuildProduct> products;
|
||||
|
||||
std::map<std::string, nix::StorePath> outputs;
|
||||
|
||||
std::map<std::string, BuildMetric> metrics;
|
||||
};
|
||||
|
||||
BuildOutput getBuildOutput(
|
||||
nix::ref<nix::Store> store,
|
||||
NarMemberDatas & narMembers,
|
||||
const nix::Derivation & drv);
|
||||
const nix::OutputPathMap derivationOutputs);
|
||||
@@ -1,32 +1,29 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <optional>
|
||||
#include <type_traits>
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "state.hh"
|
||||
#include "build-result.hh"
|
||||
#include "store-api.hh"
|
||||
#include "remote-store.hh"
|
||||
#include <prometheus/exposer.h>
|
||||
|
||||
#include "globals.hh"
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#include <nix/util/signals.hh>
|
||||
#include "state.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include <nix/store/store-open.hh>
|
||||
#include <nix/store/remote-store.hh>
|
||||
|
||||
#include <nix/store/globals.hh>
|
||||
#include "hydra-config.hh"
|
||||
#include "json.hh"
|
||||
#include "s3-binary-cache-store.hh"
|
||||
#include "shared.hh"
|
||||
#include <nix/store/s3-binary-cache-store.hh>
|
||||
#include <nix/main/shared.hh>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
|
||||
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
|
||||
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
|
||||
|
||||
}
|
||||
using nlohmann::json;
|
||||
|
||||
|
||||
std::string getEnvOrDie(const std::string & key)
|
||||
@@ -36,20 +33,94 @@ std::string getEnvOrDie(const std::string & key)
|
||||
return *value;
|
||||
}
|
||||
|
||||
State::PromMetrics::PromMetrics()
|
||||
: registry(std::make_shared<prometheus::Registry>())
|
||||
, queue_checks_started(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_checks_started_total")
|
||||
.Help("Number of times State::getQueuedBuilds() was started")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_build_loads(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_build_loads_total")
|
||||
.Help("Number of builds loaded")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_steps_created(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_steps_created_total")
|
||||
.Help("Number of steps created")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_checks_early_exits(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_checks_early_exits_total")
|
||||
.Help("Number of times State::getQueuedBuilds() yielded to potential bumps")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_checks_finished(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_checks_finished_total")
|
||||
.Help("Number of times State::getQueuedBuilds() was completed")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, dispatcher_time_spent_running(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_dispatcher_time_spent_running")
|
||||
.Help("Time (in micros) spent running the dispatcher")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, dispatcher_time_spent_waiting(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_dispatcher_time_spent_waiting")
|
||||
.Help("Time (in micros) spent waiting for the dispatcher to obtain work")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_monitor_time_spent_running(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_monitor_time_spent_running")
|
||||
.Help("Time (in micros) spent running the queue monitor")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
, queue_monitor_time_spent_waiting(
|
||||
prometheus::BuildCounter()
|
||||
.Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
|
||||
.Help("Time (in micros) spent waiting for the queue monitor to obtain work")
|
||||
.Register(*registry)
|
||||
.Add({})
|
||||
)
|
||||
{
|
||||
|
||||
State::State()
|
||||
}
|
||||
|
||||
State::State(std::optional<std::string> metricsAddrOpt)
|
||||
: config(std::make_unique<HydraConfig>())
|
||||
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
|
||||
, dbPool(config->getIntOption("max_db_connections", 128))
|
||||
, localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
|
||||
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
|
||||
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
|
||||
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
|
||||
, rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME"))))
|
||||
, metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"}))
|
||||
{
|
||||
hydraData = getEnvOrDie("HYDRA_DATA");
|
||||
|
||||
logDir = canonPath(hydraData + "/build-logs");
|
||||
|
||||
if (metricsAddrOpt.has_value()) {
|
||||
metricsAddr = metricsAddrOpt.value();
|
||||
}
|
||||
|
||||
/* handle deprecated store specification */
|
||||
if (config->getStrOption("store_mode") != "")
|
||||
throw Error("store_mode in hydra.conf is deprecated, please use store_uri");
|
||||
@@ -86,50 +157,29 @@ void State::parseMachines(const std::string & contents)
|
||||
oldMachines = *machines_;
|
||||
}
|
||||
|
||||
for (auto line : tokenizeString<Strings>(contents, "\n")) {
|
||||
line = trim(string(line, 0, line.find('#')));
|
||||
auto tokens = tokenizeString<std::vector<std::string>>(line);
|
||||
if (tokens.size() < 3) continue;
|
||||
tokens.resize(8);
|
||||
|
||||
auto machine = std::make_shared<Machine>();
|
||||
machine->sshName = tokens[0];
|
||||
machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
|
||||
machine->sshKey = tokens[2] == "-" ? string("") : tokens[2];
|
||||
if (tokens[3] != "")
|
||||
string2Int(tokens[3], machine->maxJobs);
|
||||
else
|
||||
machine->maxJobs = 1;
|
||||
machine->speedFactor = atof(tokens[4].c_str());
|
||||
if (tokens[5] == "-") tokens[5] = "";
|
||||
machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
|
||||
if (tokens[6] == "-") tokens[6] = "";
|
||||
machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
|
||||
for (auto & f : machine->mandatoryFeatures)
|
||||
machine->supportedFeatures.insert(f);
|
||||
if (tokens[7] != "" && tokens[7] != "-")
|
||||
machine->sshPublicHostKey = base64Decode(tokens[7]);
|
||||
for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
|
||||
auto machine = std::make_shared<::Machine>(std::move(machine_));
|
||||
|
||||
/* Re-use the State object of the previous machine with the
|
||||
same name. */
|
||||
auto i = oldMachines.find(machine->sshName);
|
||||
auto i = oldMachines.find(machine->storeUri.variant);
|
||||
if (i == oldMachines.end())
|
||||
printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
|
||||
printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
|
||||
else
|
||||
printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
|
||||
printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
|
||||
machine->state = i == oldMachines.end()
|
||||
? std::make_shared<Machine::State>()
|
||||
? std::make_shared<::Machine::State>()
|
||||
: i->second->state;
|
||||
newMachines[machine->sshName] = machine;
|
||||
newMachines[machine->storeUri.variant] = machine;
|
||||
}
|
||||
|
||||
for (auto & m : oldMachines)
|
||||
if (newMachines.find(m.first) == newMachines.end()) {
|
||||
if (m.second->enabled)
|
||||
printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
|
||||
/* Add a disabled Machine object to make sure stats are
|
||||
printInfo("removing machine ‘%1%’", m.second->storeUri.render());
|
||||
/* Add a disabled ::Machine object to make sure stats are
|
||||
maintained. */
|
||||
auto machine = std::make_shared<Machine>(*(m.second));
|
||||
auto machine = std::make_shared<::Machine>(*(m.second));
|
||||
machine->enabled = false;
|
||||
newMachines[m.first] = machine;
|
||||
}
|
||||
@@ -149,14 +199,16 @@ void State::parseMachines(const std::string & contents)
|
||||
|
||||
void State::monitorMachinesFile()
|
||||
{
|
||||
string defaultMachinesFile = "/etc/nix/machines";
|
||||
std::string defaultMachinesFile = "/etc/nix/machines";
|
||||
auto machinesFiles = tokenizeString<std::vector<Path>>(
|
||||
getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");
|
||||
|
||||
if (machinesFiles.empty()) {
|
||||
parseMachines("localhost " +
|
||||
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
|
||||
+ " - " + std::to_string(settings.maxBuildJobs) + " 1");
|
||||
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
|
||||
+ concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
|
||||
machinesReadyLock.unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -189,7 +241,7 @@ void State::monitorMachinesFile()
|
||||
|
||||
debug("reloading machines files");
|
||||
|
||||
string contents;
|
||||
std::string contents;
|
||||
for (auto & machinesFile : machinesFiles) {
|
||||
try {
|
||||
contents += readFile(machinesFile);
|
||||
@@ -202,9 +254,15 @@ void State::monitorMachinesFile()
|
||||
parseMachines(contents);
|
||||
};
|
||||
|
||||
auto firstParse = true;
|
||||
|
||||
while (true) {
|
||||
try {
|
||||
readMachinesFiles();
|
||||
if (firstParse) {
|
||||
machinesReadyLock.unlock();
|
||||
firstParse = false;
|
||||
}
|
||||
// FIXME: use inotify.
|
||||
sleep(30);
|
||||
} catch (std::exception & e) {
|
||||
@@ -256,10 +314,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
|
||||
|
||||
if (r.affected_rows() == 0) goto restart;
|
||||
|
||||
for (auto & [name, output] : step->drv->outputs)
|
||||
for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
|
||||
txn.exec_params0
|
||||
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
|
||||
buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
|
||||
buildId, stepNr, name,
|
||||
output
|
||||
? std::optional { localStore->printStorePath(*output)}
|
||||
: std::nullopt);
|
||||
|
||||
if (status == bsBusy)
|
||||
txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
|
||||
@@ -296,11 +357,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
|
||||
assert(result.logFile.find('\t') == std::string::npos);
|
||||
txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
|
||||
buildId, stepNr, result.logFile));
|
||||
|
||||
if (result.stepStatus == bsSuccess) {
|
||||
// Update the corresponding `BuildStepOutputs` row to add the output path
|
||||
auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
|
||||
assert(res.size());
|
||||
StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
|
||||
// If we've finished building, all the paths should be known
|
||||
for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
|
||||
txn.exec_params0
|
||||
("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
|
||||
buildId, stepNr, name, localStore->printStorePath(output));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
|
||||
Build::ptr build, const StorePath & drvPath, const string & outputName, const StorePath & storePath)
|
||||
Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
|
||||
{
|
||||
restart:
|
||||
auto stepNr = allocBuildStep(txn, build->id);
|
||||
@@ -320,7 +393,7 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto
|
||||
|
||||
txn.exec_params0
|
||||
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
|
||||
build->id, stepNr, outputName,
|
||||
build->id, stepNr, outputName,
|
||||
localStore->printStorePath(storePath));
|
||||
|
||||
return stepNr;
|
||||
@@ -401,6 +474,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
|
||||
res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
|
||||
isCachedBuild ? 1 : 0);
|
||||
|
||||
for (auto & [outputName, outputPath] : res.outputs) {
|
||||
txn.exec_params0
|
||||
("update BuildOutputs set path = $3 where build = $1 and name = $2",
|
||||
build->id,
|
||||
outputName,
|
||||
localStore->printStorePath(outputPath)
|
||||
);
|
||||
}
|
||||
|
||||
txn.exec_params0("delete from BuildProducts where build = $1", build->id);
|
||||
|
||||
unsigned int productNr = 1;
|
||||
@@ -412,7 +494,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
|
||||
product.type,
|
||||
product.subtype,
|
||||
product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
|
||||
product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
|
||||
product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
|
||||
product.path,
|
||||
product.name,
|
||||
product.defaultPath);
|
||||
@@ -480,182 +562,174 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
|
||||
|
||||
void State::dumpStatus(Connection & conn)
|
||||
{
|
||||
std::ostringstream out;
|
||||
time_t now = time(0);
|
||||
json statusJson = {
|
||||
{"status", "up"},
|
||||
{"time", time(0)},
|
||||
{"uptime", now - startedAt},
|
||||
{"pid", getpid()},
|
||||
|
||||
{"nrQueuedBuilds", builds.lock()->size()},
|
||||
{"nrActiveSteps", activeSteps_.lock()->size()},
|
||||
{"nrStepsBuilding", nrStepsBuilding.load()},
|
||||
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
|
||||
{"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
|
||||
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
|
||||
{"nrStepsWaiting", nrStepsWaiting.load()},
|
||||
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
|
||||
{"bytesSent", bytesSent.load()},
|
||||
{"bytesReceived", bytesReceived.load()},
|
||||
{"nrBuildsRead", nrBuildsRead.load()},
|
||||
{"buildReadTimeMs", buildReadTimeMs.load()},
|
||||
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
|
||||
{"nrBuildsDone", nrBuildsDone.load()},
|
||||
{"nrStepsStarted", nrStepsStarted.load()},
|
||||
{"nrStepsDone", nrStepsDone.load()},
|
||||
{"nrRetries", nrRetries.load()},
|
||||
{"maxNrRetries", maxNrRetries.load()},
|
||||
{"nrQueueWakeups", nrQueueWakeups.load()},
|
||||
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
|
||||
{"dispatchTimeMs", dispatchTimeMs.load()},
|
||||
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
|
||||
{"nrDbConnections", dbPool.count()},
|
||||
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
|
||||
};
|
||||
{
|
||||
JSONObject root(out);
|
||||
time_t now = time(0);
|
||||
root.attr("status", "up");
|
||||
root.attr("time", time(0));
|
||||
root.attr("uptime", now - startedAt);
|
||||
root.attr("pid", getpid());
|
||||
{
|
||||
auto builds_(builds.lock());
|
||||
root.attr("nrQueuedBuilds", builds_->size());
|
||||
}
|
||||
{
|
||||
auto steps_(steps.lock());
|
||||
for (auto i = steps_->begin(); i != steps_->end(); )
|
||||
if (i->second.lock()) ++i; else i = steps_->erase(i);
|
||||
root.attr("nrUnfinishedSteps", steps_->size());
|
||||
statusJson["nrUnfinishedSteps"] = steps_->size();
|
||||
}
|
||||
{
|
||||
auto runnable_(runnable.lock());
|
||||
for (auto i = runnable_->begin(); i != runnable_->end(); )
|
||||
if (i->lock()) ++i; else i = runnable_->erase(i);
|
||||
root.attr("nrRunnableSteps", runnable_->size());
|
||||
statusJson["nrRunnableSteps"] = runnable_->size();
|
||||
}
|
||||
root.attr("nrActiveSteps", activeSteps_.lock()->size());
|
||||
root.attr("nrStepsBuilding", nrStepsBuilding);
|
||||
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
|
||||
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
|
||||
root.attr("nrStepsWaiting", nrStepsWaiting);
|
||||
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
|
||||
root.attr("bytesSent", bytesSent);
|
||||
root.attr("bytesReceived", bytesReceived);
|
||||
root.attr("nrBuildsRead", nrBuildsRead);
|
||||
root.attr("buildReadTimeMs", buildReadTimeMs);
|
||||
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
|
||||
root.attr("nrBuildsDone", nrBuildsDone);
|
||||
root.attr("nrStepsStarted", nrStepsStarted);
|
||||
root.attr("nrStepsDone", nrStepsDone);
|
||||
root.attr("nrRetries", nrRetries);
|
||||
root.attr("maxNrRetries", maxNrRetries);
|
||||
if (nrStepsDone) {
|
||||
root.attr("totalStepTime", totalStepTime);
|
||||
root.attr("totalStepBuildTime", totalStepBuildTime);
|
||||
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
|
||||
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
|
||||
statusJson["totalStepTime"] = totalStepTime.load();
|
||||
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
|
||||
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
|
||||
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
|
||||
}
|
||||
root.attr("nrQueueWakeups", nrQueueWakeups);
|
||||
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
|
||||
root.attr("dispatchTimeMs", dispatchTimeMs);
|
||||
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
|
||||
root.attr("nrDbConnections", dbPool.count());
|
||||
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
|
||||
|
||||
{
|
||||
auto nested = root.object("machines");
|
||||
auto machines_json = json::object();
|
||||
auto machines_(machines.lock());
|
||||
for (auto & i : *machines_) {
|
||||
auto & m(i.second);
|
||||
auto & s(m->state);
|
||||
auto nested2 = nested.object(m->sshName);
|
||||
nested2.attr("enabled", m->enabled);
|
||||
|
||||
{
|
||||
auto list = nested2.list("systemTypes");
|
||||
for (auto & s : m->systemTypes)
|
||||
list.elem(s);
|
||||
}
|
||||
|
||||
{
|
||||
auto list = nested2.list("supportedFeatures");
|
||||
for (auto & s : m->supportedFeatures)
|
||||
list.elem(s);
|
||||
}
|
||||
|
||||
{
|
||||
auto list = nested2.list("mandatoryFeatures");
|
||||
for (auto & s : m->mandatoryFeatures)
|
||||
list.elem(s);
|
||||
}
|
||||
|
||||
nested2.attr("currentJobs", s->currentJobs);
|
||||
if (s->currentJobs == 0)
|
||||
nested2.attr("idleSince", s->idleSince);
|
||||
nested2.attr("nrStepsDone", s->nrStepsDone);
|
||||
if (m->state->nrStepsDone) {
|
||||
nested2.attr("totalStepTime", s->totalStepTime);
|
||||
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
|
||||
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
|
||||
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
|
||||
}
|
||||
|
||||
auto info(m->state->connectInfo.lock());
|
||||
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
|
||||
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
|
||||
nested2.attr("consecutiveFailures", info->consecutiveFailures);
|
||||
|
||||
json machine = {
|
||||
{"enabled", m->enabled},
|
||||
{"systemTypes", m->systemTypes},
|
||||
{"supportedFeatures", m->supportedFeatures},
|
||||
{"mandatoryFeatures", m->mandatoryFeatures},
|
||||
{"nrStepsDone", s->nrStepsDone.load()},
|
||||
{"currentJobs", s->currentJobs.load()},
|
||||
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
|
||||
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
|
||||
{"consecutiveFailures", info->consecutiveFailures},
|
||||
};
|
||||
|
||||
if (s->currentJobs == 0)
|
||||
machine["idleSince"] = s->idleSince.load();
|
||||
if (m->state->nrStepsDone) {
|
||||
machine["totalStepTime"] = s->totalStepTime.load();
|
||||
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
|
||||
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
|
||||
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
|
||||
}
|
||||
machines_json[m->storeUri.render()] = machine;
|
||||
}
|
||||
statusJson["machines"] = machines_json;
|
||||
}
|
||||
|
||||
{
|
||||
auto nested = root.object("jobsets");
|
||||
auto jobsets_json = json::object();
|
||||
auto jobsets_(jobsets.lock());
|
||||
for (auto & jobset : *jobsets_) {
|
||||
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
|
||||
nested2.attr("shareUsed", jobset.second->shareUsed());
|
||||
nested2.attr("seconds", jobset.second->getSeconds());
|
||||
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
|
||||
{"shareUsed", jobset.second->shareUsed()},
|
||||
{"seconds", jobset.second->getSeconds()},
|
||||
};
|
||||
}
|
||||
statusJson["jobsets"] = jobsets_json;
|
||||
}
|
||||
|
||||
{
|
||||
auto nested = root.object("machineTypes");
|
||||
auto machineTypesJson = json::object();
|
||||
auto machineTypes_(machineTypes.lock());
|
||||
for (auto & i : *machineTypes_) {
|
||||
auto nested2 = nested.object(i.first);
|
||||
nested2.attr("runnable", i.second.runnable);
|
||||
nested2.attr("running", i.second.running);
|
||||
auto machineTypeJson = machineTypesJson[i.first] = {
|
||||
{"runnable", i.second.runnable},
|
||||
{"running", i.second.running},
|
||||
};
|
||||
if (i.second.runnable > 0)
|
||||
nested2.attr("waitTime", i.second.waitTime.count() +
|
||||
i.second.runnable * (time(0) - lastDispatcherCheck));
|
||||
machineTypeJson["waitTime"] = i.second.waitTime.count() +
|
||||
i.second.runnable * (time(0) - lastDispatcherCheck);
|
||||
if (i.second.running == 0)
|
||||
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
|
||||
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
|
||||
}
|
||||
statusJson["machineTypes"] = machineTypesJson;
|
||||
}
|
||||
|
||||
auto store = getDestStore();
|
||||
|
||||
auto nested = root.object("store");
|
||||
|
||||
auto & stats = store->getStats();
|
||||
nested.attr("narInfoRead", stats.narInfoRead);
|
||||
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
|
||||
nested.attr("narInfoMissing", stats.narInfoMissing);
|
||||
nested.attr("narInfoWrite", stats.narInfoWrite);
|
||||
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
|
||||
nested.attr("narRead", stats.narRead);
|
||||
nested.attr("narReadBytes", stats.narReadBytes);
|
||||
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
|
||||
nested.attr("narWrite", stats.narWrite);
|
||||
nested.attr("narWriteAverted", stats.narWriteAverted);
|
||||
nested.attr("narWriteBytes", stats.narWriteBytes);
|
||||
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
|
||||
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
|
||||
nested.attr("narCompressionSavings",
|
||||
stats.narWriteBytes
|
||||
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
|
||||
: 0.0);
|
||||
nested.attr("narCompressionSpeed", // MiB/s
|
||||
statusJson["store"] = {
|
||||
{"narInfoRead", stats.narInfoRead.load()},
|
||||
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
|
||||
{"narInfoMissing", stats.narInfoMissing.load()},
|
||||
{"narInfoWrite", stats.narInfoWrite.load()},
|
||||
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
|
||||
{"narRead", stats.narRead.load()},
|
||||
{"narReadBytes", stats.narReadBytes.load()},
|
||||
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
|
||||
{"narWrite", stats.narWrite.load()},
|
||||
{"narWriteAverted", stats.narWriteAverted.load()},
|
||||
{"narWriteBytes", stats.narWriteBytes.load()},
|
||||
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
|
||||
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
|
||||
{"narCompressionSavings",
|
||||
stats.narWriteBytes
|
||||
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
|
||||
: 0.0},
|
||||
{"narCompressionSpeed", // MiB/s
|
||||
stats.narWriteCompressionTimeMs
|
||||
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0);
|
||||
: 0.0},
|
||||
};
|
||||
|
||||
#if NIX_WITH_S3_SUPPORT
|
||||
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
|
||||
if (s3Store) {
|
||||
auto nested2 = nested.object("s3");
|
||||
auto & s3Stats = s3Store->getS3Stats();
|
||||
nested2.attr("put", s3Stats.put);
|
||||
nested2.attr("putBytes", s3Stats.putBytes);
|
||||
nested2.attr("putTimeMs", s3Stats.putTimeMs);
|
||||
nested2.attr("putSpeed",
|
||||
s3Stats.putTimeMs
|
||||
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0);
|
||||
nested2.attr("get", s3Stats.get);
|
||||
nested2.attr("getBytes", s3Stats.getBytes);
|
||||
nested2.attr("getTimeMs", s3Stats.getTimeMs);
|
||||
nested2.attr("getSpeed",
|
||||
s3Stats.getTimeMs
|
||||
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0);
|
||||
nested2.attr("head", s3Stats.head);
|
||||
nested2.attr("costDollarApprox",
|
||||
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
|
||||
+ s3Stats.put / 1000.0 * 0.005 +
|
||||
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
|
||||
auto jsonS3 = statusJson["s3"] = {
|
||||
{"put", s3Stats.put.load()},
|
||||
{"putBytes", s3Stats.putBytes.load()},
|
||||
{"putTimeMs", s3Stats.putTimeMs.load()},
|
||||
{"putSpeed",
|
||||
s3Stats.putTimeMs
|
||||
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0},
|
||||
{"get", s3Stats.get.load()},
|
||||
{"getBytes", s3Stats.getBytes.load()},
|
||||
{"getTimeMs", s3Stats.getTimeMs.load()},
|
||||
{"getSpeed",
|
||||
s3Stats.getTimeMs
|
||||
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
|
||||
: 0.0},
|
||||
{"head", s3Stats.head.load()},
|
||||
{"costDollarApprox",
|
||||
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
|
||||
+ s3Stats.put / 1000.0 * 0.005 +
|
||||
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
|
||||
};
|
||||
}
|
||||
#endif
|
||||
}
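The costDollarApprox figure above folds three S3 price components into one number. As a standalone sketch of the same arithmetic (the per-request and per-GiB prices are the constants hard-coded in the diff; they would need adjusting for a different region or price list):

#include <cstdint>

// Rough S3 cost estimate mirroring the constants used above:
// GET/HEAD priced per 10k requests, PUT per 1k requests, egress per GiB.
double approxS3CostDollars(uint64_t gets, uint64_t heads, uint64_t puts, uint64_t getBytes)
{
    double requestCost = (gets + heads) / 10000.0 * 0.004
                       + puts / 1000.0 * 0.005;
    double transferCost = getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09;
    return requestCost + transferCost;
}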

{
@@ -663,7 +737,7 @@ void State::dumpStatus(Connection & conn)
pqxx::work txn(conn);
// FIXME: use PostgreSQL 9.5 upsert.
txn.exec("delete from SystemStatus where what = 'queue-runner'");
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
txn.exec("notify status_dumped");
txn.commit();
}
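The FIXME above notes that the delete-then-insert pair could become a single upsert. A hedged sketch of what that might look like with libpqxx, assuming SystemStatus has a unique constraint on its `what` column:

#include <pqxx/pqxx>
#include <string>

// Hypothetical upsert variant of the delete + insert pair above;
// assumes a unique index on SystemStatus(what) so ON CONFLICT has a target.
void writeQueueRunnerStatus(pqxx::connection & conn, const std::string & statusJson)
{
    pqxx::work txn(conn);
    txn.exec_params(
        "insert into SystemStatus values ('queue-runner', $1) "
        "on conflict (what) do update set status = excluded.status",
        statusJson);
    txn.exec("notify status_dumped");
    txn.commit();
}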
@@ -675,14 +749,14 @@ void State::showStatus()
|
||||
auto conn(dbPool.get());
|
||||
receiver statusDumped(*conn, "status_dumped");
|
||||
|
||||
string status;
|
||||
std::string status;
|
||||
bool barf = false;
|
||||
|
||||
/* Get the last JSON status dump from the database. */
|
||||
{
|
||||
pqxx::work txn(*conn);
|
||||
auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
|
||||
if (res.size()) status = res[0][0].as<string>();
|
||||
if (res.size()) status = res[0][0].as<std::string>();
|
||||
}
|
||||
|
||||
if (status != "") {
|
||||
@@ -702,7 +776,7 @@ void State::showStatus()
|
||||
{
|
||||
pqxx::work txn(*conn);
|
||||
auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
|
||||
if (res.size()) status = res[0][0].as<string>();
|
||||
if (res.size()) status = res[0][0].as<std::string>();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -746,7 +820,19 @@ void State::run(BuildID buildOne)
if (!lock)
throw Error("hydra-queue-runner is already running");

Store::Params localParams;
std::cout << "Starting the Prometheus exporter on " << metricsAddr << std::endl;

/* Set up simple exporter, to show that we're still alive. */
prometheus::Exposer promExposer{metricsAddr};
auto exposerPort = promExposer.GetListeningPorts().front();

promExposer.RegisterCollectable(prom.registry);

std::cout << "Started the Prometheus exporter, listening on "
<< metricsAddr << "/metrics (port " << exposerPort << ")"
<< std::endl;

Store::Config::Params localParams;
localParams["max-connections"] = "16";
localParams["max-connection-age"] = "600";
localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
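For reference, the prometheus-cpp setup used above reduces to a few calls. A minimal standalone sketch, assuming prometheus-cpp is available; the metric name and bind address are illustrative only:

#include <prometheus/counter.h>
#include <prometheus/exposer.h>
#include <prometheus/registry.h>
#include <memory>

int main()
{
    // Serve /metrics on the given address, like the exporter above.
    prometheus::Exposer exposer{"127.0.0.1:9198"};
    auto registry = std::make_shared<prometheus::Registry>();

    auto & family = prometheus::BuildCounter()
        .Name("example_events_total")
        .Help("Number of events seen")
        .Register(*registry);
    auto & counter = family.Add({});

    exposer.RegisterCollectable(registry);
    counter.Increment();
    // ... the real work loop would run here ...
}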
@@ -769,6 +855,7 @@ void State::run(BuildID buildOne)
|
||||
dumpStatus(*conn);
|
||||
}
|
||||
|
||||
machinesReadyLock.lock();
|
||||
std::thread(&State::monitorMachinesFile, this).detach();
|
||||
|
||||
std::thread(&State::queueMonitor, this).detach();
|
||||
@@ -827,10 +914,17 @@ void State::run(BuildID buildOne)
|
||||
while (true) {
|
||||
try {
|
||||
auto conn(dbPool.get());
|
||||
receiver dumpStatus_(*conn, "dump_status");
|
||||
while (true) {
|
||||
conn->await_notification();
|
||||
dumpStatus(*conn);
|
||||
try {
|
||||
receiver dumpStatus_(*conn, "dump_status");
|
||||
while (true) {
|
||||
conn->await_notification();
|
||||
dumpStatus(*conn);
|
||||
}
|
||||
} catch (pqxx::broken_connection & connEx) {
|
||||
printMsg(lvlError, "main thread: %s", connEx.what());
|
||||
printMsg(lvlError, "main thread: Reconnecting in 10s");
|
||||
conn.markBad();
|
||||
sleep(10);
|
||||
}
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, "main thread: %s", e.what());
|
||||
@@ -855,6 +949,7 @@ int main(int argc, char * * argv)
|
||||
bool unlock = false;
|
||||
bool status = false;
|
||||
BuildID buildOne = 0;
|
||||
std::optional<std::string> metricsAddrOpt = std::nullopt;
|
||||
|
||||
parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
|
||||
if (*arg == "--unlock")
|
||||
@@ -862,17 +957,20 @@ int main(int argc, char * * argv)
|
||||
else if (*arg == "--status")
|
||||
status = true;
|
||||
else if (*arg == "--build-one") {
|
||||
if (!string2Int<BuildID>(getArg(*arg, arg, end), buildOne))
|
||||
if (auto b = string2Int<BuildID>(getArg(*arg, arg, end)))
|
||||
buildOne = *b;
|
||||
else
|
||||
throw Error("‘--build-one’ requires a build ID");
|
||||
} else if (*arg == "--prometheus-address") {
|
||||
metricsAddrOpt = getArg(*arg, arg, end);
|
||||
} else
|
||||
return false;
|
||||
return true;
|
||||
});
|
||||
|
||||
settings.verboseBuild = true;
|
||||
settings.lockCPU = false;
|
||||
|
||||
State state;
|
||||
State state{metricsAddrOpt};
|
||||
if (status)
|
||||
state.showStatus();
|
||||
else if (unlock)
|
||||
|
||||
24
src/hydra-queue-runner/meson.build
Normal file
@@ -0,0 +1,24 @@
srcs = files(
'builder.cc',
'build-remote.cc',
'build-result.cc',
'dispatcher.cc',
'hydra-queue-runner.cc',
'nar-extractor.cc',
'queue-monitor.cc',
)

hydra_queue_runner = executable('hydra-queue-runner',
'hydra-queue-runner.cc',
srcs,
dependencies: [
libhydra_dep,
nix_util_dep,
nix_store_dep,
nix_main_dep,
pqxx_dep,
prom_cpp_core_dep,
prom_cpp_pull_dep,
],
install: true,
)
@@ -1,12 +1,51 @@
|
||||
#include "nar-extractor.hh"
|
||||
|
||||
#include "archive.hh"
|
||||
#include <nix/util/archive.hh>
|
||||
|
||||
#include <unordered_set>
|
||||
|
||||
using namespace nix;
|
||||
|
||||
struct Extractor : ParseSink
|
||||
|
||||
struct NarMemberConstructor : CreateRegularFileSink
|
||||
{
|
||||
NarMemberData & curMember;
|
||||
|
||||
HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
|
||||
|
||||
std::optional<uint64_t> expectedSize;
|
||||
|
||||
NarMemberConstructor(NarMemberData & curMember)
|
||||
: curMember(curMember)
|
||||
{ }
|
||||
|
||||
void isExecutable() override
|
||||
{
|
||||
}
|
||||
|
||||
void preallocateContents(uint64_t size) override
|
||||
{
|
||||
expectedSize = size;
|
||||
}
|
||||
|
||||
void operator () (std::string_view data) override
|
||||
{
|
||||
assert(expectedSize);
|
||||
*curMember.fileSize += data.size();
|
||||
hashSink(data);
|
||||
if (curMember.contents) {
|
||||
curMember.contents->append(data);
|
||||
}
|
||||
assert(curMember.fileSize <= expectedSize);
|
||||
if (curMember.fileSize == expectedSize) {
|
||||
auto [hash, len] = hashSink.finish();
|
||||
assert(curMember.fileSize == len);
|
||||
curMember.sha256 = hash;
|
||||
}
|
||||
}
|
||||
};
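NarMemberConstructor streams each regular file through a nix::HashSink so the SHA-256 is computed incrementally as the NAR is parsed. In isolation the sink works roughly like this (a sketch; the exact HashAlgorithm spellings vary across Nix releases):

#include <nix/util/hash.hh>
#include <cassert>
#include <string_view>

using namespace nix;

// Feed chunks into a HashSink and read back the digest plus byte count,
// the same pattern the NAR extractor uses per regular file.
Hash hashChunks()
{
    HashSink sink{HashAlgorithm::SHA256};
    sink(std::string_view("hello "));
    sink(std::string_view("world"));
    auto [hash, len] = sink.finish();
    assert(len == 11);
    return hash;
}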
|
||||
|
||||
struct Extractor : FileSystemObjectSink
|
||||
{
|
||||
std::unordered_set<Path> filesToKeep {
|
||||
"/nix-support/hydra-build-products",
|
||||
@@ -15,58 +54,40 @@ struct Extractor : ParseSink
|
||||
};
|
||||
|
||||
NarMemberDatas & members;
|
||||
NarMemberData * curMember = nullptr;
|
||||
Path prefix;
|
||||
std::filesystem::path prefix;
|
||||
|
||||
Path toKey(const CanonPath & path)
|
||||
{
|
||||
std::filesystem::path p = prefix;
|
||||
// Conditional to avoid trailing slash
|
||||
if (!path.isRoot()) p /= path.rel();
|
||||
return p;
|
||||
}
|
||||
|
||||
Extractor(NarMemberDatas & members, const Path & prefix)
|
||||
: members(members), prefix(prefix)
|
||||
{ }
|
||||
|
||||
void createDirectory(const Path & path) override
|
||||
void createDirectory(const CanonPath & path) override
|
||||
{
|
||||
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
|
||||
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
|
||||
}
|
||||
|
||||
void createRegularFile(const Path & path) override
|
||||
void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
|
||||
{
|
||||
curMember = &members.insert_or_assign(prefix + path, NarMemberData {
|
||||
.type = FSAccessor::Type::tRegular,
|
||||
.fileSize = 0,
|
||||
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
|
||||
}).first->second;
|
||||
NarMemberConstructor nmc {
|
||||
members.insert_or_assign(toKey(path), NarMemberData {
|
||||
.type = SourceAccessor::Type::tRegular,
|
||||
.fileSize = 0,
|
||||
.contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
|
||||
}).first->second,
|
||||
};
|
||||
func(nmc);
|
||||
}
|
||||
|
||||
std::optional<uint64_t> expectedSize;
|
||||
std::unique_ptr<HashSink> hashSink;
|
||||
|
||||
void preallocateContents(uint64_t size) override
|
||||
void createSymlink(const CanonPath & path, const std::string & target) override
|
||||
{
|
||||
expectedSize = size;
|
||||
hashSink = std::make_unique<HashSink>(htSHA256);
|
||||
}
|
||||
|
||||
void receiveContents(std::string_view data) override
|
||||
{
|
||||
assert(expectedSize);
|
||||
assert(curMember);
|
||||
assert(hashSink);
|
||||
*curMember->fileSize += data.size();
|
||||
(*hashSink)(data);
|
||||
if (curMember->contents) {
|
||||
curMember->contents->append(data);
|
||||
}
|
||||
assert(curMember->fileSize <= expectedSize);
|
||||
if (curMember->fileSize == expectedSize) {
|
||||
auto [hash, len] = hashSink->finish();
|
||||
assert(curMember->fileSize == len);
|
||||
curMember->sha256 = hash;
|
||||
hashSink.reset();
|
||||
}
|
||||
}
|
||||
|
||||
void createSymlink(const Path & path, const string & target) override
|
||||
{
|
||||
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
|
||||
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
|
||||
}
|
||||
};
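The sink above is driven by Nix's NAR parser rather than called directly. A hypothetical usage sketch, assuming the Extractor struct defined above and a recent parseDump(FileSystemObjectSink &, Source &) signature; the store path prefix is made up:

#include <nix/util/archive.hh>
#include <nix/util/serialise.hh>
#include "nar-extractor.hh"

using namespace nix;

// Parse a NAR held in memory and collect per-member metadata,
// keyed under an illustrative store path prefix.
void indexNar(NarMemberDatas & members, const std::string & narBytes)
{
    Extractor sink(members, "/nix/store/example-output");
    StringSource source(narBytes);
    parseDump(sink, source);  // walks the NAR, invoking the sink's callbacks
}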
|
||||
|
||||
|
||||
@@ -1,13 +1,13 @@
#pragma once

#include "fs-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
#include <nix/util/source-accessor.hh>
#include <nix/util/types.hh>
#include <nix/util/serialise.hh>
#include <nix/util/hash.hh>

struct NarMemberData
{
nix::FSAccessor::Type type;
nix::SourceAccessor::Type type;
std::optional<uint64_t> fileSize;
std::optional<std::string> contents;
std::optional<nix::Hash> sha256;
@@ -1,6 +1,8 @@
|
||||
#include "state.hh"
|
||||
#include "build-result.hh"
|
||||
#include "globals.hh"
|
||||
#include "hydra-build-result.hh"
|
||||
#include <nix/store/globals.hh>
|
||||
#include <nix/store/parsed-derivations.hh>
|
||||
#include <nix/util/thread-pool.hh>
|
||||
|
||||
#include <cstring>
|
||||
|
||||
@@ -10,61 +12,77 @@ using namespace nix;
|
||||
void State::queueMonitor()
|
||||
{
|
||||
while (true) {
|
||||
auto conn(dbPool.get());
|
||||
try {
|
||||
queueMonitorLoop();
|
||||
queueMonitorLoop(*conn);
|
||||
} catch (pqxx::broken_connection & e) {
|
||||
printMsg(lvlError, "queue monitor: %s", e.what());
|
||||
printMsg(lvlError, "queue monitor: Reconnecting in 10s");
|
||||
conn.markBad();
|
||||
sleep(10);
|
||||
} catch (std::exception & e) {
|
||||
printMsg(lvlError, format("queue monitor: %1%") % e.what());
|
||||
printError("queue monitor: %s", e.what());
|
||||
sleep(10); // probably a DB problem, so don't retry right away
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void State::queueMonitorLoop()
|
||||
void State::queueMonitorLoop(Connection & conn)
|
||||
{
|
||||
auto conn(dbPool.get());
|
||||
|
||||
receiver buildsAdded(*conn, "builds_added");
|
||||
receiver buildsRestarted(*conn, "builds_restarted");
|
||||
receiver buildsCancelled(*conn, "builds_cancelled");
|
||||
receiver buildsDeleted(*conn, "builds_deleted");
|
||||
receiver buildsBumped(*conn, "builds_bumped");
|
||||
receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
|
||||
receiver buildsAdded(conn, "builds_added");
|
||||
receiver buildsRestarted(conn, "builds_restarted");
|
||||
receiver buildsCancelled(conn, "builds_cancelled");
|
||||
receiver buildsDeleted(conn, "builds_deleted");
|
||||
receiver buildsBumped(conn, "builds_bumped");
|
||||
receiver jobsetSharesChanged(conn, "jobset_shares_changed");
|
||||
|
||||
auto destStore = getDestStore();
|
||||
|
||||
unsigned int lastBuildId = 0;
|
||||
bool quit = false;
|
||||
while (!quit) {
|
||||
auto t_before_work = std::chrono::steady_clock::now();
|
||||
|
||||
while (true) {
|
||||
localStore->clearPathInfoCache();
|
||||
|
||||
bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
|
||||
bool done = getQueuedBuilds(conn, destStore);
|
||||
|
||||
if (buildOne && buildOneDone) quit = true;
|
||||
|
||||
auto t_after_work = std::chrono::steady_clock::now();
|
||||
|
||||
prom.queue_monitor_time_spent_running.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
|
||||
|
||||
/* Sleep until we get notification from the database about an
|
||||
event. */
|
||||
if (done) {
|
||||
conn->await_notification();
|
||||
if (done && !quit) {
|
||||
conn.await_notification();
|
||||
nrQueueWakeups++;
|
||||
} else
|
||||
conn->get_notifs();
|
||||
conn.get_notifs();
|
||||
|
||||
if (auto lowestId = buildsAdded.get()) {
|
||||
lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
|
||||
printMsg(lvlTalkative, "got notification: new builds added to the queue");
|
||||
}
|
||||
if (buildsRestarted.get()) {
|
||||
printMsg(lvlTalkative, "got notification: builds restarted");
|
||||
lastBuildId = 0; // check all builds
|
||||
}
|
||||
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
|
||||
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
|
||||
processQueueChange(*conn);
|
||||
processQueueChange(conn);
|
||||
}
|
||||
if (jobsetSharesChanged.get()) {
|
||||
printMsg(lvlTalkative, "got notification: jobset shares changed");
|
||||
processJobsetSharesChange(*conn);
|
||||
processJobsetSharesChange(conn);
|
||||
}
|
||||
|
||||
auto t_after_sleep = std::chrono::steady_clock::now();
|
||||
prom.queue_monitor_time_spent_waiting.Increment(
|
||||
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
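queueMonitorLoop blocks on PostgreSQL notifications instead of polling. Stripped of Hydra's receiver helper, the underlying libpqxx LISTEN/NOTIFY pattern looks roughly like this (channel name and connection string are placeholders):

#include <pqxx/pqxx>
#include <iostream>

int main()
{
    pqxx::connection conn{"dbname=hydra"};
    {
        pqxx::work txn{conn};
        txn.exec("LISTEN builds_added");   // subscribe to the channel
        txn.commit();
    }
    while (true) {
        conn.await_notification();         // block until some NOTIFY arrives
        std::cout << "woken up, re-checking the queue\n";
        // ... getQueuedBuilds(...) would run here ...
    }
}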
|
||||
|
||||
|
||||
@@ -75,45 +93,47 @@ struct PreviousFailure : public std::exception {
|
||||
|
||||
|
||||
bool State::getQueuedBuilds(Connection & conn,
|
||||
ref<Store> destStore, unsigned int & lastBuildId)
|
||||
ref<Store> destStore)
|
||||
{
|
||||
printInfo("checking the queue for builds > %d...", lastBuildId);
|
||||
prom.queue_checks_started.Increment();
|
||||
|
||||
printInfo("checking the queue for builds...");
|
||||
|
||||
/* Grab the queued builds from the database, but don't process
|
||||
them yet (since we don't want a long-running transaction). */
|
||||
std::vector<BuildID> newIDs;
|
||||
std::map<BuildID, Build::ptr> newBuildsByID;
|
||||
std::unordered_map<BuildID, Build::ptr> newBuildsByID;
|
||||
std::multimap<StorePath, BuildID> newBuildsByPath;
|
||||
|
||||
unsigned int newLastBuildId = lastBuildId;
|
||||
|
||||
{
|
||||
pqxx::work txn(conn);
|
||||
|
||||
auto res = txn.exec_params
|
||||
("select id, project, jobset, job, drvPath, maxsilent, timeout, timestamp, globalPriority, priority from Builds "
|
||||
"where id > $1 and finished = 0 order by globalPriority desc, id",
|
||||
lastBuildId);
|
||||
("select builds.id, builds.jobset_id, jobsets.project as project, "
|
||||
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
|
||||
"globalPriority, priority from Builds "
|
||||
"inner join jobsets on builds.jobset_id = jobsets.id "
|
||||
"where finished = 0 order by globalPriority desc, random()");
|
||||
|
||||
for (auto const & row : res) {
|
||||
auto builds_(builds.lock());
|
||||
BuildID id = row["id"].as<BuildID>();
|
||||
if (buildOne && id != buildOne) continue;
|
||||
if (id > newLastBuildId) newLastBuildId = id;
|
||||
if (builds_->count(id)) continue;
|
||||
|
||||
auto build = std::make_shared<Build>(
|
||||
localStore->parseStorePath(row["drvPath"].as<string>()));
|
||||
localStore->parseStorePath(row["drvPath"].as<std::string>()));
|
||||
build->id = id;
|
||||
build->projectName = row["project"].as<string>();
|
||||
build->jobsetName = row["jobset"].as<string>();
|
||||
build->jobName = row["job"].as<string>();
|
||||
build->jobsetId = row["jobset_id"].as<JobsetID>();
|
||||
build->projectName = row["project"].as<std::string>();
|
||||
build->jobsetName = row["jobset"].as<std::string>();
|
||||
build->jobName = row["job"].as<std::string>();
|
||||
build->maxSilentTime = row["maxsilent"].as<int>();
|
||||
build->buildTimeout = row["timeout"].as<int>();
|
||||
build->timestamp = row["timestamp"].as<time_t>();
|
||||
build->globalPriority = row["globalPriority"].as<int>();
|
||||
build->localPriority = row["priority"].as<int>();
|
||||
build->jobset = createJobset(txn, build->projectName, build->jobsetName);
|
||||
build->jobset = createJobset(txn, build->projectName, build->jobsetName, build->jobsetId);
|
||||
|
||||
newIDs.push_back(id);
|
||||
newBuildsByID[id] = build;
|
||||
@@ -127,13 +147,14 @@ bool State::getQueuedBuilds(Connection & conn,
|
||||
std::set<StorePath> finishedDrvs;
|
||||
|
||||
createBuild = [&](Build::ptr build) {
|
||||
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
|
||||
prom.queue_build_loads.Increment();
|
||||
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
|
||||
nrAdded++;
|
||||
newBuildsByID.erase(build->id);
|
||||
|
||||
if (!localStore->isValidPath(build->drvPath)) {
|
||||
/* Derivation has been GC'ed prematurely. */
|
||||
printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
|
||||
printError("aborting GC'ed build %1%", build->id);
|
||||
if (!build->finishedInDB) {
|
||||
auto mc = startDbUpdate();
|
||||
pqxx::work txn(conn);
|
||||
@@ -160,6 +181,7 @@ bool State::getQueuedBuilds(Connection & conn,
|
||||
|
||||
/* Some step previously failed, so mark the build as
|
||||
failed right away. */
|
||||
if (!buildOneDone && build->id == buildOne) buildOneDone = true;
|
||||
printMsg(lvlError, "marking build %d as cached failure due to ‘%s’",
|
||||
build->id, localStore->printStorePath(ex.step->drvPath));
|
||||
if (!build->finishedInDB) {
|
||||
@@ -176,15 +198,19 @@ bool State::getQueuedBuilds(Connection & conn,
|
||||
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
|
||||
|
||||
if (!propagatedFrom) {
|
||||
for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
|
||||
if (i.second.second) {
|
||||
auto res = txn.exec_params
|
||||
("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
|
||||
localStore->printStorePath(*i.second.second));
|
||||
if (!res[0][0].is_null()) {
|
||||
propagatedFrom = res[0][0].as<BuildID>();
|
||||
break;
|
||||
}
|
||||
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
|
||||
constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
|
||||
auto res = optOutputPath
|
||||
? txn.exec_params(
|
||||
std::string { common } + " and path = $1",
|
||||
localStore->printStorePath(*optOutputPath))
|
||||
: txn.exec_params(
|
||||
std::string { common } + " and drvPath = $1 and name = $2",
|
||||
localStore->printStorePath(ex.step->drvPath),
|
||||
outputName);
|
||||
if (!res[0][0].is_null()) {
|
||||
propagatedFrom = res[0][0].as<BuildID>();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -220,17 +246,16 @@ bool State::getQueuedBuilds(Connection & conn,
|
||||
/* If we didn't get a step, it means the step's outputs are
|
||||
all valid. So we mark this as a finished, cached build. */
|
||||
if (!step) {
|
||||
auto drv = localStore->readDerivation(build->drvPath);
|
||||
BuildOutput res = getBuildOutputCached(conn, destStore, drv);
|
||||
BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
|
||||
|
||||
for (auto & i : drv.outputsAndOptPaths(*localStore))
|
||||
if (i.second.second)
|
||||
addRoot(*i.second.second);
|
||||
for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
|
||||
addRoot(i.second);
|
||||
|
||||
{
|
||||
auto mc = startDbUpdate();
|
||||
pqxx::work txn(conn);
|
||||
time_t now = time(0);
|
||||
if (!buildOneDone && build->id == buildOne) buildOneDone = true;
|
||||
printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
|
||||
markSucceededBuild(txn, build, res, true, now, now);
|
||||
notifyBuildFinished(txn, build->id, {});
|
||||
@@ -275,7 +300,7 @@ bool State::getQueuedBuilds(Connection & conn,
|
||||
try {
|
||||
createBuild(build);
|
||||
} catch (Error & e) {
|
||||
e.addTrace({}, hintfmt("while loading build %d: ", build->id));
|
||||
e.addTrace({}, HintFmt("while loading build %d: ", build->id));
|
||||
throw;
|
||||
}
|
||||
|
||||
@@ -285,18 +310,23 @@ bool State::getQueuedBuilds(Connection & conn,
|
||||
|
||||
/* Add the new runnable build steps to ‘runnable’ and wake up
|
||||
the builder threads. */
|
||||
printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
|
||||
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
|
||||
for (auto & r : newRunnable)
|
||||
makeRunnable(r);
|
||||
|
||||
if (buildOne && newRunnable.size() == 0) buildOneDone = true;
|
||||
|
||||
nrBuildsRead += nrAdded;
|
||||
|
||||
/* Stop after a certain time to allow priority bumps to be
|
||||
processed. */
|
||||
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) break;
|
||||
if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
|
||||
prom.queue_checks_early_exits.Increment();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
|
||||
prom.queue_checks_finished.Increment();
|
||||
return newBuildsByID.empty();
|
||||
}
|
||||
|
||||
@@ -334,13 +364,13 @@ void State::processQueueChange(Connection & conn)
|
||||
for (auto i = builds_->begin(); i != builds_->end(); ) {
|
||||
auto b = currentIds.find(i->first);
|
||||
if (b == currentIds.end()) {
|
||||
printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
|
||||
printInfo("discarding cancelled build %1%", i->first);
|
||||
i = builds_->erase(i);
|
||||
// FIXME: ideally we would interrupt active build steps here.
|
||||
continue;
|
||||
}
|
||||
if (i->second->globalPriority < b->second) {
|
||||
printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
|
||||
printInfo("priority of build %1% increased", i->first);
|
||||
i->second->globalPriority = b->second;
|
||||
i->second->propagatePriorities();
|
||||
}
|
||||
@@ -375,6 +405,34 @@ void State::processQueueChange(Connection & conn)
|
||||
}
|
||||
|
||||
|
||||
std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
ref<Store> destStore,
const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
ThreadPool tp;

for (auto & [output, maybeOutputPath] : paths) {
if (!maybeOutputPath) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
} else {
tp.enqueue([&] {
if (!destStore->isValidPath(*maybeOutputPath)) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
}
});
}
}

tp.process();

auto missing(missing_.lock());
return *missing;
}
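The ThreadPool/Sync combination above generalises to any batch of independent store queries whose latency you want to overlap. A trimmed-down sketch of the same pattern, not tied to derivation outputs:

#include <nix/store/store-api.hh>
#include <nix/util/thread-pool.hh>
#include <nix/util/sync.hh>

using namespace nix;

// Check a set of paths against a (possibly slow) remote store in parallel,
// collecting the ones it does not have.
StorePathSet missingPaths(ref<Store> store, const StorePathSet & paths)
{
    Sync<StorePathSet> missing_;
    ThreadPool tp;
    for (auto & path : paths)
        tp.enqueue([&, path] {
            if (!store->isValidPath(path))
                missing_.lock()->insert(path);
        });
    tp.process();
    return *missing_.lock();
}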
|
||||
|
||||
Step::ptr State::createStep(ref<Store> destStore,
|
||||
Connection & conn, Build::ptr build, const StorePath & drvPath,
|
||||
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
|
||||
@@ -424,6 +482,8 @@ Step::ptr State::createStep(ref<Store> destStore,
|
||||
|
||||
if (!isNew) return step;
|
||||
|
||||
prom.queue_steps_created.Increment();
|
||||
|
||||
printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath));
|
||||
|
||||
/* Initialize the step. Note that the step may be visible in
|
||||
@@ -431,17 +491,23 @@ Step::ptr State::createStep(ref<Store> destStore,
|
||||
it's not runnable yet, and other threads won't make it
|
||||
runnable while step->created == false. */
|
||||
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
|
||||
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
|
||||
{
|
||||
auto parsedOpt = StructuredAttrs::tryParse(step->drv->env);
|
||||
try {
|
||||
step->drvOptions = std::make_unique<DerivationOptions>(
|
||||
DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr));
|
||||
} catch (Error & e) {
|
||||
e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
|
||||
step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1";
|
||||
step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
|
||||
step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";
|
||||
|
||||
step->systemType = step->drv->platform;
|
||||
{
|
||||
auto i = step->drv->env.find("requiredSystemFeatures");
|
||||
StringSet features;
|
||||
if (i != step->drv->env.end())
|
||||
features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
|
||||
StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
|
||||
if (step->preferLocalBuild)
|
||||
features.insert("local");
|
||||
if (!features.empty()) {
|
||||
@@ -455,26 +521,40 @@ Step::ptr State::createStep(ref<Store> destStore,
|
||||
throw PreviousFailure{step};
|
||||
|
||||
/* Are all outputs valid? */
|
||||
bool valid = true;
|
||||
DerivationOutputs missing;
|
||||
for (auto & i : step->drv->outputs)
|
||||
if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
|
||||
valid = false;
|
||||
missing.insert_or_assign(i.first, i.second);
|
||||
}
|
||||
auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
|
||||
std::map<DrvOutput, std::optional<StorePath>> paths;
|
||||
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
|
||||
auto outputHash = outputHashes.at(outputName);
|
||||
paths.insert({{outputHash, outputName}, maybeOutputPath});
|
||||
}
|
||||
|
||||
auto missing = getMissingRemotePaths(destStore, paths);
|
||||
bool valid = missing.empty();
|
||||
|
||||
/* Try to copy the missing paths from the local store or from
|
||||
substitutes. */
|
||||
if (!missing.empty()) {
|
||||
|
||||
size_t avail = 0;
|
||||
for (auto & i : missing) {
|
||||
auto path = i.second.path(*localStore, step->drv->name, i.first);
|
||||
if (/* localStore != destStore && */ localStore->isValidPath(*path))
|
||||
for (auto & [i, pathOpt] : missing) {
|
||||
// If we don't know the output path from the destination
|
||||
// store, see if the local store can tell us.
|
||||
if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
|
||||
if (auto maybeRealisation = localStore->queryRealisation(i))
|
||||
pathOpt = maybeRealisation->outPath;
|
||||
|
||||
if (!pathOpt) {
|
||||
// No hope of getting the store object if we don't know
|
||||
// the path.
|
||||
continue;
|
||||
}
|
||||
auto & path = *pathOpt;
|
||||
|
||||
if (/* localStore != destStore && */ localStore->isValidPath(path))
|
||||
avail++;
|
||||
else if (useSubstitutes) {
|
||||
SubstitutablePathInfos infos;
|
||||
localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
|
||||
localStore->querySubstitutablePathInfos({{path, {}}}, infos);
|
||||
if (infos.size() == 1)
|
||||
avail++;
|
||||
}
|
||||
@@ -482,38 +562,43 @@ Step::ptr State::createStep(ref<Store> destStore,
|
||||
|
||||
if (missing.size() == avail) {
|
||||
valid = true;
|
||||
for (auto & i : missing) {
|
||||
auto path = i.second.path(*localStore, step->drv->name, i.first);
|
||||
for (auto & [i, pathOpt] : missing) {
|
||||
// If we found everything, then we should know the path
|
||||
// to every missing store object now.
|
||||
assert(pathOpt);
|
||||
auto & path = *pathOpt;
|
||||
|
||||
try {
|
||||
time_t startTime = time(0);
|
||||
|
||||
if (localStore->isValidPath(*path))
|
||||
if (localStore->isValidPath(path))
|
||||
printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
|
||||
localStore->printStorePath(*path),
|
||||
localStore->printStorePath(path),
|
||||
localStore->printStorePath(drvPath));
|
||||
else {
|
||||
printInfo("substituting output ‘%1%’ of ‘%2%’",
|
||||
localStore->printStorePath(*path),
|
||||
localStore->printStorePath(path),
|
||||
localStore->printStorePath(drvPath));
|
||||
localStore->ensurePath(*path);
|
||||
localStore->ensurePath(path);
|
||||
// FIXME: should copy directly from substituter to destStore.
|
||||
}
|
||||
|
||||
copyClosure(ref<Store>(localStore), destStore, {*path});
|
||||
copyClosure(*localStore, *destStore,
|
||||
StorePathSet { path },
|
||||
NoRepair, CheckSigs, NoSubstitute);
|
||||
|
||||
time_t stopTime = time(0);
|
||||
|
||||
{
|
||||
auto mc = startDbUpdate();
|
||||
pqxx::work txn(conn);
|
||||
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
|
||||
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
} catch (Error & e) {
|
||||
printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
|
||||
localStore->printStorePath(*path),
|
||||
localStore->printStorePath(path),
|
||||
localStore->printStorePath(drvPath),
|
||||
e.what());
|
||||
valid = false;
|
||||
@@ -533,7 +618,7 @@ Step::ptr State::createStep(ref<Store> destStore,
|
||||
printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));
|
||||
|
||||
/* Create steps for the dependencies. */
|
||||
for (auto & i : step->drv->inputDrvs) {
|
||||
for (auto & i : step->drv->inputDrvs.map) {
|
||||
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
|
||||
if (dep) {
|
||||
auto step_(step->state.lock());
|
||||
@@ -558,7 +643,7 @@ Step::ptr State::createStep(ref<Store> destStore,
|
||||
|
||||
|
||||
Jobset::ptr State::createJobset(pqxx::work & txn,
|
||||
const std::string & projectName, const std::string & jobsetName)
|
||||
const std::string & projectName, const std::string & jobsetName, const JobsetID jobsetID)
|
||||
{
|
||||
auto p = std::make_pair(projectName, jobsetName);
|
||||
|
||||
@@ -569,9 +654,8 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
|
||||
}
|
||||
|
||||
auto res = txn.exec_params1
|
||||
("select schedulingShares from Jobsets where project = $1 and name = $2",
|
||||
projectName,
|
||||
jobsetName);
|
||||
("select schedulingShares from Jobsets where id = $1",
|
||||
jobsetID);
|
||||
if (res.empty()) throw Error("missing jobset - can't happen");
|
||||
|
||||
auto shares = res["schedulingShares"].as<unsigned int>();
|
||||
@@ -582,10 +666,9 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
|
||||
/* Load the build steps from the last 24 hours. */
|
||||
auto res2 = txn.exec_params
|
||||
("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
|
||||
"where s.startTime is not null and s.stopTime > $1 and project = $2 and jobset = $3",
|
||||
"where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
|
||||
time(0) - Jobset::schedulingWindow * 10,
|
||||
projectName,
|
||||
jobsetName);
|
||||
jobsetID);
|
||||
for (auto const & row : res2) {
|
||||
time_t startTime = row["startTime"].as<time_t>();
|
||||
time_t stopTime = row["stopTime"].as<time_t>();
|
||||
@@ -607,28 +690,30 @@ void State::processJobsetSharesChange(Connection & conn)
|
||||
auto res = txn.exec("select project, name, schedulingShares from Jobsets");
|
||||
for (auto const & row : res) {
|
||||
auto jobsets_(jobsets.lock());
|
||||
auto i = jobsets_->find(std::make_pair(row["project"].as<string>(), row["name"].as<string>()));
|
||||
auto i = jobsets_->find(std::make_pair(row["project"].as<std::string>(), row["name"].as<std::string>()));
|
||||
if (i == jobsets_->end()) continue;
|
||||
i->second->setShares(row["schedulingShares"].as<unsigned int>());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
|
||||
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
|
||||
{
|
||||
auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
|
||||
|
||||
{
|
||||
pqxx::work txn(conn);
|
||||
|
||||
for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
|
||||
for (auto & [name, output] : derivationOutputs) {
|
||||
auto r = txn.exec_params
|
||||
("select id, buildStatus, releaseName, closureSize, size from Builds b "
|
||||
"join BuildOutputs o on b.id = o.build "
|
||||
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
|
||||
localStore->printStorePath(*output.second));
|
||||
localStore->printStorePath(output));
|
||||
if (r.empty()) continue;
|
||||
BuildID id = r[0][0].as<BuildID>();
|
||||
|
||||
printMsg(lvlInfo, format("reusing build %d") % id);
|
||||
printInfo("reusing build %d", id);
|
||||
|
||||
BuildOutput res;
|
||||
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
|
||||
@@ -651,7 +736,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
|
||||
product.fileSize = row[2].as<off_t>();
|
||||
}
|
||||
if (!row[3].is_null())
|
||||
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
|
||||
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
|
||||
if (!row[4].is_null())
|
||||
product.path = row[4].as<std::string>();
|
||||
product.name = row[5].as<std::string>();
|
||||
@@ -678,5 +763,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
|
||||
}
|
||||
|
||||
NarMemberDatas narMembers;
|
||||
return getBuildOutput(destStore, narMembers, drv);
|
||||
return getBuildOutput(destStore, narMembers, derivationOutputs);
|
||||
}
|
||||
|
||||
@@ -6,19 +6,33 @@
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <queue>
|
||||
#include <regex>
|
||||
#include <semaphore>
|
||||
|
||||
#include <prometheus/counter.h>
|
||||
#include <prometheus/gauge.h>
|
||||
#include <prometheus/registry.h>
|
||||
|
||||
#include "db.hh"
|
||||
|
||||
#include "parsed-derivations.hh"
|
||||
#include "pathlocks.hh"
|
||||
#include "pool.hh"
|
||||
#include "store-api.hh"
|
||||
#include "sync.hh"
|
||||
#include <nix/store/derivations.hh>
|
||||
#include <nix/store/derivation-options.hh>
|
||||
#include <nix/store/pathlocks.hh>
|
||||
#include <nix/util/pool.hh>
|
||||
#include <nix/store/build-result.hh>
|
||||
#include <nix/store/store-api.hh>
|
||||
#include <nix/util/sync.hh>
|
||||
#include "nar-extractor.hh"
|
||||
#include <nix/store/serve-protocol.hh>
|
||||
#include <nix/store/serve-protocol-impl.hh>
|
||||
#include <nix/store/serve-protocol-connection.hh>
|
||||
#include <nix/store/machines.hh>
|
||||
|
||||
|
||||
typedef unsigned int BuildID;
|
||||
|
||||
typedef unsigned int JobsetID;
|
||||
|
||||
typedef std::chrono::time_point<std::chrono::system_clock> system_time;
|
||||
|
||||
typedef std::atomic<unsigned long> counter;
|
||||
@@ -46,6 +60,7 @@ typedef enum {
|
||||
ssConnecting = 10,
|
||||
ssSendingInputs = 20,
|
||||
ssBuilding = 30,
|
||||
ssWaitingForLocalSlot = 35,
|
||||
ssReceivingOutputs = 40,
|
||||
ssPostProcessing = 50,
|
||||
} StepState;
|
||||
@@ -70,6 +85,8 @@ struct RemoteResult
|
||||
{
|
||||
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
|
||||
}
|
||||
|
||||
void updateWithBuildResult(const nix::BuildResult &);
|
||||
};
|
||||
|
||||
|
||||
@@ -123,6 +140,7 @@ struct Build
|
||||
BuildID id;
|
||||
nix::StorePath drvPath;
|
||||
std::map<std::string, nix::StorePath> outputs;
|
||||
JobsetID jobsetId;
|
||||
std::string projectName, jobsetName, jobName;
|
||||
time_t timestamp;
|
||||
unsigned int maxSilentTime, buildTimeout;
|
||||
@@ -153,8 +171,8 @@ struct Step
|
||||
|
||||
nix::StorePath drvPath;
|
||||
std::unique_ptr<nix::Derivation> drv;
|
||||
std::unique_ptr<nix::ParsedDerivation> parsedDrv;
|
||||
std::set<std::string> requiredSystemFeatures;
|
||||
std::unique_ptr<nix::DerivationOptions> drvOptions;
|
||||
nix::StringSet requiredSystemFeatures;
|
||||
bool preferLocalBuild;
|
||||
bool isDeterministic;
|
||||
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
|
||||
@@ -222,18 +240,10 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
|
||||
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
|
||||
|
||||
|
||||
struct Machine
|
||||
struct Machine : nix::Machine
|
||||
{
|
||||
typedef std::shared_ptr<Machine> ptr;
|
||||
|
||||
bool enabled{true};
|
||||
|
||||
std::string sshName, sshKey;
|
||||
std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
|
||||
unsigned int maxJobs = 1;
|
||||
float speedFactor = 1.0;
|
||||
std::string sshPublicHostKey;
|
||||
|
||||
struct State {
|
||||
typedef std::shared_ptr<State> ptr;
|
||||
counter currentJobs{0};
|
||||
@@ -283,10 +293,13 @@ struct Machine
|
||||
return true;
|
||||
}
|
||||
|
||||
bool isLocalhost()
|
||||
{
|
||||
return sshName == "localhost";
|
||||
}
|
||||
bool isLocalhost() const;
|
||||
|
||||
// A connection to a machine
|
||||
struct Connection : nix::ServeProto::BasicClientConnection {
|
||||
// Backpointer to the machine
|
||||
ptr machine;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -339,9 +352,14 @@ private:
|
||||
nix::Pool<Connection> dbPool;
|
||||
|
||||
/* The build machines. */
|
||||
typedef std::map<std::string, Machine::ptr> Machines;
|
||||
std::mutex machinesReadyLock;
|
||||
typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
|
||||
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr
|
||||
|
||||
/* Throttler for CPU-bound local work. */
|
||||
static constexpr unsigned int maxSupportedLocalWorkers = 1024;
|
||||
std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;
|
||||
|
||||
/* Various stats. */
|
||||
time_t startedAt;
|
||||
counter nrBuildsRead{0};
|
||||
@@ -351,6 +369,7 @@ private:
|
||||
counter nrStepsDone{0};
|
||||
counter nrStepsBuilding{0};
|
||||
counter nrStepsCopyingTo{0};
|
||||
counter nrStepsWaitingForDownloadSlot{0};
|
||||
counter nrStepsCopyingFrom{0};
|
||||
counter nrStepsWaiting{0};
|
||||
counter nrUnsupportedSteps{0};
|
||||
@@ -367,6 +386,7 @@ private:
|
||||
|
||||
/* Specific build to do for --build-one (testing only). */
|
||||
BuildID buildOne;
|
||||
bool buildOneDone = false;
|
||||
|
||||
/* Statistics per machine type for the Hydra auto-scaler. */
|
||||
struct MachineType
|
||||
@@ -380,7 +400,6 @@ private:
|
||||
|
||||
struct MachineReservation
|
||||
{
|
||||
typedef std::shared_ptr<MachineReservation> ptr;
|
||||
State & state;
|
||||
Step::ptr step;
|
||||
Machine::ptr machine;
|
||||
@@ -418,7 +437,7 @@ private:
|
||||
|
||||
/* How often the build steps of a jobset should be repeated in
|
||||
order to detect non-determinism. */
|
||||
std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
|
||||
std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;
|
||||
|
||||
bool uploadLogsToBinaryCache;
|
||||
|
||||
@@ -427,8 +446,30 @@ private:
|
||||
via gc_roots_dir. */
|
||||
nix::Path rootsDir;
|
||||
|
||||
std::string metricsAddr;
|
||||
|
||||
struct PromMetrics
|
||||
{
|
||||
std::shared_ptr<prometheus::Registry> registry;
|
||||
|
||||
prometheus::Counter& queue_checks_started;
|
||||
prometheus::Counter& queue_build_loads;
|
||||
prometheus::Counter& queue_steps_created;
|
||||
prometheus::Counter& queue_checks_early_exits;
|
||||
prometheus::Counter& queue_checks_finished;
|
||||
|
||||
prometheus::Counter& dispatcher_time_spent_running;
|
||||
prometheus::Counter& dispatcher_time_spent_waiting;
|
||||
|
||||
prometheus::Counter& queue_monitor_time_spent_running;
|
||||
prometheus::Counter& queue_monitor_time_spent_waiting;
|
||||
|
||||
PromMetrics();
|
||||
};
|
||||
PromMetrics prom;
|
||||
|
||||
public:
|
||||
State();
|
||||
State(std::optional<std::string> metricsAddrOpt);
|
||||
|
||||
private:
|
||||
|
||||
@@ -456,23 +497,28 @@ private:
|
||||
const std::string & machine);
|
||||
|
||||
int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
|
||||
Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
|
||||
Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);
|
||||
|
||||
void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
|
||||
|
||||
void queueMonitor();
|
||||
|
||||
void queueMonitorLoop();
|
||||
void queueMonitorLoop(Connection & conn);
|
||||
|
||||
/* Check the queue for new builds. */
|
||||
bool getQueuedBuilds(Connection & conn,
|
||||
nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
|
||||
bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);
|
||||
|
||||
/* Handle cancellation, deletion and priority bumps. */
|
||||
void processQueueChange(Connection & conn);
|
||||
|
||||
BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
|
||||
const nix::Derivation & drv);
|
||||
const nix::StorePath & drvPath);
|
||||
|
||||
/* Returns paths missing from the remote store. Paths are processed in
|
||||
* parallel to work around the possible latency of remote stores. */
|
||||
std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
|
||||
nix::ref<nix::Store> destStore,
|
||||
const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);
|
||||
|
||||
Step::ptr createStep(nix::ref<nix::Store> store,
|
||||
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
|
||||
@@ -485,11 +531,10 @@ private:
|
||||
BuildID buildId,
|
||||
const RemoteResult & result,
|
||||
Machine::ptr machine,
|
||||
bool & stepFinished,
|
||||
bool & quit);
|
||||
bool & stepFinished);
|
||||
|
||||
Jobset::ptr createJobset(pqxx::work & txn,
|
||||
const std::string & projectName, const std::string & jobsetName);
|
||||
const std::string & projectName, const std::string & jobsetName, const JobsetID);
|
||||
|
||||
void processJobsetSharesChange(Connection & conn);
|
||||
|
||||
@@ -504,19 +549,19 @@ private:
|
||||
|
||||
void abortUnsupported();
|
||||
|
||||
void builder(MachineReservation::ptr reservation);
|
||||
void builder(std::unique_ptr<MachineReservation> reservation);
|
||||
|
||||
/* Perform the given build step. Return true if the step is to be
|
||||
retried. */
|
||||
enum StepResult { sDone, sRetry, sMaybeCancelled };
|
||||
StepResult doBuildStep(nix::ref<nix::Store> destStore,
|
||||
MachineReservation::ptr reservation,
|
||||
std::unique_ptr<MachineReservation> reservation,
|
||||
std::shared_ptr<ActiveStep> activeStep);
|
||||
|
||||
void buildRemote(nix::ref<nix::Store> destStore,
|
||||
std::unique_ptr<MachineReservation> reservation,
|
||||
Machine::ptr machine, Step::ptr step,
|
||||
unsigned int maxSilentTime, unsigned int buildTimeout,
|
||||
unsigned int repeats,
|
||||
const nix::ServeProto::BuildOptions & buildOptions,
|
||||
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
||||
std::function<void(StepState)> updateStep,
|
||||
NarMemberDatas & narMembers);
|
||||
@@ -539,6 +584,8 @@ private:
|
||||
|
||||
void addRoot(const nix::StorePath & storePath);
|
||||
|
||||
void runMetricsExporter();
|
||||
|
||||
public:
|
||||
|
||||
void showStatus();
|
||||
|
||||
@@ -6,9 +6,9 @@ use parent 'Catalyst';
|
||||
use Moose;
|
||||
use Hydra::Plugin;
|
||||
use Hydra::Model::DB;
|
||||
use Hydra::Config qw(getLDAPConfigAmbient);
|
||||
use Catalyst::Runtime '5.70';
|
||||
use Catalyst qw/ConfigLoader
|
||||
Unicode::Encoding
|
||||
Static::Simple
|
||||
StackTrace
|
||||
Authentication
|
||||
@@ -16,10 +16,10 @@ use Catalyst qw/ConfigLoader
|
||||
Session
|
||||
Session::Store::FastMmap
|
||||
Session::State::Cookie
|
||||
Captcha/,
|
||||
Captcha
|
||||
PrometheusTiny/,
|
||||
'-Log=warn,fatal,error';
|
||||
use CatalystX::RoleApplicator;
|
||||
use YAML qw(LoadFile);
|
||||
use Path::Class 'file';
|
||||
|
||||
our $VERSION = '0.01';
|
||||
@@ -27,27 +27,31 @@ our $VERSION = '0.01';
|
||||
__PACKAGE__->config(
|
||||
name => 'Hydra',
|
||||
default_view => "TT",
|
||||
authentication => {
|
||||
'Plugin::Authentication' => {
|
||||
default_realm => "dbic",
|
||||
realms => {
|
||||
dbic => {
|
||||
credential => {
|
||||
class => "Password",
|
||||
password_field => "password",
|
||||
password_type => "hashed",
|
||||
password_hash_type => "SHA-1",
|
||||
},
|
||||
store => {
|
||||
class => "DBIx::Class",
|
||||
user_class => "DB::Users",
|
||||
role_relation => "userroles",
|
||||
role_field => "role",
|
||||
},
|
||||
|
||||
dbic => {
|
||||
credential => {
|
||||
class => "Password",
|
||||
password_field => "password",
|
||||
password_type => "self_check",
|
||||
},
|
||||
store => {
|
||||
class => "DBIx::Class",
|
||||
user_class => "DB::Users",
|
||||
role_relation => "userroles",
|
||||
role_field => "role",
|
||||
},
|
||||
ldap => $ENV{'HYDRA_LDAP_CONFIG'} ? LoadFile(
|
||||
file($ENV{'HYDRA_LDAP_CONFIG'})
|
||||
) : undef
|
||||
},
|
||||
ldap => getLDAPConfigAmbient()->{'config'}
|
||||
},
|
||||
'Plugin::ConfigLoader' => {
|
||||
driver => {
|
||||
'General' => \%Hydra::Config::configGeneralOpts
|
||||
}
|
||||
},
|
||||
'Plugin::PrometheusTiny' => {
|
||||
include_action_labels => 1,
|
||||
},
|
||||
'Plugin::Static::Simple' => {
|
||||
send_etag => 1,
|
||||
|
||||
@@ -3,8 +3,7 @@ package Hydra::Base::Controller::NixChannel;
|
||||
use strict;
|
||||
use warnings;
|
||||
use base 'Hydra::Base::Controller::REST';
|
||||
use List::MoreUtils qw(any);
|
||||
use Nix::Store;
|
||||
use List::SomeUtils qw(any);
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
|
||||
@@ -30,7 +29,7 @@ sub getChannelData {
|
||||
my $outputs = {};
|
||||
foreach my $output (@outputs) {
|
||||
my $outPath = $output->get_column("outpath");
|
||||
next if $checkValidity && !isValidPath($outPath);
|
||||
next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
|
||||
$outputs->{$output->get_column("outname")} = $outPath;
|
||||
push @storePaths, $outPath;
|
||||
# Put the system type in the manifest (for top-level
|
||||
|
||||
@@ -5,10 +5,15 @@ use strict;
|
||||
use warnings;
|
||||
|
||||
use base 'DBIx::Class';
|
||||
use JSON::MaybeXS;
|
||||
|
||||
sub TO_JSON {
|
||||
my $self = shift;
|
||||
|
||||
if ($self->can("as_json")) {
|
||||
return $self->as_json();
|
||||
}
|
||||
|
||||
my $hint = $self->json_hint;
|
||||
|
||||
my %json = ();
|
||||
@@ -17,6 +22,14 @@ sub TO_JSON {
|
||||
$json{$column} = $self->get_column($column);
|
||||
}
|
||||
|
||||
foreach my $column (@{$hint->{string_columns}}) {
|
||||
$json{$column} = $self->get_column($column) // "";
|
||||
}
|
||||
|
||||
foreach my $column (@{$hint->{boolean_columns}}) {
|
||||
$json{$column} = $self->get_column($column) ? JSON::MaybeXS::true : JSON::MaybeXS::false;
|
||||
}
|
||||
|
||||
foreach my $relname (keys %{$hint->{relations}}) {
|
||||
my $key = $hint->{relations}->{$relname};
|
||||
$json{$relname} = [ map { $_->$key } $self->$relname ];
|
||||
|
||||
168
src/lib/Hydra/Config.pm
Normal file
@@ -0,0 +1,168 @@
|
||||
package Hydra::Config;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Config::General;
|
||||
use List::SomeUtils qw(none);
|
||||
use YAML qw(LoadFile);
|
||||
|
||||
our @ISA = qw(Exporter);
|
||||
our @EXPORT = qw(
|
||||
getHydraConfig
|
||||
getLDAPConfig
|
||||
getLDAPConfigAmbient
|
||||
);
|
||||
|
||||
our %configGeneralOpts = (-UseApacheInclude => 1, -IncludeAgain => 1, -IncludeRelative => 1);
|
||||
|
||||
my $hydraConfigCache;
|
||||
|
||||
sub getHydraConfig {
|
||||
return $hydraConfigCache if defined $hydraConfigCache;
|
||||
|
||||
my $conf;
|
||||
|
||||
if ($ENV{"HYDRA_CONFIG"}) {
|
||||
$conf = $ENV{"HYDRA_CONFIG"};
|
||||
} else {
|
||||
require Hydra::Model::DB;
|
||||
$conf = Hydra::Model::DB::getHydraPath() . "/hydra.conf"
|
||||
};
|
||||
|
||||
if (-f $conf) {
|
||||
$hydraConfigCache = loadConfig($conf);
|
||||
} else {
|
||||
$hydraConfigCache = {};
|
||||
}
|
||||
|
||||
return $hydraConfigCache;
|
||||
}
|
||||
|
||||
sub loadConfig {
|
||||
my ($sourceFile) = @_;
|
||||
|
||||
my %opts = (%configGeneralOpts, -ConfigFile => $sourceFile);
|
||||
|
||||
return { Config::General->new(%opts)->getall };
|
||||
}
|
||||
|
||||
sub is_ldap_in_legacy_mode {
|
||||
my ($config, %env) = @_;
|
||||
|
||||
my $legacy_defined = defined $env{"HYDRA_LDAP_CONFIG"};
|
||||
|
||||
if (defined $config->{"ldap"}) {
|
||||
if ($legacy_defined) {
|
||||
die "The legacy environment variable HYDRA_LDAP_CONFIG is set, but config is also specified in hydra.conf. Please unset the environment variable.";
|
||||
}
|
||||
|
||||
return 0;
|
||||
} elsif ($legacy_defined) {
|
||||
warn "Hydra is configured to use LDAP via the HYDRA_LDAP_CONFIG, a deprecated method. Please see the docs about configuring LDAP in the hydra.conf.";
|
||||
return 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
sub getLDAPConfigAmbient {
|
||||
return getLDAPConfig(getHydraConfig(), %ENV);
|
||||
}
|
||||
|
||||
sub getLDAPConfig {
|
||||
my ($config, %env) = @_;
|
||||
|
||||
my $ldap_config;
|
||||
|
||||
if (is_ldap_in_legacy_mode($config, %env)) {
|
||||
$ldap_config = get_legacy_ldap_config($env{"HYDRA_LDAP_CONFIG"});
|
||||
} else {
|
||||
$ldap_config = $config->{"ldap"};
|
||||
}
|
||||
|
||||
$ldap_config->{"role_mapping"} = normalize_ldap_role_mappings($ldap_config->{"role_mapping"});
|
||||
|
||||
return $ldap_config;
|
||||
}
|
||||
|
||||
sub get_legacy_ldap_config {
|
||||
my ($ldap_yaml_file) = @_;
|
||||
|
||||
return {
|
||||
config => LoadFile($ldap_yaml_file),
|
||||
role_mapping => {
|
||||
"hydra_admin" => [ "admin" ],
|
||||
"hydra_bump-to-front" => [ "bump-to-front" ],
|
||||
"hydra_cancel-build" => [ "cancel-build" ],
|
||||
"hydra_create-projects" => [ "create-projects" ],
|
||||
"hydra_eval-jobset" => [ "eval-jobset" ],
|
||||
"hydra_restart-jobs" => [ "restart-jobs" ],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
sub normalize_ldap_role_mappings {
|
||||
my ($input_map) = @_;
|
||||
|
||||
my $mapping = {};
|
||||
|
||||
my @errors;
|
||||
|
||||
for my $group (keys %{$input_map}) {
|
||||
my $input = $input_map->{$group};
|
||||
|
||||
if (ref $input eq "ARRAY") {
|
||||
$mapping->{$group} = $input;
|
||||
} elsif (ref $input eq "") {
|
||||
$mapping->{$group} = [ $input ];
|
||||
} else {
|
||||
push @errors, "On group '$group': the value is of type ${\ref $input}. Only strings and lists are acceptable.";
|
||||
$mapping->{$group} = [ ];
|
||||
}
|
||||
|
||||
eval {
|
||||
validate_roles($mapping->{$group});
|
||||
};
|
||||
if ($@) {
|
||||
push @errors, "On group '$group': $@";
|
||||
}
|
||||
}
|
||||
|
||||
if (@errors) {
|
||||
die "Failed to normalize LDAP role mappings:\n" . (join "\n", @errors);
|
||||
}
|
||||
|
||||
return $mapping;
|
||||
}
|
||||
|
||||
sub validate_roles {
|
||||
my ($roles) = @_;
|
||||
|
||||
my @invalid;
|
||||
my $valid = valid_roles();
|
||||
|
||||
for my $role (@$roles) {
|
||||
if (none { $_ eq $role } @$valid) {
|
||||
push @invalid, "'$role'";
|
||||
}
|
||||
}
|
||||
|
||||
if (@invalid) {
|
||||
die "Invalid roles: ${\join ', ', @invalid}. Valid roles are: ${\join ', ', @$valid}.";
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub valid_roles {
|
||||
return [
|
||||
"admin",
|
||||
"bump-to-front",
|
||||
"cancel-build",
|
||||
"create-projects",
|
||||
"eval-jobset",
|
||||
"restart-jobs",
|
||||
];
|
||||
}
|
||||
|
||||
1;
|
||||
@@ -7,12 +7,10 @@ use base 'Hydra::Base::Controller::REST';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use Hydra::Controller::Project;
|
||||
use JSON;
|
||||
use JSON::Any;
|
||||
use JSON::MaybeXS;
|
||||
use DateTime;
|
||||
use Digest::SHA qw(sha256_hex);
|
||||
use Text::Diff;
|
||||
use File::Slurp;
|
||||
use IPC::Run qw(run);
|
||||
|
||||
|
||||
@@ -26,8 +24,8 @@ sub buildToHash {
|
||||
my ($build) = @_;
|
||||
my $result = {
|
||||
id => $build->id,
|
||||
project => $build->get_column("project"),
|
||||
jobset => $build->get_column("jobset"),
|
||||
project => $build->jobset->get_column("project"),
|
||||
jobset => $build->jobset->get_column("name"),
|
||||
job => $build->get_column("job"),
|
||||
system => $build->system,
|
||||
nixname => $build->nixname,
|
||||
@@ -56,18 +54,24 @@ sub latestbuilds : Chained('api') PathPart('latestbuilds') Args(0) {
|
||||
my $system = $c->request->params->{system};
|
||||
|
||||
my $filter = {finished => 1};
|
||||
$filter->{project} = $project if ! $project eq "";
|
||||
$filter->{jobset} = $jobset if ! $jobset eq "";
|
||||
$filter->{"jobset.project"} = $project if ! $project eq "";
|
||||
$filter->{"jobset.name"} = $jobset if ! $jobset eq "";
|
||||
$filter->{job} = $job if !$job eq "";
|
||||
$filter->{system} = $system if !$system eq "";
|
||||
|
||||
my @latest = $c->model('DB::Builds')->search($filter, {rows => $nr, order_by => ["id DESC"] });
|
||||
my @latest = $c->model('DB::Builds')->search(
|
||||
$filter,
|
||||
{
|
||||
rows => $nr,
|
||||
order_by => ["id DESC"],
|
||||
join => [ "jobset" ]
|
||||
});
|
||||
|
||||
my @list;
|
||||
push @list, buildToHash($_) foreach @latest;
|
||||
|
||||
$c->stash->{'plain'} = {
|
||||
data => scalar (JSON::Any->objToJson(\@list))
|
||||
data => scalar (encode_json(\@list))
|
||||
};
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
@@ -88,7 +92,7 @@ sub jobsetToHash {
|
||||
triggertime => $jobset->triggertime,
|
||||
fetcherrormsg => $jobset->fetcherrormsg,
|
||||
errortime => $jobset->errortime,
|
||||
haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::true : JSON::false
|
||||
haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::MaybeXS::true : JSON::MaybeXS::false
|
||||
};
|
||||
}
|
||||
|
||||
@@ -108,7 +112,7 @@ sub jobsets : Chained('api') PathPart('jobsets') Args(0) {
|
||||
push @list, jobsetToHash($_) foreach @jobsets;
|
||||
|
||||
$c->stash->{'plain'} = {
|
||||
data => scalar (JSON::Any->objToJson(\@list))
|
||||
data => scalar (encode_json(\@list))
|
||||
};
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
@@ -126,7 +130,7 @@ sub queue : Chained('api') PathPart('queue') Args(0) {
|
||||
push @list, buildToHash($_) foreach @builds;
|
||||
|
||||
$c->stash->{'plain'} = {
|
||||
data => scalar (JSON::Any->objToJson(\@list))
|
||||
data => scalar (encode_json(\@list))
|
||||
};
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
@@ -156,21 +160,31 @@ sub nrbuilds : Chained('api') PathPart('nrbuilds') Args(0) {
|
||||
my $system = $c->request->params->{system};
|
||||
|
||||
my $filter = {finished => 1};
|
||||
$filter->{project} = $project if ! $project eq "";
|
||||
$filter->{jobset} = $jobset if ! $jobset eq "";
|
||||
$filter->{"jobset.project"} = $project if ! $project eq "";
|
||||
$filter->{"jobset.name"} = $jobset if ! $jobset eq "";
|
||||
$filter->{job} = $job if !$job eq "";
|
||||
$filter->{system} = $system if !$system eq "";
|
||||
|
||||
$base = 60*60 if($period eq "hour");
|
||||
$base = 24*60*60 if($period eq "day");
|
||||
|
||||
my @stats = $c->model('DB::Builds')->search($filter, {select => [{ count => "*" }], as => ["nr"], group_by => ["timestamp - timestamp % $base"], order_by => "timestamp - timestamp % $base DESC", rows => $nr});
|
||||
my @stats = $c->model('DB::Builds')->search(
|
||||
$filter,
|
||||
{
|
||||
select => [{ count => "*" }],
|
||||
as => ["nr"],
|
||||
group_by => ["timestamp - timestamp % $base"],
|
||||
order_by => "timestamp - timestamp % $base DESC",
|
||||
rows => $nr,
|
||||
join => [ "jobset" ]
|
||||
}
|
||||
);
|
||||
my @arr;
|
||||
push @arr, int($_->get_column("nr")) foreach @stats;
|
||||
@arr = reverse(@arr);
|
||||
|
||||
$c->stash->{'plain'} = {
|
||||
data => scalar (JSON::Any->objToJson(\@arr))
|
||||
data => scalar (encode_json(\@arr))
|
||||
};
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
@@ -202,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
|
||||
} elsif ($type eq "git") {
|
||||
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
|
||||
die if ! -d $clonePath;
|
||||
$diff .= `(cd $clonePath; git log $rev1..$rev2)`;
|
||||
$diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
|
||||
$diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
|
||||
$diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
|
||||
}
|
||||
|
||||
$c->stash->{'plain'} = { data => (scalar $diff) || " " };
|
||||
@@ -225,6 +239,8 @@ sub triggerJobset {
|
||||
sub push : Chained('api') PathPart('push') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
requirePost($c);
|
||||
|
||||
$c->{stash}->{json}->{jobsetsTriggered} = [];
|
||||
|
||||
my $force = exists $c->request->query_params->{force};
|
||||
@@ -232,17 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) {
|
||||
foreach my $s (@jobsets) {
|
||||
my ($p, $j) = parseJobsetName($s);
|
||||
my $jobset = $c->model('DB::Jobsets')->find($p, $j);
|
||||
requireEvalJobsetPrivileges($c, $jobset->project);
|
||||
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
|
||||
triggerJobset($self, $c, $jobset, $force);
|
||||
}
|
||||
|
||||
my @repos = split /,/, ($c->request->query_params->{repos} // "");
|
||||
foreach my $r (@repos) {
|
||||
triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
|
||||
my @jobsets = $c->model('DB::Jobsets')->search(
|
||||
{ 'project.enabled' => 1, 'me.enabled' => 1 },
|
||||
{ join => 'project'
|
||||
, where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ]
|
||||
{
|
||||
join => 'project',
|
||||
where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
|
||||
order_by => 'me.id DESC'
|
||||
});
|
||||
foreach my $jobset (@jobsets) {
|
||||
requireEvalJobsetPrivileges($c, $jobset->project);
|
||||
triggerJobset($self, $c, $jobset, $force)
|
||||
}
|
||||
}
|
||||
|
||||
$self->status_ok(
|
||||
@@ -251,7 +274,6 @@ sub push : Chained('api') PathPart('push') Args(0) {
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
sub push_github : Chained('api') PathPart('push-github') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
@@ -270,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
|
||||
$c->response->body("");
|
||||
}
|
||||
|
||||
sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
$c->{stash}->{json}->{jobsetsTriggered} = [];
|
||||
|
||||
my $in = $c->request->{data};
|
||||
my $url = $in->{repository}->{clone_url} or die;
|
||||
$url =~ s/.git$//;
|
||||
print STDERR "got push from Gitea repository $url\n";
|
||||
|
||||
triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
|
||||
{ 'project.enabled' => 1, 'me.enabled' => 1 },
|
||||
{ join => 'project'
|
||||
, where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
|
||||
});
|
||||
$c->response->body("");
|
||||
}
|
||||
|
||||
|
||||
1;
|
||||
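As a side note, the JSON::Any → JSON::MaybeXS switch in this controller keeps the same wire format; a standalone sketch of the replacement call (the data below is made up):

use strict;
use warnings;
use JSON::MaybeXS qw(encode_json decode_json);

# encode_json emits the same compact JSON text the old JSON::Any->objToJson
# call did, and JSON::MaybeXS::true/false serialize as JSON booleans.
my $json = encode_json([ { id => 1, finished => JSON::MaybeXS::true } ]);
my $data = decode_json($json);    # round-trips back to a Perl structure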
|
||||
@@ -6,7 +6,6 @@ use base 'Catalyst::Controller';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use Data::Dump qw(dump);
|
||||
use Digest::SHA1 qw(sha1_hex);
|
||||
use Config::General;
|
||||
|
||||
|
||||
@@ -33,7 +32,7 @@ sub machines : Chained('admin') PathPart('machines') Args(0) {
|
||||
|
||||
sub clear_queue_non_current : Chained('admin') PathPart('clear-queue-non-current') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
my $builds = $c->model('DB::Builds')->search(
|
||||
my $builds = $c->model('DB::Builds')->search_rs(
|
||||
{ id => { -in => \ "select id from Builds where id in ((select id from Builds where finished = 0) except (select build from JobsetEvalMembers where eval in (select max(id) from JobsetEvals where hasNewBuilds = 1 group by jobset_id)))" }
|
||||
});
|
||||
my $n = cancelBuilds($c->model('DB')->schema, $builds);
|
||||
|
||||
@@ -7,16 +7,15 @@ use base 'Hydra::Base::Controller::NixChannel';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use File::Basename;
|
||||
use File::LibMagic;
|
||||
use File::stat;
|
||||
use File::Slurp;
|
||||
use Data::Dump qw(dump);
|
||||
use Nix::Store;
|
||||
use Nix::Config;
|
||||
use List::MoreUtils qw(all);
|
||||
use List::SomeUtils qw(all);
|
||||
use Encode;
|
||||
use MIME::Types;
|
||||
use JSON::PP;
|
||||
use WWW::Form::UrlEncoded::PP qw();
|
||||
|
||||
use feature 'state';
|
||||
|
||||
sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
||||
my ($self, $c, $id) = @_;
|
||||
@@ -38,6 +37,18 @@ sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
|
||||
$c->stash->{project} = $c->stash->{build}->project;
|
||||
$c->stash->{jobset} = $c->stash->{build}->jobset;
|
||||
$c->stash->{job} = $c->stash->{build}->job;
|
||||
$c->stash->{runcommandlogs} = [$c->stash->{build}->runcommandlogs->search({}, {order_by => ["id DESC"]})];
|
||||
|
||||
$c->stash->{runcommandlogProblem} = undef;
|
||||
if ($c->stash->{job} =~ qr/^runCommandHook\..*/) {
|
||||
if (!$c->config->{dynamicruncommand}->{enable}) {
|
||||
$c->stash->{runcommandlogProblem} = "disabled-server";
|
||||
} elsif (!$c->stash->{project}->enable_dynamic_run_command) {
|
||||
$c->stash->{runcommandlogProblem} = "disabled-project";
|
||||
} elsif (!$c->stash->{jobset}->enable_dynamic_run_command) {
|
||||
$c->stash->{runcommandlogProblem} = "disabled-jobset";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -66,14 +77,16 @@ sub build_GET {
|
||||
|
||||
$c->stash->{template} = 'build.tt';
|
||||
$c->stash->{isLocalStore} = isLocalStore();
|
||||
# XXX: If the derivation is content-addressed then this will always return
|
||||
# false because `$_->path` will be empty
|
||||
$c->stash->{available} =
|
||||
$c->stash->{isLocalStore}
|
||||
? all { isValidPath($_->path) } $build->buildoutputs->all
|
||||
? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
|
||||
: 1;
|
||||
$c->stash->{drvAvailable} = isValidPath $build->drvpath;
|
||||
$c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
|
||||
|
||||
if ($build->finished && $build->iscachedbuild) {
|
||||
my $path = ($build->buildoutputs)[0]->path or die;
|
||||
my $path = ($build->buildoutputs)[0]->path or undef;
|
||||
my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
|
||||
if (defined $cachedBuildStep) {
|
||||
$c->stash->{cachedBuild} = $cachedBuildStep->build;
|
||||
@@ -81,26 +94,6 @@ sub build_GET {
|
||||
}
|
||||
}
|
||||
|
||||
if ($build->finished) {
|
||||
$c->stash->{prevBuilds} = [$c->model('DB::Builds')->search(
|
||||
{ project => $c->stash->{project}->name
|
||||
, jobset => $c->stash->{jobset}->name
|
||||
, job => $c->stash->{job}
|
||||
, 'me.system' => $build->system
|
||||
, finished => 1
|
||||
, buildstatus => 0
|
||||
, 'me.id' => { '<=' => $build->id }
|
||||
}
|
||||
, { join => "actualBuildStep"
|
||||
, "+select" => ["actualBuildStep.stoptime - actualBuildStep.starttime"]
|
||||
, "+as" => ["actualBuildTime"]
|
||||
, order_by => "me.id DESC"
|
||||
, rows => 50
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
|
||||
# Get the first eval of which this build was a part.
|
||||
($c->stash->{nrEvals}) = $build->jobsetevals->search({ hasnewbuilds => 1 })->count;
|
||||
$c->stash->{eval} = getFirstEval($build);
|
||||
@@ -124,6 +117,19 @@ sub build_GET {
|
||||
$c->stash->{binaryCachePublicUri} = $c->config->{binary_cache_public_uri};
|
||||
}
|
||||
|
||||
sub constituents :Chained('buildChain') :PathPart('constituents') :Args(0) :ActionClass('REST') { }
|
||||
|
||||
sub constituents_GET {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
my $build = $c->stash->{build};
|
||||
|
||||
$self->status_ok(
|
||||
$c,
|
||||
entity => [$build->constituents_->search({}, {order_by => ["job"]})]
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
|
||||
my ($self, $c, $stepnr, $mode) = @_;
|
||||
@@ -133,23 +139,35 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
|
||||
|
||||
$c->stash->{step} = $step;
|
||||
|
||||
showLog($c, $mode, $step->busy == 0, $step->drvpath);
|
||||
my $drvPath = $step->drvpath;
|
||||
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
|
||||
showLog($c, $mode, $log_uri);
|
||||
}
|
||||
|
||||
|
||||
sub view_log : Chained('buildChain') PathPart('log') {
|
||||
my ($self, $c, $mode) = @_;
|
||||
showLog($c, $mode, $c->stash->{build}->finished,
|
||||
$c->stash->{build}->drvpath);
|
||||
|
||||
my $drvPath = $c->stash->{build}->drvpath;
|
||||
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
|
||||
showLog($c, $mode, $log_uri);
|
||||
}
|
||||
|
||||
|
||||
sub view_runcommandlog : Chained('buildChain') PathPart('runcommandlog') {
|
||||
my ($self, $c, $uuid, $mode) = @_;
|
||||
|
||||
my $log_uri = $c->uri_for($c->controller('Root')->action_for("runcommandlog"), $uuid);
|
||||
showLog($c, $mode, $log_uri);
|
||||
$c->stash->{template} = 'runcommand-log.tt';
|
||||
$c->stash->{runcommandlog} = $c->stash->{build}->runcommandlogs->find({ uuid => $uuid });
|
||||
}
|
||||
|
||||
|
||||
sub showLog {
|
||||
my ($c, $mode, $finished, $drvPath) = @_;
|
||||
my ($c, $mode, $log_uri) = @_;
|
||||
$mode //= "pretty";
|
||||
|
||||
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
|
||||
|
||||
if ($mode eq "pretty") {
|
||||
$c->stash->{log_uri} = $log_uri;
|
||||
$c->stash->{template} = 'log.tt';
|
||||
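A small illustration (with a hypothetical derivation path) of the log URI construction that view_nixlog and view_log now share; it assumes url_encode escapes reserved characters such as '+':

use strict;
use warnings;
use File::Basename;
use WWW::Form::UrlEncoded::PP ();

my $drvPath = "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-gtk+-3.24.drv";

# The /log/<name> route receives the url-encoded basename of the .drv path.
my $encoded = WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath));
# e.g. "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-gtk%2B-3.24.drv"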
@@ -216,17 +234,24 @@ sub serveFile {
|
||||
}
|
||||
|
||||
elsif ($ls->{type} eq "regular") {
|
||||
# Have the hosted data considered its own origin to avoid being a giant
|
||||
# XSS hole.
|
||||
$c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');
|
||||
|
||||
$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
|
||||
"cat-store", "--store", getStoreUri(), "$path"]) };
|
||||
$c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
|
||||
"store", "cat", "--store", getStoreUri(), "$path"]) };
|
||||
|
||||
# Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple.
|
||||
# Detect MIME type.
|
||||
my $type = "text/plain";
|
||||
if ($path =~ /.*\.(\S{1,})$/xms) {
|
||||
my $ext = $1;
|
||||
my $mimeTypes = MIME::Types->new(only_complete => 1);
|
||||
my $t = $mimeTypes->mimeTypeOf($ext);
|
||||
$type = ref $t ? $t->type : $t if $t;
|
||||
} else {
|
||||
state $magic = File::LibMagic->new(follow_symlinks => 1);
|
||||
my $info = $magic->info_from_filename($path);
|
||||
$type = $info->{mime_with_encoding};
|
||||
}
|
||||
$c->response->content_type($type);
|
||||
$c->forward('Hydra::View::Plain');
|
||||
@@ -272,29 +297,7 @@ sub download : Chained('buildChain') PathPart {
|
||||
my $path = $product->path;
|
||||
$path .= "/" . join("/", @path) if scalar @path > 0;
|
||||
|
||||
if (isLocalStore) {
|
||||
|
||||
notFound($c, "File '" . $product->path . "' does not exist.") unless -e $product->path;
|
||||
|
||||
# Make sure the file is in the Nix store.
|
||||
$path = checkPath($self, $c, $path);
|
||||
|
||||
# If this is a directory but no "/" is attached, then redirect.
|
||||
if (-d $path && substr($c->request->uri, -1) ne "/") {
|
||||
return $c->res->redirect($c->request->uri . "/");
|
||||
}
|
||||
|
||||
$path = "$path/index.html" if -d $path && -e "$path/index.html";
|
||||
|
||||
notFound($c, "File '$path' does not exist.") if !-e $path;
|
||||
|
||||
notFound($c, "Path '$path' is a directory.") if -d $path;
|
||||
|
||||
$c->serve_static_file($path);
|
||||
|
||||
} else {
|
||||
serveFile($c, $path);
|
||||
}
|
||||
serveFile($c, $path);
|
||||
|
||||
$c->response->headers->last_modified($c->stash->{build}->stoptime);
|
||||
}
|
||||
@@ -307,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
|
||||
error($c, "This build is not finished yet.") unless $build->finished;
|
||||
my $output = $build->buildoutputs->find({name => $outputName});
|
||||
notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
|
||||
gone($c, "Output is no longer available.") unless isValidPath $output->path;
|
||||
gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);
|
||||
|
||||
$c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
|
||||
$c->stash->{current_view} = 'NixNAR';
|
||||
@@ -350,7 +353,7 @@ sub contents : Chained('buildChain') PathPart Args(1) {
|
||||
|
||||
# FIXME: don't use shell invocations below.
|
||||
|
||||
# FIXME: use nix cat-store
|
||||
# FIXME: use nix store cat
|
||||
|
||||
my $res;
|
||||
|
||||
@@ -424,7 +427,7 @@ sub getDependencyGraph {
|
||||
};
|
||||
$$done{$path} = $node;
|
||||
my @refs;
|
||||
foreach my $ref (queryReferences($path)) {
|
||||
foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
|
||||
next if $ref eq $path;
|
||||
next unless $runtime || $ref =~ /\.drv$/;
|
||||
getDependencyGraph($self, $c, $runtime, $done, $ref);
|
||||
@@ -432,7 +435,7 @@ sub getDependencyGraph {
|
||||
}
|
||||
# Show in reverse topological order to flatten the graph.
|
||||
# Should probably do a proper BFS.
|
||||
my @sorted = reverse topoSortPaths(@refs);
|
||||
my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
|
||||
$node->{refs} = [map { $$done{$_} } @sorted];
|
||||
}
|
||||
|
||||
@@ -445,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
|
||||
my $build = $c->stash->{build};
|
||||
my $drvPath = $build->drvpath;
|
||||
|
||||
error($c, "Derivation no longer available.") unless isValidPath $drvPath;
|
||||
error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);
|
||||
|
||||
$c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);
|
||||
|
||||
@@ -460,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {
|
||||
|
||||
requireLocalStore($c);
|
||||
|
||||
error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
|
||||
error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;
|
||||
|
||||
my $done = {};
|
||||
$c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
|
||||
@@ -480,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
|
||||
if (isLocalStore) {
|
||||
foreach my $out ($build->buildoutputs) {
|
||||
notFound($c, "Path " . $out->path . " is no longer available.")
|
||||
unless isValidPath($out->path);
|
||||
unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -495,7 +498,7 @@ sub restart : Chained('buildChain') PathPart Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
my $build = $c->stash->{build};
|
||||
requireRestartPrivileges($c, $build->project);
|
||||
my $n = restartBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search({ id => $build->id }));
|
||||
my $n = restartBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search_rs({ id => $build->id }));
|
||||
error($c, "This build cannot be restarted.") if $n != 1;
|
||||
$c->flash->{successMsg} = "Build has been restarted.";
|
||||
$c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
|
||||
@@ -506,7 +509,7 @@ sub cancel : Chained('buildChain') PathPart Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
my $build = $c->stash->{build};
|
||||
requireCancelBuildPrivileges($c, $build->project);
|
||||
my $n = cancelBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search({ id => $build->id }));
|
||||
my $n = cancelBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search_rs({ id => $build->id }));
|
||||
error($c, "This build cannot be cancelled.") if $n != 1;
|
||||
$c->flash->{successMsg} = "Build has been cancelled.";
|
||||
$c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
|
||||
@@ -578,7 +581,7 @@ sub evals : Chained('buildChain') PathPart('evals') Args(0) {
|
||||
$c->stash->{page} = $page;
|
||||
$c->stash->{resultsPerPage} = $resultsPerPage;
|
||||
$c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
|
||||
$c->stash->{evals} = getEvals($self, $c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage)
|
||||
$c->stash->{evals} = getEvals($c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage)
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -22,8 +22,7 @@ sub channel : Chained('/') PathPart('channel/custom') CaptureArgs(3) {
|
||||
|
||||
my $lastSuccessful = $c->model('DB::Builds')->find(
|
||||
{ 'eval.hasnewbuilds' => 1
|
||||
, project => $projectName
|
||||
, jobset => $jobsetName
|
||||
, jobset_id => $c->stash->{jobset}->id,
|
||||
, job => $channelName
|
||||
, buildstatus => 0
|
||||
},
|
||||
|
||||
@@ -6,6 +6,7 @@ use warnings;
|
||||
use base 'Hydra::Base::Controller::ListBuilds';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use JSON::MaybeXS;
|
||||
use Net::Prometheus;
|
||||
|
||||
sub job : Chained('/') PathPart('job') CaptureArgs(3) {
|
||||
@@ -50,7 +51,7 @@ sub shield :Chained('job') PathPart('shield') Args(0) {
|
||||
|
||||
$c->response->content_type('application/json');
|
||||
$c->stash->{'plain'} = {
|
||||
data => scalar (JSON::Any->objToJson(
|
||||
data => scalar (encode_json(
|
||||
{
|
||||
schemaVersion => 1,
|
||||
label => "hydra build",
|
||||
@@ -68,7 +69,7 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {
|
||||
|
||||
my $lastBuild = $c->stash->{jobset}->builds->find(
|
||||
{ job => $c->stash->{job}, finished => 1 },
|
||||
{ order_by => 'id DESC', rows => 1, columns => [@buildListColumns] }
|
||||
{ order_by => 'id DESC', rows => 1, columns => ["stoptime", "buildstatus", "closuresize", "size"] }
|
||||
);
|
||||
|
||||
$prometheus->new_counter(
|
||||
@@ -91,6 +92,26 @@ sub prometheus : Chained('job') PathPart('prometheus') Args(0) {
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->buildstatus > 0);
|
||||
|
||||
$prometheus->new_gauge(
|
||||
name => "hydra_build_closure_size",
|
||||
help => "Closure size of the last job's build in bytes",
|
||||
labels => [ "project", "jobset", "job" ]
|
||||
)->labels(
|
||||
$c->stash->{project}->name,
|
||||
$c->stash->{jobset}->name,
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->closuresize);
|
||||
|
||||
$prometheus->new_gauge(
|
||||
name => "hydra_build_output_size",
|
||||
help => "Output size of the last job's build in bytes",
|
||||
labels => [ "project", "jobset", "job" ]
|
||||
)->labels(
|
||||
$c->stash->{project}->name,
|
||||
$c->stash->{jobset}->name,
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->size);
|
||||
|
||||
$c->stash->{'plain'} = { data => $prometheus->render };
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
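For reference, a standalone sketch of the Net::Prometheus gauge pattern the two new metrics above follow (the project/jobset/job labels and the byte value are hypothetical):

use strict;
use warnings;
use Net::Prometheus;

my $prometheus = Net::Prometheus->new;

# Same shape as hydra_build_closure_size above: a labelled gauge whose
# sample is taken from the last finished build.
$prometheus->new_gauge(
    name   => "hydra_build_closure_size",
    help   => "Closure size of the last job's build in bytes",
    labels => [ "project", "jobset", "job" ],
)->labels("example", "main", "hello")->inc(123456789);

print $prometheus->render;    # Prometheus text exposition format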
@@ -121,10 +142,10 @@ sub overview : Chained('job') PathPart('') Args(0) {
|
||||
|
||||
my $aggregates = {};
|
||||
my %constituentJobs;
|
||||
foreach my $b (@constituents) {
|
||||
$aggregates->{$b->get_column('aggregate')}->{constituents}->{$b->job} =
|
||||
{ id => $b->id, finished => $b->finished, buildstatus => $b->buildstatus };
|
||||
$constituentJobs{$b->job} = 1;
|
||||
foreach my $build (@constituents) {
|
||||
$aggregates->{$build->get_column('aggregate')}->{constituents}->{$build->job} =
|
||||
{ id => $build->id, finished => $build->finished, buildstatus => $build->buildstatus };
|
||||
$constituentJobs{$build->job} = 1;
|
||||
}
|
||||
|
||||
foreach my $agg (keys %$aggregates) {
|
||||
@@ -144,7 +165,7 @@ sub overview : Chained('job') PathPart('') Args(0) {
|
||||
}
|
||||
|
||||
|
||||
sub metrics_tab : Chained('job') PathPart('metrics-tab') Args(0) {
|
||||
sub metrics_tab : Chained('job') PathPart('metric-tab') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{template} = 'job-metrics-tab.tt';
|
||||
$c->stash->{metrics} = [ $c->stash->{jobset}->buildmetrics->search(
|
||||
|
||||
@@ -41,7 +41,7 @@ sub jobset_GET {
|
||||
|
||||
$c->stash->{template} = 'jobset.tt';
|
||||
|
||||
$c->stash->{evals} = getEvals($self, $c, scalar $c->stash->{jobset}->jobsetevals, 0, 10);
|
||||
$c->stash->{evals} = getEvals($c, scalar $c->stash->{jobset}->jobsetevals, 0, 10);
|
||||
|
||||
$c->stash->{latestEval} = $c->stash->{jobset}->jobsetevals->search({ hasnewbuilds => 1 }, { rows => 1, order_by => ["id desc"] })->single;
|
||||
|
||||
@@ -213,6 +213,22 @@ sub checkInputValue {
|
||||
}
|
||||
|
||||
|
||||
sub knownInputTypes {
|
||||
my ($c) = @_;
|
||||
|
||||
my @keys = keys %{$c->stash->{inputTypes}};
|
||||
my $types = "";
|
||||
my $counter = 0;
|
||||
|
||||
foreach my $key (@keys) {
|
||||
$types = $types . "and ‘$key’" if ++$counter == scalar(@keys);
|
||||
$types = $types . "‘$key’, " if $counter != scalar(@keys);
|
||||
}
|
||||
|
||||
return $types;
|
||||
}
|
||||
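To illustrate, the helper above joins the plugin-provided input types into a human-readable list; a standalone sketch with made-up type names (the real keys come from the loaded plugins, in hash order):

use strict;
use warnings;

my @keys = ("boolean", "git", "string");
my $types = "";
my $counter = 0;

# Same joining logic as knownInputTypes: every key but the last gets
# "‘key’, ", and the last one gets "and ‘key’".
foreach my $key (@keys) {
    $types = $types . "and ‘$key’" if ++$counter == scalar(@keys);
    $types = $types . "‘$key’, " if $counter != scalar(@keys);
}

print "$types\n";    # ‘boolean’, ‘git’, and ‘string’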
|
||||
|
||||
sub updateJobset {
|
||||
my ($c, $jobset) = @_;
|
||||
|
||||
@@ -223,7 +239,7 @@ sub updateJobset {
|
||||
error($c, "Cannot rename jobset to ‘$jobsetName’ since that identifier is already taken.")
|
||||
if $jobsetName ne $oldName && defined $c->stash->{project}->jobsets->find({ name => $jobsetName });
|
||||
|
||||
my $type = int($c->stash->{params}->{"type"}) // 0;
|
||||
my $type = int($c->stash->{params}->{"type"} // 0);
|
||||
|
||||
my ($nixExprPath, $nixExprInput);
|
||||
my $flake;
|
||||
@@ -231,7 +247,7 @@ sub updateJobset {
|
||||
if ($type == 0) {
|
||||
($nixExprPath, $nixExprInput) = nixExprPathFromParams $c;
|
||||
} elsif ($type == 1) {
|
||||
$flake = trim($c->stash->{params}->{"flakeref"});
|
||||
$flake = trim($c->stash->{params}->{"flake"});
|
||||
error($c, "Invalid flake URI ‘$flake’.") if $flake !~ /^[a-zA-Z]/;
|
||||
} else {
|
||||
error($c, "Invalid jobset type.");
|
||||
@@ -245,6 +261,14 @@ sub updateJobset {
|
||||
|
||||
my $checkinterval = int(trim($c->stash->{params}->{checkinterval}));
|
||||
|
||||
my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
|
||||
if ($enable_dynamic_run_command
|
||||
&& !($c->config->{dynamicruncommand}->{enable}
|
||||
&& $jobset->project->enable_dynamic_run_command))
|
||||
{
|
||||
badRequest($c, "Dynamic RunCommand is not enabled by the server or the parent project.");
|
||||
}
|
||||
|
||||
$jobset->update(
|
||||
{ name => $jobsetName
|
||||
, description => trim($c->stash->{params}->{"description"})
|
||||
@@ -252,9 +276,10 @@ sub updateJobset {
|
||||
, nixexprinput => $nixExprInput
|
||||
, enabled => $enabled
|
||||
, enableemail => defined $c->stash->{params}->{enableemail} ? 1 : 0
|
||||
, enable_dynamic_run_command => $enable_dynamic_run_command
|
||||
, emailoverride => trim($c->stash->{params}->{emailoverride}) || ""
|
||||
, hidden => defined $c->stash->{params}->{visible} ? 0 : 1
|
||||
, keepnr => int(trim($c->stash->{params}->{keepnr}))
|
||||
, keepnr => int(trim($c->stash->{params}->{keepnr} // "0"))
|
||||
, checkinterval => $checkinterval
|
||||
, triggertime => ($enabled && $checkinterval > 0) ? $jobset->triggertime // time() : undef
|
||||
, schedulingshares => $shares
|
||||
@@ -275,9 +300,10 @@ sub updateJobset {
|
||||
my $type = $inputData->{type};
|
||||
my $value = $inputData->{value};
|
||||
my $emailresponsible = defined $inputData->{emailresponsible} ? 1 : 0;
|
||||
my $types = knownInputTypes($c);
|
||||
|
||||
error($c, "Invalid input name ‘$name’.") unless $name =~ /^[[:alpha:]][\w-]*$/;
|
||||
error($c, "Invalid input type ‘$type’.") unless defined $c->stash->{inputTypes}->{$type};
|
||||
badRequest($c, "Invalid input name ‘$name’.") unless $name =~ /^[[:alpha:]][\w-]*$/;
|
||||
badRequest($c, "Invalid input type ‘$type’; valid types: $types.") unless defined $c->stash->{inputTypes}->{$type};
|
||||
|
||||
my $input = $jobset->jobsetinputs->create(
|
||||
{ name => $name,
|
||||
@@ -320,7 +346,7 @@ sub evals_GET {
|
||||
$c->stash->{resultsPerPage} = $resultsPerPage;
|
||||
$c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
|
||||
my $offset = ($page - 1) * $resultsPerPage;
|
||||
$c->stash->{evals} = getEvals($self, $c, $evals, $offset, $resultsPerPage);
|
||||
$c->stash->{evals} = getEvals($c, $evals, $offset, $resultsPerPage);
|
||||
my %entity = (
|
||||
evals => [ map { $_->{eval} } @{$c->stash->{evals}} ],
|
||||
first => "?page=1",
|
||||
@@ -338,6 +364,21 @@ sub evals_GET {
|
||||
);
|
||||
}
|
||||
|
||||
sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
|
||||
|
||||
sub errors_GET {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
$c->stash->{template} = 'eval-error.tt';
|
||||
|
||||
my $jobsetName = $c->stash->{params}->{name};
|
||||
$c->stash->{jobset} = $c->stash->{project}->jobsets->find(
|
||||
{ name => $jobsetName },
|
||||
{ '+columns' => { 'errormsg' => 'errormsg' } }
|
||||
);
|
||||
|
||||
$self->status_ok($c, entity => $c->stash->{jobset});
|
||||
}
|
||||
|
||||
# Redirect to the latest finished evaluation of this jobset.
|
||||
sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {
|
||||
|
||||
@@ -6,7 +6,8 @@ use warnings;
|
||||
use base 'Hydra::Base::Controller::NixChannel';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use List::MoreUtils qw(uniq);
|
||||
use Hydra::Helper::BuildDiff;
|
||||
use List::SomeUtils qw(uniq);
|
||||
|
||||
|
||||
sub evalChain : Chained('/') PathPart('eval') CaptureArgs(1) {
|
||||
@@ -63,63 +64,21 @@ sub view_GET {
|
||||
|
||||
$c->stash->{otherEval} = $eval2 if defined $eval2;
|
||||
|
||||
sub cmpBuilds {
|
||||
my ($a, $b) = @_;
|
||||
return $a->get_column('job') cmp $b->get_column('job')
|
||||
|| $a->get_column('system') cmp $b->get_column('system')
|
||||
}
|
||||
|
||||
my @builds = $eval->builds->search($filter, { columns => [@buildListColumns] });
|
||||
my @builds2 = defined $eval2 ? $eval2->builds->search($filter, { columns => [@buildListColumns] }) : ();
|
||||
|
||||
@builds = sort { cmpBuilds($a, $b) } @builds;
|
||||
@builds2 = sort { cmpBuilds($a, $b) } @builds2;
|
||||
|
||||
$c->stash->{stillSucceed} = [];
|
||||
$c->stash->{stillFail} = [];
|
||||
$c->stash->{nowSucceed} = [];
|
||||
$c->stash->{nowFail} = [];
|
||||
$c->stash->{new} = [];
|
||||
$c->stash->{removed} = [];
|
||||
$c->stash->{unfinished} = [];
|
||||
$c->stash->{aborted} = [];
|
||||
|
||||
my $n = 0;
|
||||
foreach my $build (@builds) {
|
||||
my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
|
||||
my $d;
|
||||
my $found = 0;
|
||||
while ($n < scalar(@builds2)) {
|
||||
my $build2 = $builds2[$n];
|
||||
my $d = cmpBuilds($build, $build2);
|
||||
last if $d == -1;
|
||||
if ($d == 0) {
|
||||
$n++;
|
||||
$found = 1;
|
||||
if ($aborted) {
|
||||
# do nothing
|
||||
} elsif ($build->finished == 0 || $build2->finished == 0) {
|
||||
push @{$c->stash->{unfinished}}, $build;
|
||||
} elsif ($build->buildstatus == 0 && $build2->buildstatus == 0) {
|
||||
push @{$c->stash->{stillSucceed}}, $build;
|
||||
} elsif ($build->buildstatus != 0 && $build2->buildstatus != 0) {
|
||||
push @{$c->stash->{stillFail}}, $build;
|
||||
} elsif ($build->buildstatus == 0 && $build2->buildstatus != 0) {
|
||||
push @{$c->stash->{nowSucceed}}, $build;
|
||||
} elsif ($build->buildstatus != 0 && $build2->buildstatus == 0) {
|
||||
push @{$c->stash->{nowFail}}, $build;
|
||||
} else { die; }
|
||||
last;
|
||||
}
|
||||
push @{$c->stash->{removed}}, { job => $build2->get_column('job'), system => $build2->get_column('system') };
|
||||
$n++;
|
||||
}
|
||||
if ($aborted) {
|
||||
push @{$c->stash->{aborted}}, $build;
|
||||
} else {
|
||||
push @{$c->stash->{new}}, $build if !$found;
|
||||
}
|
||||
}
|
||||
my $diff = buildDiff([@builds], [@builds2]);
|
||||
$c->stash->{stillSucceed} = $diff->{stillSucceed};
|
||||
$c->stash->{stillFail} = $diff->{stillFail};
|
||||
$c->stash->{nowSucceed} = $diff->{nowSucceed};
|
||||
$c->stash->{nowFail} = $diff->{nowFail};
|
||||
$c->stash->{new} = $diff->{new};
|
||||
$c->stash->{removed} = $diff->{removed};
|
||||
$c->stash->{unfinished} = $diff->{unfinished};
|
||||
$c->stash->{aborted} = $diff->{aborted};
|
||||
$c->stash->{totalAborted} = $diff->{totalAborted};
|
||||
$c->stash->{totalFailed} = $diff->{totalFailed};
|
||||
$c->stash->{totalQueued} = $diff->{totalQueued};
|
||||
|
||||
$c->stash->{full} = ($c->req->params->{full} || "0") eq "1";
|
||||
|
||||
@@ -129,6 +88,17 @@ sub view_GET {
|
||||
);
|
||||
}
|
||||
|
||||
sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
|
||||
|
||||
sub errors_GET {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
$c->stash->{template} = 'eval-error.tt';
|
||||
|
||||
$c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });
|
||||
|
||||
$self->status_ok($c, entity => $c->stash->{eval});
|
||||
}
|
||||
|
||||
sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
@@ -144,7 +114,7 @@ sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
|
||||
sub cancel : Chained('evalChain') PathPart('cancel') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
requireCancelBuildPrivileges($c, $c->stash->{project});
|
||||
my $n = cancelBuilds($c->model('DB')->schema, $c->stash->{eval}->builds);
|
||||
my $n = cancelBuilds($c->model('DB')->schema, $c->stash->{eval}->builds->search_rs({}));
|
||||
$c->flash->{successMsg} = "$n builds have been cancelled.";
|
||||
$c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));
|
||||
}
|
||||
@@ -153,7 +123,7 @@ sub cancel : Chained('evalChain') PathPart('cancel') Args(0) {
|
||||
sub restart {
|
||||
my ($self, $c, $condition) = @_;
|
||||
requireRestartPrivileges($c, $c->stash->{project});
|
||||
my $builds = $c->stash->{eval}->builds->search({ finished => 1, buildstatus => $condition });
|
||||
my $builds = $c->stash->{eval}->builds->search_rs({ finished => 1, buildstatus => $condition });
|
||||
my $n = restartBuilds($c->model('DB')->schema, $builds);
|
||||
$c->flash->{successMsg} = "$n builds have been restarted.";
|
||||
$c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));
|
||||
|
||||
@@ -78,8 +78,8 @@ sub project_DELETE {
|
||||
requireProjectOwner($c, $c->stash->{project});
|
||||
|
||||
$c->model('DB')->schema->txn_do(sub {
|
||||
$c->stash->{project}->jobsetevals->delete;
|
||||
$c->stash->{project}->builds->delete;
|
||||
$c->stash->{project}->jobsets->delete;
|
||||
$c->stash->{project}->delete;
|
||||
});
|
||||
|
||||
@@ -126,6 +126,7 @@ sub create_jobset : Chained('projectChain') PathPart('create-jobset') Args(0) {
|
||||
$c->stash->{template} = 'edit-jobset.tt';
|
||||
$c->stash->{create} = 1;
|
||||
$c->stash->{totalShares} = getTotalShares($c->model('DB')->schema);
|
||||
$c->stash->{emailNotification} = $c->config->{email_notification} // 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -135,7 +136,7 @@ sub updateProject {
|
||||
my $owner = $project->owner;
|
||||
if ($c->check_user_roles('admin') and defined $c->stash->{params}->{owner}) {
|
||||
$owner = trim $c->stash->{params}->{owner};
|
||||
error($c, "The user name ‘$owner’ does not exist.")
|
||||
badRequest($c, "The user name ‘$owner’ does not exist.")
|
||||
unless defined $c->model('DB::Users')->find($owner);
|
||||
}
|
||||
|
||||
@@ -148,6 +149,11 @@ sub updateProject {
|
||||
my $displayName = trim $c->stash->{params}->{displayname};
|
||||
error($c, "You must specify a display name.") if $displayName eq "";
|
||||
|
||||
my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
|
||||
if ($enable_dynamic_run_command && !$c->config->{dynamicruncommand}->{enable}) {
|
||||
badRequest($c, "Dynamic RunCommand is not enabled by the server.");
|
||||
}
|
||||
|
||||
$project->update(
|
||||
{ name => $projectName
|
||||
, displayname => $displayName
|
||||
@@ -156,11 +162,14 @@ sub updateProject {
|
||||
, enabled => defined $c->stash->{params}->{enabled} ? 1 : 0
|
||||
, hidden => defined $c->stash->{params}->{visible} ? 0 : 1
|
||||
, owner => $owner
|
||||
, declfile => trim($c->stash->{params}->{declfile})
|
||||
, decltype => trim($c->stash->{params}->{decltype})
|
||||
, declvalue => trim($c->stash->{params}->{declvalue})
|
||||
, enable_dynamic_run_command => $enable_dynamic_run_command
|
||||
, declfile => trim($c->stash->{params}->{declarative}->{file})
|
||||
, decltype => trim($c->stash->{params}->{declarative}->{type})
|
||||
, declvalue => trim($c->stash->{params}->{declarative}->{value})
|
||||
});
|
||||
if (length($project->declfile)) {
|
||||
# This logic also exists in the DeclarativeJobsets tests.
|
||||
# TODO: refactor and deduplicate.
|
||||
$project->jobsets->update_or_create(
|
||||
{ name=> ".jobsets"
|
||||
, nixexprinput => ""
|
||||
@@ -168,6 +177,12 @@ sub updateProject {
|
||||
, emailoverride => ""
|
||||
, triggertime => time
|
||||
});
|
||||
} else {
|
||||
$project->jobsets->search({ name => ".jobsets" })->delete;
|
||||
$project->update(
|
||||
{ decltype => ""
|
||||
, declvalue => ""
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,15 +7,20 @@ use base 'Hydra::Base::Controller::ListBuilds';
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use Hydra::View::TT;
|
||||
use Digest::SHA1 qw(sha1_hex);
|
||||
use Nix::Store;
|
||||
use Nix::Config;
|
||||
use Encode;
|
||||
use File::Basename;
|
||||
use JSON;
|
||||
use JSON::MaybeXS;
|
||||
use List::Util qw[min max];
|
||||
use List::MoreUtils qw{any};
|
||||
use List::SomeUtils qw{any};
|
||||
use Net::Prometheus;
|
||||
use Types::Standard qw/StrMatch/;
|
||||
use WWW::Form::UrlEncoded::PP qw();
|
||||
|
||||
use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
|
||||
# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
|
||||
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};
|
||||
|
||||
# Put this controller at top-level.
|
||||
__PACKAGE__->config->{namespace} = '';
|
||||
@@ -30,6 +35,7 @@ sub noLoginNeeded {
|
||||
|
||||
return $whitelisted ||
|
||||
$c->request->path eq "api/push-github" ||
|
||||
$c->request->path eq "api/push-gitea" ||
|
||||
$c->request->path eq "google-login" ||
|
||||
$c->request->path eq "github-redirect" ||
|
||||
$c->request->path eq "github-login" ||
|
||||
@@ -45,11 +51,13 @@ sub begin :Private {
|
||||
$c->stash->{curUri} = $c->request->uri;
|
||||
$c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
|
||||
$c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
|
||||
$c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
|
||||
$c->stash->{curTime} = time;
|
||||
$c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
|
||||
$c->stash->{tracker} = $ENV{"HYDRA_TRACKER"};
|
||||
$c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";
|
||||
$c->stash->{flashMsg} = $c->flash->{flashMsg};
|
||||
$c->stash->{successMsg} = $c->flash->{successMsg};
|
||||
$c->stash->{localStore} = isLocalStore;
|
||||
|
||||
$c->stash->{isPrivateHydra} = $c->config->{private} // "0" ne "0";
|
||||
|
||||
@@ -75,9 +83,9 @@ sub begin :Private {
|
||||
$_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};
|
||||
|
||||
# XSRF protection: require POST requests to have the same origin.
|
||||
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
|
||||
my $referer = $c->req->header('Origin');
|
||||
$referer //= $c->req->header('Referer');
|
||||
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
|
||||
my $referer = $c->req->header('Referer');
|
||||
$referer //= $c->req->header('Origin');
|
||||
my $base = $c->req->base;
|
||||
die unless $base =~ /\/$/;
|
||||
$referer .= "/";
|
||||
@@ -104,7 +112,7 @@ sub deserialize :ActionClass('Deserialize') { }
|
||||
sub index :Path :Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{template} = 'overview.tt';
|
||||
$c->stash->{projects} = [$c->model('DB::Projects')->search({}, {order_by => 'name'})];
|
||||
$c->stash->{projects} = [$c->model('DB::Projects')->search({}, {order_by => ['enabled DESC', 'name']})];
|
||||
$c->stash->{newsItems} = [$c->model('DB::NewsItems')->search({}, { order_by => ['createtime DESC'], rows => 5 })];
|
||||
$self->status_ok($c,
|
||||
entity => $c->stash->{projects}
|
||||
@@ -134,8 +142,9 @@ sub queue_summary :Local :Path('queue-summary') :Args(0) {
|
||||
$c->stash->{template} = 'queue-summary.tt';
|
||||
|
||||
$c->stash->{queued} = dbh($c)->selectall_arrayref(
|
||||
"select project, jobset, count(*) as queued, min(timestamp) as oldest, max(timestamp) as newest from Builds " .
|
||||
"where finished = 0 group by project, jobset order by queued desc",
|
||||
"select jobsets.project as project, jobsets.name as jobset, count(*) as queued, min(timestamp) as oldest, max(timestamp) as newest from Builds " .
|
||||
"join Jobsets jobsets on jobsets.id = builds.jobset_id " .
|
||||
"where finished = 0 group by jobsets.project, jobsets.name order by queued desc",
|
||||
{ Slice => {} });
|
||||
|
||||
$c->stash->{systems} = dbh($c)->selectall_arrayref(
|
||||
@@ -154,7 +163,7 @@ sub status_GET {
|
||||
{ "buildsteps.busy" => { '!=', 0 } },
|
||||
{ order_by => ["globalpriority DESC", "id"],
|
||||
join => "buildsteps",
|
||||
columns => [@buildListColumns]
|
||||
columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
|
||||
})]
|
||||
);
|
||||
}
|
||||
@@ -166,7 +175,7 @@ sub queue_runner_status_GET {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
#my $status = from_json($c->model('DB::SystemStatus')->find('queue-runner')->status);
|
||||
my $status = from_json(`hydra-queue-runner --status`);
|
||||
my $status = decode_json(`hydra-queue-runner --status`);
|
||||
if ($?) { $status->{status} = "unknown"; }
|
||||
my $json = JSON->new->pretty()->canonical();
|
||||
|
||||
@@ -197,8 +206,10 @@ sub machines :Local Args(0) {
|
||||
|
||||
$c->stash->{machines} = $machines;
|
||||
$c->stash->{steps} = dbh($c)->selectall_arrayref(
|
||||
"select build, stepnr, s.system as system, s.drvpath as drvpath, machine, s.starttime as starttime, project, jobset, job, s.busy as busy " .
|
||||
"from BuildSteps s join Builds b on s.build = b.id " .
|
||||
"select build, stepnr, s.system as system, s.drvpath as drvpath, machine, s.starttime as starttime, jobsets.project as project, jobsets.name as jobset, job, s.busy as busy " .
|
||||
"from BuildSteps s " .
|
||||
"join Builds b on s.build = b.id " .
|
||||
"join Jobsets jobsets on jobsets.id = b.jobset_id " .
|
||||
"where busy != 0 order by machine, stepnr",
|
||||
{ Slice => {} });
|
||||
$c->stash->{template} = 'machine-status.tt';
|
||||
@@ -321,7 +332,7 @@ sub nar :Local :Args(1) {
|
||||
else {
|
||||
$path = $Nix::Config::storeDir . "/$path";
|
||||
|
||||
gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path);
|
||||
gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);
|
||||
|
||||
$c->stash->{current_view} = 'NixNAR';
|
||||
$c->stash->{storePath} = $path;
|
||||
@@ -350,18 +361,45 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
|
||||
}
|
||||
|
||||
|
||||
sub narinfo :LocalRegex('^([a-z0-9]+).narinfo$') :Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
|
||||
my ($self, $c, $realisation) = @_;
|
||||
|
||||
if (!isLocalStore) {
|
||||
notFound($c, "There is no binary cache here.");
|
||||
}
|
||||
|
||||
else {
|
||||
my $hash = $c->req->captures->[0];
|
||||
my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
|
||||
my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);
|
||||
|
||||
die if length($hash) != 32;
|
||||
my $path = queryPathFromHashPart($hash);
|
||||
if (!$rawRealisation) {
|
||||
$c->response->status(404);
|
||||
$c->response->content_type('text/plain');
|
||||
$c->stash->{plain}->{data} = "does not exist\n";
|
||||
$c->forward('Hydra::View::Plain');
|
||||
setCacheHeaders($c, 60 * 60);
|
||||
return;
|
||||
}
|
||||
|
||||
$c->response->content_type('text/plain');
|
||||
$c->stash->{plain}->{data} = $rawRealisation;
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
|
||||
my ($self, $c, $narinfo) = @_;
|
||||
|
||||
if (!isLocalStore) {
|
||||
notFound($c, "There is no binary cache here.");
|
||||
}
|
||||
|
||||
else {
|
||||
my ($hash) = $narinfo =~ NARINFO_REGEX;
|
||||
|
||||
die("Hash length was not 32") if length($hash) != 32;
|
||||
my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);
|
||||
|
||||
if (!$path) {
|
||||
$c->response->status(404);
|
||||
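A quick standalone check (values made up) of the two route constants introduced at the top of this controller, NARINFO_REGEX and REALISATIONS_REGEX:

use strict;
use warnings;

use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

# A 32-character store path hash part followed by ".narinfo".
my ($hash) = "0123456789abcdefghijklmnopqrstuv.narinfo" =~ NARINFO_REGEX;

# "sha256:<64 hex chars>!<output name>.doi" for a realisation.
my ($drvOutput) = ("sha256:" . ("a" x 64) . "!out.doi") =~ REALISATIONS_REGEX;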
@@ -399,7 +437,7 @@ sub evals :Local Args(0) {
|
||||
$c->stash->{page} = $page;
|
||||
$c->stash->{resultsPerPage} = $resultsPerPage;
|
||||
$c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
|
||||
$c->stash->{evals} = getEvals($self, $c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage);
|
||||
$c->stash->{evals} = getEvals($c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage);
|
||||
|
||||
$self->status_ok($c, entity => $c->stash->{evals});
|
||||
}
|
||||
@@ -466,8 +504,10 @@ sub search :Local Args(0) {
|
||||
, "jobset.hidden" => 0
|
||||
, iscurrent => 1
|
||||
},
|
||||
{ order_by => ["project", "jobset", "job"], join => ["project", "jobset"]
|
||||
, rows => $c->stash->{limit} + 1
|
||||
{
|
||||
order_by => ["jobset.project", "jobset.name", "job"],
|
||||
join => { "jobset" => "project" },
|
||||
rows => $c->stash->{limit} + 1
|
||||
} )
|
||||
];
|
||||
|
||||
@@ -517,10 +557,29 @@ sub log :Local :Args(1) {
|
||||
my $logPrefix = $c->config->{log_prefix};
|
||||
|
||||
if (defined $logPrefix) {
|
||||
$c->res->redirect($logPrefix . "log/" . basename($drvPath));
|
||||
$c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
|
||||
} else {
|
||||
notFound($c, "The build log of $drvPath is not available.");
|
||||
}
|
||||
}
|
||||
|
||||
sub runcommandlog :Local :Args(1) {
|
||||
my ($self, $c, $uuid) = @_;
|
||||
|
||||
my $tail = $c->request->params->{"tail"};
|
||||
|
||||
die if defined $tail && $tail !~ /^[0-9]+$/;
|
||||
|
||||
my $runlog = $c->model('DB')->resultset('RunCommandLogs')->find({ uuid => $uuid })
|
||||
or notFound($c, "The RunCommand log is not available.");
|
||||
|
||||
my $logFile = constructRunCommandLogPath($runlog);
|
||||
if (-f $logFile) {
|
||||
serveLogFile($c, $logFile, $tail);
|
||||
return;
|
||||
} else {
|
||||
notFound($c, "The RunCommand log is not available.");
|
||||
}
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
@@ -4,14 +4,15 @@ use utf8;
|
||||
use strict;
|
||||
use warnings;
|
||||
use base 'Hydra::Base::Controller::REST';
|
||||
use File::Slurp;
|
||||
use File::Slurper qw(read_text);
|
||||
use Crypt::RandPasswd;
|
||||
use Digest::SHA1 qw(sha1_hex);
|
||||
use Hydra::Config qw(getLDAPConfigAmbient);
|
||||
use Hydra::Helper::Nix;
|
||||
use Hydra::Helper::CatalystUtils;
|
||||
use Hydra::Helper::Email;
|
||||
use LWP::UserAgent;
|
||||
use JSON;
|
||||
use JSON::MaybeXS;
|
||||
use HTML::Entities;
|
||||
use Encode qw(decode);
|
||||
|
||||
@@ -27,8 +28,8 @@ sub login_POST {
|
||||
my $username = $c->stash->{params}->{username} // "";
|
||||
my $password = $c->stash->{params}->{password} // "";
|
||||
|
||||
error($c, "You must specify a user name.") if $username eq "";
|
||||
error($c, "You must specify a password.") if $password eq "";
|
||||
badRequest($c, "You must specify a user name.") if $username eq "";
|
||||
badRequest($c, "You must specify a password.") if $password eq "";
|
||||
|
||||
if ($c->get_auth_realm('ldap') && $c->authenticate({username => $username, password => $password}, 'ldap')) {
|
||||
doLDAPLogin($self, $c, $username);
|
||||
@@ -37,7 +38,11 @@ sub login_POST {
|
||||
accessDenied($c, "Bad username or password.")
|
||||
}
|
||||
|
||||
currentUser_GET($self, $c);
|
||||
$self->status_found(
|
||||
$c,
|
||||
location => $c->uri_for("current-user"),
|
||||
entity => $c->model("DB::Users")->find($c->user->username)
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -52,10 +57,10 @@ sub logout_POST {
|
||||
|
||||
sub doLDAPLogin {
|
||||
my ($self, $c, $username) = @_;
|
||||
|
||||
my $user = $c->find_user({ username => $username });
|
||||
my $LDAPUser = $c->find_user({ username => $username }, 'ldap');
|
||||
my @LDAPRoles = grep { (substr $_, 0, 5) eq "hydra" } $LDAPUser->roles;
|
||||
my @LDAPRoles = $LDAPUser->roles;
|
||||
my $role_mapping = getLDAPConfigAmbient()->{"role_mapping"};
|
||||
|
||||
if (!$user) {
|
||||
$c->model('DB::Users')->create(
|
||||
@@ -75,8 +80,13 @@ sub doLDAPLogin {
|
||||
});
|
||||
}
|
||||
$user->userroles->delete;
|
||||
if (@LDAPRoles) {
|
||||
$user->userroles->create({ role => (substr $_, 6) }) for @LDAPRoles;
|
||||
foreach my $ldap_role (@LDAPRoles) {
|
||||
if (defined($role_mapping->{$ldap_role})) {
|
||||
my $roles = $role_mapping->{$ldap_role};
|
||||
for my $mapped_role (@$roles) {
|
||||
$user->userroles->create({ role => $mapped_role });
|
||||
}
|
||||
}
|
||||
}
|
||||
$c->set_authenticated($user);
|
||||
}
|
||||
@@ -139,7 +149,7 @@ sub google_login :Path('/google-login') Args(0) {
|
||||
|
||||
error($c, "Logging in via Google is not enabled.") unless $c->config->{enable_google_login};
|
||||
|
||||
my $ua = new LWP::UserAgent;
|
||||
my $ua = LWP::UserAgent->new();
|
||||
my $response = $ua->post(
|
||||
'https://www.googleapis.com/oauth2/v3/tokeninfo',
|
||||
{ id_token => ($c->stash->{params}->{id_token} // die "No token."),
|
||||
@@ -161,13 +171,13 @@ sub github_login :Path('/github-login') Args(0) {
|
||||
my $client_id = $c->config->{github_client_id} or die "github_client_id not configured.";
|
||||
my $client_secret = $c->config->{github_client_secret} // do {
|
||||
my $client_secret_file = $c->config->{github_client_secret_file} or die "github_client_secret nor github_client_secret_file is configured.";
|
||||
my $client_secret = read_file($client_secret_file);
|
||||
my $client_secret = read_text($client_secret_file);
|
||||
$client_secret =~ s/\s+//;
|
||||
$client_secret;
|
||||
};
|
||||
die "No github secret configured" unless $client_secret;
|
||||
|
||||
my $ua = new LWP::UserAgent;
|
||||
my $ua = LWP::UserAgent->new();
|
||||
my $response = $ua->post(
|
||||
'https://github.com/login/oauth/access_token',
|
||||
{
|
||||
@@ -229,12 +239,6 @@ sub isValidPassword {
|
||||
}
|
||||
|
||||
|
||||
sub setPassword {
|
||||
my ($user, $password) = @_;
|
||||
$user->update({ password => sha1_hex($password) });
|
||||
}
|
||||
|
||||
|
||||
sub register :Local Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
@@ -294,7 +298,7 @@ sub updatePreferences {
|
||||
error($c, "The passwords you specified did not match.")
|
||||
if $password ne trim $c->stash->{params}->{password2};
|
||||
|
||||
setPassword($user, $password);
|
||||
$user->setPassword($password);
|
||||
}
|
||||
|
||||
my $emailAddress = trim($c->stash->{params}->{emailaddress} // "");
|
||||
@@ -394,7 +398,7 @@ sub reset_password :Chained('user') :PathPart('reset-password') :Args(0) {
|
||||
unless $user->emailaddress;
|
||||
|
||||
my $password = Crypt::RandPasswd->word(8,10);
|
||||
setPassword($user, $password);
|
||||
$user->setPassword($password);
|
||||
sendEmail(
|
||||
$c->config,
|
||||
$user->emailaddress,
|
||||
@@ -459,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
|
||||
, "jobset.enabled" => 1
|
||||
},
|
||||
{ order_by => ["project", "jobset", "job"]
|
||||
, join => ["project", "jobset"]
|
||||
, join => {"jobset" => "project"}
|
||||
})];
|
||||
}
|
||||
|
||||
|
||||
62
src/lib/Hydra/Event.pm
Normal file
@@ -0,0 +1,62 @@
|
||||
package Hydra::Event;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Hydra::Event::BuildFinished;
|
||||
use Hydra::Event::BuildQueued;
|
||||
use Hydra::Event::BuildStarted;
|
||||
use Hydra::Event::CachedBuildFinished;
|
||||
use Hydra::Event::CachedBuildQueued;
|
||||
use Hydra::Event::EvalAdded;
|
||||
use Hydra::Event::EvalCached;
|
||||
use Hydra::Event::EvalFailed;
|
||||
use Hydra::Event::EvalStarted;
|
||||
use Hydra::Event::StepFinished;
|
||||
|
||||
my %channels_to_events = (
|
||||
build_finished => \&Hydra::Event::BuildFinished::parse,
|
||||
build_queued => \&Hydra::Event::BuildQueued::parse,
|
||||
build_started => \&Hydra::Event::BuildStarted::parse,
|
||||
cached_build_finished => \&Hydra::Event::CachedBuildFinished::parse,
|
||||
cached_build_queued => \&Hydra::Event::CachedBuildQueued::parse,
|
||||
eval_added => \&Hydra::Event::EvalAdded::parse,
|
||||
eval_cached => \&Hydra::Event::EvalCached::parse,
|
||||
eval_failed => \&Hydra::Event::EvalFailed::parse,
|
||||
eval_started => \&Hydra::Event::EvalStarted::parse,
|
||||
step_finished => \&Hydra::Event::StepFinished::parse,
|
||||
);
|
||||
|
||||
|
||||
sub parse_payload :prototype($$) {
|
||||
my ($channel_name, $payload) = @_;
|
||||
my @payload = split /\t/, $payload;
|
||||
|
||||
my $parser = $channels_to_events{$channel_name};
|
||||
unless (defined $parser) {
|
||||
die "Invalid channel name: '$channel_name'";
|
||||
}
|
||||
|
||||
return $parser->(@payload);
|
||||
}
|
||||
|
||||
|
||||
sub new_event {
|
||||
my ($self, $channel_name, $payload) = @_;
|
||||
|
||||
return bless {
|
||||
"channel_name" => $channel_name,
|
||||
"payload" => $payload,
|
||||
"event" => parse_payload($channel_name, $payload),
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
|
||||
return $self->{"event"}->interestedIn($plugin);
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
return $self->{"event"}->execute($db, $plugin);
|
||||
}
|
||||
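A minimal usage sketch for the dispatcher above (the channel name and payload are hypothetical; build_finished payloads are tab-separated build IDs, the finished build first, dependents after):

use strict;
use warnings;
use Hydra::Event;

# Parses the payload via Hydra::Event::BuildFinished::parse and wraps it.
my $event = Hydra::Event->new_event("build_finished", "4077\t4078\t4079");

# The event reports interest only in plugins that implement buildFinished();
# execute() then loads the builds from the DB and calls that hook:
#   $event->interestedIn($plugin) and $event->execute($db, $plugin);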
70
src/lib/Hydra/Event/BuildFinished.pm
Normal file
@@ -0,0 +1,70 @@
|
||||
package Hydra::Event::BuildFinished;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub parse :prototype(@) {
|
||||
if (@_ == 0) {
|
||||
die "build_finished: payload takes at least one argument, but ", scalar(@_), " were given";
|
||||
}
|
||||
|
||||
my @failures = grep(!/^\d+$/, @_);
|
||||
if (@failures > 0) {
|
||||
die "build_finished: payload arguments should be integers, but we received the following non-integers:", @failures;
|
||||
}
|
||||
|
||||
my ($build_id, @dependents) = map int, @_;
|
||||
return Hydra::Event::BuildFinished->new($build_id, \@dependents);
|
||||
}
|
||||
|
||||
sub new {
|
||||
my ($self, $build_id, $dependent_ids) = @_;
|
||||
return bless {
|
||||
"build_id" => $build_id,
|
||||
"dependent_ids" => $dependent_ids,
|
||||
"build" => undef,
|
||||
"dependents" => [],
|
||||
}, $self;
|
||||
}
|
||||
|
||||
sub interestedIn {
|
||||
my ($self, $plugin) = @_;
|
||||
return int(defined($plugin->can('buildFinished')));
|
||||
}
|
||||
|
||||
sub load {
|
||||
my ($self, $db) = @_;
|
||||
|
||||
if (!defined($self->{"build"})) {
|
||||
$self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
|
||||
or die "build $self->{'build_id'} does not exist\n";
|
||||
|
||||
foreach my $id (@{$self->{"dependent_ids"}}) {
|
||||
my $dep = $db->resultset('Builds')->find($id)
|
||||
or die "dependent build $id does not exist\n";
|
||||
push @{$self->{"dependents"}}, $dep;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub execute {
|
||||
my ($self, $db, $plugin) = @_;
|
||||
|
||||
$self->load($db);
|
||||
|
||||
$plugin->buildFinished($self->{"build"}, $self->{"dependents"});
|
||||
|
||||
# Mark the build and all dependents as having their notifications "finished".
|
||||
#
|
||||
# Otherwise, the dependent builds will remain with notificationpendingsince set
|
||||
# until hydra-notify is started, as buildFinished is never emitted for them.
|
||||
foreach my $build ($self->{"build"}, @{$self->{"dependents"}}) {
|
||||
if ($build->finished && defined($build->notificationpendingsince)) {
|
||||
$build->update({ notificationpendingsince => undef })
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
1;
|
||||
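For illustration only (the IDs are made up): the first payload field is the finished build and any further fields are dependent builds, so a three-field payload parses as follows.

# Payload "123\t456\t789" arrives as three tab-separated fields.
my $event = Hydra::Event::BuildFinished::parse("123", "456", "789");
# $event->{"build_id"}      => 123
# $event->{"dependent_ids"} => [456, 789]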
52
src/lib/Hydra/Event/BuildQueued.pm
Normal file
@@ -0,0 +1,52 @@
package Hydra::Event::BuildQueued;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 1) {
        die "build_queued: payload takes only one argument, but ", scalar(@_), " were given";
    }

    my ($build_id) = @_;

    unless ($build_id =~ /^\d+$/) {
        die "build_queued: payload argument should be an integer, but '", $build_id, "' was given"
    }

    return Hydra::Event::BuildQueued->new(int($build_id));
}

sub new {
    my ($self, $id) = @_;
    return bless {
        "build_id" => $id,
        "build" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('buildQueued')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"build"})) {
        $self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
            or die "build $self->{'build_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->buildQueued($self->{"build"});

    return 1;
}

1;
52
src/lib/Hydra/Event/BuildStarted.pm
Normal file
@@ -0,0 +1,52 @@
package Hydra::Event::BuildStarted;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 1) {
        die "build_started: payload takes only one argument, but ", scalar(@_), " were given";
    }

    my ($build_id) = @_;

    unless ($build_id =~ /^\d+$/) {
        die "build_started: payload argument should be an integer, but '", $build_id, "' was given"
    }

    return Hydra::Event::BuildStarted->new(int($build_id));
}

sub new {
    my ($self, $id) = @_;
    return bless {
        "build_id" => $id,
        "build" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('buildStarted')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"build"})) {
        $self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
            or die "build $self->{'build_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->buildStarted($self->{"build"});

    return 1;
}

1;
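A hedged sketch of the sending side, which this commit does not show: the events above are expected to arrive via PostgreSQL NOTIFY, so a producer could emit one with pg_notify and a tab-separated payload. The DBI handle $dbh here is a placeholder.

# Emit a build_started notification for build 4317; a hydra-notify style
# listener would route it through Hydra::Event::BuildStarted::parse.
$dbh->do("SELECT pg_notify(?, ?)", undef, "build_started", "4317");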
59
src/lib/Hydra/Event/CachedBuildFinished.pm
Normal file
@@ -0,0 +1,59 @@
package Hydra::Event::CachedBuildFinished;

use strict;
use warnings;

sub parse :prototype(@) {
    if (@_ != 2) {
        die "cached_build_finished: payload takes two arguments, but ", scalar(@_), " were given";
    }

    my @failures = grep(!/^\d+$/, @_);
    if (@failures > 0) {
        die "cached_build_finished: payload arguments should be integers, but we received the following non-integers:", @failures;
    }

    my ($evaluation_id, $build_id) = map int, @_;
    return Hydra::Event::CachedBuildFinished->new($evaluation_id, $build_id);
}

sub new {
    my ($self, $evaluation_id, $build_id) = @_;
    return bless {
        "evaluation_id" => $evaluation_id,
        "build_id" => $build_id,
        "evaluation" => undef,
        "build" => undef,
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('cachedBuildFinished')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"build"})) {
        $self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
            or die "build $self->{'build_id'} does not exist\n";
    }

    if (!defined($self->{"evaluation"})) {
        $self->{"evaluation"} = $db->resultset('JobsetEvals')->find($self->{"evaluation_id"})
            or die "evaluation $self->{'evaluation_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->cachedBuildFinished($self->{"evaluation"}, $self->{"build"});

    return 1;
}

1;
59
src/lib/Hydra/Event/CachedBuildQueued.pm
Normal file
@@ -0,0 +1,59 @@
package Hydra::Event::CachedBuildQueued;

use strict;
use warnings;

sub parse :prototype(@) {
    if (@_ != 2) {
        die "cached_build_queued: payload takes two arguments, but ", scalar(@_), " were given";
    }

    my @failures = grep(!/^\d+$/, @_);
    if (@failures > 0) {
        die "cached_build_queued: payload arguments should be integers, but we received the following non-integers:", @failures;
    }

    my ($evaluation_id, $build_id) = map int, @_;
    return Hydra::Event::CachedBuildQueued->new($evaluation_id, $build_id);
}

sub new {
    my ($self, $evaluation_id, $build_id) = @_;
    return bless {
        "evaluation_id" => $evaluation_id,
        "build_id" => $build_id,
        "evaluation" => undef,
        "build" => undef,
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('cachedBuildQueued')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"build"})) {
        $self->{"build"} = $db->resultset('Builds')->find($self->{"build_id"})
            or die "build $self->{'build_id'} does not exist\n";
    }

    if (!defined($self->{"evaluation"})) {
        $self->{"evaluation"} = $db->resultset('JobsetEvals')->find($self->{"evaluation_id"})
            or die "evaluation $self->{'evaluation_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->cachedBuildQueued($self->{"evaluation"}, $self->{"build"});

    return 1;
}

1;
63
src/lib/Hydra/Event/EvalAdded.pm
Normal file
@@ -0,0 +1,63 @@
package Hydra::Event::EvalAdded;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 3) {
        die "eval_added: payload takes exactly three arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id, $evaluation_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_added: payload argument jobset_id should be an integer, but '", $jobset_id, "' was given"
    }
    unless ($evaluation_id =~ /^\d+$/) {
        die "eval_added: payload argument evaluation_id should be an integer, but '", $evaluation_id, "' was given"
    }

    return Hydra::Event::EvalAdded->new($trace_id, int($jobset_id), int($evaluation_id));
}

sub new {
    my ($self, $trace_id, $jobset_id, $evaluation_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "evaluation_id" => $evaluation_id,
        "jobset" => undef,
        "evaluation" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalAdded')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }

    if (!defined($self->{"evaluation"})) {
        $self->{"evaluation"} = $db->resultset('JobsetEvals')->find({ id => $self->{"evaluation_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalAdded($self->{"trace_id"}, $self->{"jobset"}, $self->{"evaluation"});

    return 1;
}

1;
63
src/lib/Hydra/Event/EvalCached.pm
Normal file
@@ -0,0 +1,63 @@
package Hydra::Event::EvalCached;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 3) {
        die "eval_cached: payload takes exactly three arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id, $evaluation_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_cached: payload argument jobset_id should be an integer, but '", $jobset_id, "' was given"
    }
    unless ($evaluation_id =~ /^\d+$/) {
        die "eval_cached: payload argument evaluation_id should be an integer, but '", $evaluation_id, "' was given"
    }

    return Hydra::Event::EvalCached->new($trace_id, int($jobset_id), int($evaluation_id));
}

sub new {
    my ($self, $trace_id, $jobset_id, $evaluation_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "evaluation_id" => $evaluation_id,
        "jobset" => undef,
        "evaluation" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalCached')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }

    if (!defined($self->{"evaluation"})) {
        $self->{"evaluation"} = $db->resultset('JobsetEvals')->find({ id => $self->{"evaluation_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalCached($self->{"trace_id"}, $self->{"jobset"}, $self->{"evaluation"});

    return 1;
}

1;
53
src/lib/Hydra/Event/EvalFailed.pm
Normal file
@@ -0,0 +1,53 @@
package Hydra::Event::EvalFailed;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 2) {
        die "eval_failed: payload takes two arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_failed: payload argument should be an integer, but '", $jobset_id, "' was given"
    }

    return Hydra::Event::EvalFailed->new($trace_id, int($jobset_id));
}

sub new {
    my ($self, $trace_id, $jobset_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "jobset" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalFailed')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalFailed($self->{"trace_id"}, $self->{"jobset"});

    return 1;
}

1;
53
src/lib/Hydra/Event/EvalStarted.pm
Normal file
@@ -0,0 +1,53 @@
package Hydra::Event::EvalStarted;

use strict;
use warnings;

sub parse :prototype(@) {
    unless (@_ == 2) {
        die "eval_started: payload takes two arguments, but ", scalar(@_), " were given";
    }

    my ($trace_id, $jobset_id) = @_;

    unless ($jobset_id =~ /^\d+$/) {
        die "eval_started: payload argument should be an integer, but '", $jobset_id, "' was given"
    }

    return Hydra::Event::EvalStarted->new($trace_id, int($jobset_id));
}

sub new {
    my ($self, $trace_id, $jobset_id) = @_;
    return bless {
        "trace_id" => $trace_id,
        "jobset_id" => $jobset_id,
        "jobset" => undef
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('evalStarted')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"jobset"})) {
        $self->{"jobset"} = $db->resultset('Jobsets')->find({ id => $self->{"jobset_id"}})
            or die "Jobset $self->{'jobset_id'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->evalStarted($self->{"trace_id"}, $self->{"jobset"});

    return 1;
}

1;
64
src/lib/Hydra/Event/StepFinished.pm
Normal file
@@ -0,0 +1,64 @@
package Hydra::Event::StepFinished;

use strict;
use warnings;


sub parse :prototype(@) {
    unless (@_ == 3) {
        die "step_finished: payload takes exactly three arguments, but ", scalar(@_), " were given";
    }

    my ($build_id, $step_number, $log_path) = @_;

    unless ($build_id =~ /^\d+$/) {
        die "step_finished: payload argument build_id should be an integer, but '", $build_id, "' was given"
    }
    unless ($step_number =~ /^\d+$/) {
        die "step_finished: payload argument step_number should be an integer, but '", $step_number, "' was given"
    }

    return Hydra::Event::StepFinished->new(int($build_id), int($step_number), $log_path);
}

sub new :prototype($$$) {
    my ($self, $build_id, $step_number, $log_path) = @_;

    $log_path = undef if $log_path eq "-";

    return bless {
        "build_id" => $build_id,
        "step_number" => $step_number,
        "log_path" => $log_path,
        "step" => undef,
    }, $self;
}

sub interestedIn {
    my ($self, $plugin) = @_;
    return int(defined($plugin->can('stepFinished')));
}

sub load {
    my ($self, $db) = @_;

    if (!defined($self->{"step"})) {
        my $build = $db->resultset('Builds')->find($self->{"build_id"})
            or die "build $self->{'build_id'} does not exist\n";

        $self->{"step"} = $build->buildsteps->find({stepnr => $self->{"step_number"}})
            or die "step $self->{'step_number'} does not exist\n";
    }
}

sub execute {
    my ($self, $db, $plugin) = @_;

    $self->load($db);

    $plugin->stepFinished($self->{"step"}, $self->{"log_path"});

    return 1;
}

1;
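Worth noting (illustration only, made-up IDs): the third payload field is a log path, and the literal "-" is turned into undef by the constructor, so a step without a log parses like this.

my $event = Hydra::Event::StepFinished::parse("94", "1", "-");
# $event->{"build_id"}    => 94
# $event->{"step_number"} => 1
# $event->{"log_path"}    => undef   ("-" means no log was written)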
@@ -1,9 +1,10 @@
package Hydra::Helper::AddBuilds;

use strict;
use warnings;
use utf8;
use Encode;
use JSON;
use JSON::MaybeXS;
use Nix::Store;
use Nix::Config;
use Hydra::Model::DB;
@@ -14,19 +15,20 @@ use File::stat;
use File::Path;
use File::Temp;
use File::Spec;
use File::Slurp;
use Hydra::Helper::CatalystUtils;

our @ISA = qw(Exporter);
our @EXPORT = qw(
    validateDeclarativeJobset
    createJobsetInputsRowAndData
    updateDeclarativeJobset
    handleDeclarativeJobsetBuild
    handleDeclarativeJobsetJson
);


sub updateDeclarativeJobset {
    my ($db, $project, $jobsetName, $declSpec) = @_;
sub validateDeclarativeJobset {
    my ($config, $project, $jobsetName, $declSpec) = @_;

    my @allowed_keys = qw(
        enabled
@@ -39,6 +41,7 @@ sub updateDeclarativeJobset {
        checkinterval
        schedulingshares
        enableemail
        enable_dynamic_run_command
        emailoverride
        keepnr
    );
@@ -61,15 +64,39 @@ sub updateDeclarativeJobset {
        }
    }

    my $enable_dynamic_run_command = defined $update{enable_dynamic_run_command} ? 1 : 0;
    if ($enable_dynamic_run_command
        && !($config->{dynamicruncommand}->{enable}
            && $project->enable_dynamic_run_command))
    {
        die "Dynamic RunCommand is not enabled by the server or the parent project.";
    }

    return %update;
}

sub createJobsetInputsRowAndData {
    my ($name, $declSpec) = @_;
    my $data = $declSpec->{"inputs"}->{$name};
    my $row = {
        name => $name,
        type => $data->{type}
    };
    $row->{emailresponsible} = $data->{emailresponsible} // 0;

    return ($row, $data);
}

sub updateDeclarativeJobset {
    my ($config, $db, $project, $jobsetName, $declSpec) = @_;

    my %update = validateDeclarativeJobset($config, $project, $jobsetName, $declSpec);

    $db->txn_do(sub {
        my $jobset = $project->jobsets->update_or_create(\%update);
        $jobset->jobsetinputs->delete;
        while ((my $name, my $data) = each %{$declSpec->{"inputs"}}) {
            my $row = {
                name => $name,
                type => $data->{type}
            };
            $row->{emailresponsible} = $data->{emailresponsible} // 0;
        foreach my $name (keys %{$declSpec->{"inputs"}}) {
            my ($row, $data) = createJobsetInputsRowAndData($name, $declSpec);
            my $input = $jobset->jobsetinputs->create($row);
            $input->jobsetinputalts->create({altnr => 0, value => $data->{value}});
        }
@@ -80,13 +107,15 @@ sub updateDeclarativeJobset {

sub handleDeclarativeJobsetJson {
    my ($db, $project, $declSpec) = @_;
    my $config = getHydraConfig();
    $db->txn_do(sub {
        my @kept = keys %$declSpec;
        push @kept, ".jobsets";
        $project->jobsets->search({ name => { "not in" => \@kept } })->update({ enabled => 0, hidden => 1 });
        while ((my $jobsetName, my $spec) = each %$declSpec) {
        foreach my $jobsetName (keys %$declSpec) {
            my $spec = $declSpec->{$jobsetName};
            eval {
                updateDeclarativeJobset($db, $project, $jobsetName, $spec);
                updateDeclarativeJobset($config, $db, $project, $jobsetName, $spec);
                1;
            } or do {
                print STDERR "ERROR: failed to process declarative jobset ", $project->name, ":${jobsetName}, ", $@, "\n";

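To make the new call shape concrete, a hedged, partial sketch of a declarative jobset spec as consumed above. The field names come from the allowed-keys list and the inputs handling shown in this hunk; the values are invented, and the parts of the validation that this diff elides may require additional keys.

# Partial spec, for illustration only.
my $declSpec = {
    "enabled" => 1,
    "enable_dynamic_run_command" => 1,   # dies unless the server's dynamicruncommand.enable
                                         # setting and the parent project both allow it
    "inputs" => {
        "nixpkgs" => { "type" => "git", "value" => "https://github.com/NixOS/nixpkgs.git" },
    },
};
my %update = validateDeclarativeJobset($config, $project, "my-jobset", $declSpec);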
56
src/lib/Hydra/Helper/AttributeSet.pm
Normal file
@@ -0,0 +1,56 @@
package Hydra::Helper::AttributeSet;

use strict;
use warnings;

sub new {
    my ($self) = @_;
    return bless { "paths" => [] }, $self;
}

sub registerValue {
    my ($self, $attributePath) = @_;

    my @pathParts = splitPath($attributePath);

    pop(@pathParts);
    if (scalar(@pathParts) == 0) {
        return;
    }

    my $lineage = "";
    for my $pathPart (@pathParts) {
        $lineage = $self->registerChild($lineage, $pathPart);
    }
}

sub registerChild {
    my ($self, $parent, $attributePath) = @_;
    if ($parent ne "") {
        $parent .= "."
    }

    my $name = $parent . $attributePath;
    if (!grep { $_ eq $name} @{$self->{"paths"}}) {
        push(@{$self->{"paths"}}, $name);
    }
    return $name;
}

sub splitPath {
    my ($s) = @_;

    if ($s eq "") {
        return ('')
    }

    return split(/\./, $s, -1);
}

sub enumerate {
    my ($self) = @_;
    my @paths = sort { length($a) <=> length($b) } @{$self->{"paths"}};
    return @paths;
}

1;
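A small usage sketch (not part of the commit) of the helper above: registering a dotted attribute path records every ancestor attribute set, and enumerate returns them shortest-first.

use Hydra::Helper::AttributeSet;

my $set = Hydra::Helper::AttributeSet->new();
$set->registerValue("jobs.x86_64-linux.hydra");   # the leaf itself is dropped,
$set->registerValue("jobs.x86_64-linux.tests");   # only parent attrsets are kept

# Yields ("jobs", "jobs.x86_64-linux"), sorted by path length.
my @attrsets = $set->enumerate();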
103
src/lib/Hydra/Helper/BuildDiff.pm
Normal file
@@ -0,0 +1,103 @@
package Hydra::Helper::BuildDiff;

use utf8;
use strict;
use warnings;

our @ISA = qw(Exporter);
our @EXPORT = qw(
    buildDiff
);

sub cmpBuilds {
    my ($left, $right) = @_;
    return $left->get_column('job') cmp $right->get_column('job')
        || $left->get_column('system') cmp $right->get_column('system')
}

sub buildDiff {
    # $builds is the list of current builds
    # $builds2 is the list of previous (to-be-compared-to) builds
    my ($builds, $builds2) = @_;

    $builds = [sort { cmpBuilds($a, $b) } @{$builds}];
    $builds2 = [sort { cmpBuilds($a, $b) } @{$builds2}];

    my $ret = {
        stillSucceed => [],
        stillFail => [],
        nowSucceed => [],
        nowFail => [],
        new => [],
        removed => [],
        unfinished => [],
        aborted => [],

        # These summary counters cut across the categories to determine whether
        # actions such as "Restart all failed" or "Bump queue" are available.
        totalAborted => 0,
        totalFailed => 0,
        totalQueued => 0,
    };

    my $n = 0;
    foreach my $build (@{$builds}) {
        my $aborted = $build->finished != 0 && (
            # aborted
            $build->buildstatus == 3
            # cancelled
            || $build->buildstatus == 4
            # timeout
            || $build->buildstatus == 7
            # log limit exceeded
            || $build->buildstatus == 10
        );
        my $d;
        my $found = 0;
        while ($n < scalar(@{$builds2})) {
            my $build2 = @{$builds2}[$n];
            my $d = cmpBuilds($build, $build2);
            last if $d == -1;
            if ($d == 0) {
                $n++;
                $found = 1;
                if ($aborted) {
                    # do nothing
                } elsif ($build->finished == 0 || $build2->finished == 0) {
                    push @{$ret->{unfinished}}, $build;
                } elsif ($build->buildstatus == 0 && $build2->buildstatus == 0) {
                    push @{$ret->{stillSucceed}}, $build;
                } elsif ($build->buildstatus != 0 && $build2->buildstatus != 0) {
                    push @{$ret->{stillFail}}, $build;
                } elsif ($build->buildstatus == 0 && $build2->buildstatus != 0) {
                    push @{$ret->{nowSucceed}}, $build;
                } elsif ($build->buildstatus != 0 && $build2->buildstatus == 0) {
                    push @{$ret->{nowFail}}, $build;
                } else { die; }
                last;
            }
            my $job_system = { job => $build2->get_column('job'), system => $build2->get_column('system') };
            push @{$ret->{removed}}, $job_system;
            $n++;
        }
        if ($aborted) {
            push @{$ret->{aborted}}, $build;
        } else {
            push @{$ret->{new}}, $build if !$found;
        }

        if ($build->finished != 0 && $build->buildstatus != 0) {
            if ($aborted) {
                ++$ret->{totalAborted};
            } else {
                ++$ret->{totalFailed};
            }
        } elsif ($build->finished == 0) {
            ++$ret->{totalQueued};
        }
    }

    return $ret;
}

1;
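And a hedged sketch of how buildDiff might be called from a controller, assuming two JobsetEvals rows $eval and $eval2 with a builds relationship as elsewhere in Hydra; the variable names are placeholders.

use Hydra::Helper::BuildDiff;

# Compare the builds of the current evaluation against the previous one.
my $diff = buildDiff(
    [ $eval->builds->all ],
    [ $eval2->builds->all ],
);

printf "%d now failing, %d newly succeeding, %d still queued\n",
    scalar(@{$diff->{nowFail}}), scalar(@{$diff->{nowSucceed}}), $diff->{totalQueued};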
Some files were not shown because too many files have changed in this diff.