Compare commits

41 commits: 7a0b6b50f3 ... 33a935e8ef

33a935e8ef
65618fd590
06ba54fca7
5b9c22dd18
e15070c6c2
37744c7018
1e3929e75f
28da0a705f
2050b2c324
21d6d805ba
478bb01f7f
08bf31b71a
641056bd0e
29a7ab8009
eddc234915
80f917d8fa
5cb82812f2
17094c8371
d5fb163618
baec2bbb4c
b55bd25581
1ca17faed4
9c022848cf
f58a752419
0769853dec
21c6afa83b
1022514027
2d4232475c
d799742057
485aa93f2d
590e8d8511
90a8a0d94a
eb17619ee5
ebefdb0a3d
55349930f1
847a8ae6cd
86d0009448
a20f37b97f
a94f84118c
99e3ad325c
2f1fa2b069
.github/workflows/test.yml (vendored; 3 changes)

@@ -1,7 +1,10 @@
 name: "Test"
 on:
   pull_request:
+  merge_group:
   push:
+    branches:
+      - master
 jobs:
   tests:
     runs-on: ubuntu-latest
.gitignore (vendored; 1 change)

@@ -3,6 +3,7 @@
 /src/sql/hydra-postgresql.sql
 /src/sql/hydra-sqlite.sql
 /src/sql/tmp.sqlite
+.hydra-data
 result
 result-*
 outputs
README.md (27 changes)

@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
 You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
 
 ```
-$ nix-build
+$ nix build
 ```
 
 ### Development Environment
 
 You can use the provided shell.nix to get a working development environment:
 ```
-$ nix-shell
-$ autoreconfPhase
-$ configurePhase # NOTE: not ./configure
-$ make
+$ nix develop
+$ mesonConfigurePhase
+$ ninja
 ```
 
 ### Executing Hydra During Development
@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
 can be done using [foreman](https://github.com/ddollar/foreman):
 
 ```
-$ nix-shell
+$ nix develop
 $ # hack hack
-$ make
+$ ninja -C build
 $ foreman start
 ```
 
@@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environment).
 Then, you can run the tests and the perlcritic linter together with:
 
 ```console
-$ nix-shell
-$ make check
+$ nix develop
+$ ninja -C build test
 ```
 
 You can run a single test with:
 
 ```
-$ nix-shell
-$ yath test ./t/foo/bar.t
+$ nix develop
+$ cd build
+$ meson test --test-args=../t/Hydra/Event.t testsuite
 ```
 
 And you can run just perlcritic with:
 
 ```
-$ nix-shell
-$ make perlcritic
+$ nix develop
+$ cd build
+$ meson test perlcritic
 ```
 
 ### JSON API
@@ -11,12 +11,6 @@ $ cd hydra
 To enter a shell in which all environment variables (such as `PERL5LIB`)
 and dependencies can be found:
 
-```console
-$ nix-shell
-```
-
-of when flakes are enabled:
-
 ```console
 $ nix develop
 ```
@@ -24,15 +18,15 @@ $ nix develop
 To build Hydra, you should then do:
 
 ```console
-[nix-shell]$ autoreconfPhase
-[nix-shell]$ configurePhase
-[nix-shell]$ make -j$(nproc)
+$ mesonConfigurePhase
+$ ninja
 ```
 
 You start a local database, the webserver, and other components with
 foreman:
 
 ```console
+$ ninja -C build
 $ foreman start
 ```
 
@@ -47,18 +41,11 @@ $ ./src/script/hydra-server
 You can run Hydra's test suite with the following:
 
 ```console
-[nix-shell]$ make check
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
-[nix-shell]$ # or run yath directly:
-[nix-shell]$ yath test
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ yath test -j $NIX_BUILD_CORES
+$ meson test
+# to run as many tests as you have cores:
+$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
 ```
 
-When using `yath` instead of `make check`, ensure you have run `make`
-in the root of the repository at least once.
-
 **Warning**: Currently, the tests can fail
 if run with high parallelism [due to an issue in
 `Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
@@ -75,7 +62,7 @@ will reload the page every time you save.
 To build Hydra and its dependencies:
 
 ```console
-$ nix-build release.nix -A build.x86_64-linux
+$ nix build .#packages.x86_64-linux.default
 ```
 
 ## Development Tasks
@@ -92,6 +92,23 @@ Sets Gitea CI status
 
 - `gitea_authorization.<repo-owner>`
 
+## Gitea pulls
+
+Create jobs based on open Gitea pull requests
+
+### Configuration options
+
+- `gitea_authorization.<repo-owner>`
+
+## Gitea refs
+
+Hydra plugin for retrieving the list of references (branches or tags) from
+Gitea following a certain naming scheme.
+
+### Configuration options
+
+- `gitea_authorization.<repo-owner>`
+
 ## GitHub pulls
 
 Create jobs based on open GitHub pull requests
flake.lock (generated; 20 changes)

@@ -12,16 +12,16 @@
         "nixpkgs-regression": []
       },
       "locked": {
-        "lastModified": 1739899400,
-        "narHash": "sha256-q/RgA4bB7zWai4oPySq9mch7qH14IEeom2P64SXdqHs=",
+        "lastModified": 1744030329,
+        "narHash": "sha256-r+psCOW77vTSTNbxTVrYHeh6OgB0QukbnyUVDwg8s4I=",
         "owner": "NixOS",
         "repo": "nix",
-        "rev": "e310c19a1aeb1ce1ed4d41d5ab2d02db596e0918",
+        "rev": "a4962f73b5fc874d4b16baef47921daf349addfc",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "2.26-maintenance",
+        "ref": "2.28-maintenance",
         "repo": "nix",
         "type": "github"
       }
@@ -29,11 +29,11 @@
     "nix-eval-jobs": {
       "flake": false,
       "locked": {
-        "lastModified": 1739500569,
-        "narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
+        "lastModified": 1744018595,
+        "narHash": "sha256-v5n6t49X7MOpqS9j0FtI6TWOXvxuZMmGsp2OfUK5QfA=",
         "owner": "nix-community",
         "repo": "nix-eval-jobs",
-        "rev": "4b392b284877d203ae262e16af269f702df036bc",
+        "rev": "cba718bafe5dc1607c2b6761ecf53c641a6f3b21",
         "type": "github"
      },
      "original": {
@@ -44,11 +44,11 @@
     },
     "nixpkgs": {
      "locked": {
-        "lastModified": 1739461644,
-        "narHash": "sha256-1o1qR0KYozYGRrnqytSpAhVBYLNBHX+Lv6I39zGRzKM=",
+        "lastModified": 1743987495,
+        "narHash": "sha256-46T2vMZ4/AfCK0Y2OjlFzJPxmdpP8GtsuEqSSJv3oe4=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "97a719c9f0a07923c957cf51b20b329f9fb9d43f",
+        "rev": "db8f4fe18ce772a9c8f3adf321416981c8fe9371",
        "type": "github"
      },
      "original": {
flake.nix (29 changes)

@@ -4,7 +4,7 @@
   inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11-small";
 
   inputs.nix = {
-    url = "github:NixOS/nix/2.26-maintenance";
+    url = "github:NixOS/nix/2.28-maintenance";
     inputs.nixpkgs.follows = "nixpkgs";
 
     # hide nix dev tooling from our lock file
@@ -34,7 +34,6 @@
       hydra = final.callPackage ./package.nix {
         inherit (nixpkgs.lib) fileset;
         rawSrc = self;
-        nix-perl-bindings = final.nixComponents.nix-perl-bindings;
       };
     };
 
@@ -73,21 +72,29 @@
       validate-openapi = hydraJobs.tests.validate-openapi.${system};
     });
 
-    packages = forEachSystem (system: {
-      nix-eval-jobs = nixpkgs.legacyPackages.${system}.callPackage nix-eval-jobs {
-        nix = nix.packages.${system}.nix;
-      };
-      hydra = nixpkgs.legacyPackages.${system}.callPackage ./package.nix {
-        inherit (nixpkgs.lib) fileset;
-        inherit (self.packages.${system}) nix-eval-jobs;
-        rawSrc = self;
+    packages = forEachSystem (system: let
+      nixComponents = {
         inherit (nix.packages.${system})
           nix-util
           nix-store
+          nix-expr
+          nix-fetchers
+          nix-flake
           nix-main
+          nix-cmd
           nix-cli
+          nix-perl-bindings
           ;
-        nix-perl-bindings = nix.hydraJobs.perlBindings.${system};
+      };
+    in {
+      nix-eval-jobs = nixpkgs.legacyPackages.${system}.callPackage nix-eval-jobs {
+        inherit nixComponents;
+      };
+      hydra = nixpkgs.legacyPackages.${system}.callPackage ./package.nix {
+        inherit (nixpkgs.lib) fileset;
+        inherit nixComponents;
+        inherit (self.packages.${system}) nix-eval-jobs;
+        rawSrc = self;
       };
       default = self.packages.${system}.hydra;
     });
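The `packages` rewrite above replaces five separate `nix-*` function arguments with a single `nixComponents` set, built once per system and handed to both `nix-eval-jobs` and `package.nix`. A minimal, self-contained sketch of that pattern follows; the names and version strings are illustrative stand-ins, not Hydra's actual packages.

# sketch.nix — group related inputs into one attrset and thread the whole
# set, so adding a component later touches one place, not every call site.
# Evaluate with: nix-instantiate --eval --strict sketch.nix
let
  # stand-ins for per-system component derivations (illustrative values)
  nixComponents = {
    nix-util = "nix-util-2.28";
    nix-cli = "nix-cli-2.28";
  };
  # a consumer takes the whole set instead of one argument per package
  consumer = { nixComponents }: "links ${nixComponents.nix-util}, runs ${nixComponents.nix-cli}";
in
consumer { inherit nixComponents; }

The payoff shows in this very diff: adding `nix-expr`, `nix-fetchers`, `nix-flake`, `nix-cmd`, and `nix-perl-bindings` only grows the one `inherit` list instead of every `callPackage` signature.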
hydra/jobsets.nix (new file; 90 lines)

@@ -0,0 +1,90 @@
+{ pulls, branches, ... }:
+let
+  # create the json spec for the jobset
+  makeSpec =
+    contents:
+    builtins.derivation {
+      name = "spec.json";
+      system = "x86_64-linux";
+      preferLocalBuild = true;
+      allowSubstitutes = false;
+      builder = "/bin/sh";
+      args = [
+        (builtins.toFile "builder.sh" ''
+          echo "$contents" > $out
+        '')
+      ];
+      contents = builtins.toJSON contents;
+    };
+
+  prs = readJSONFile pulls;
+  refs = readJSONFile branches;
+
+  # template for creating a job
+  makeJob =
+    {
+      schedulingshares ? 10,
+      keepnr ? 3,
+      description,
+      flake,
+      enabled ? 1,
+    }:
+    {
+      inherit
+        description
+        flake
+        schedulingshares
+        keepnr
+        enabled
+        ;
+      type = 1;
+      hidden = false;
+      checkinterval = 300; # every 5 minutes
+      enableemail = false;
+      emailoverride = "";
+    };
+
+  giteaHost = "ssh://gitea@nayeonie.com:2222";
+  repo = "ahuston-0/hydra";
+  # # Create a hydra job for a branch
+  jobOfRef =
+    name:
+    { ref, ... }:
+    if ((builtins.match "^refs/heads/(.*)$" ref) == null) then
+      null
+    else
+      {
+        name = builtins.replaceStrings [ "/" ] [ "-" ] "branch-${name}";
+        value = makeJob {
+          description = "Branch ${name}";
+          flake = "git+${giteaHost}/${repo}?ref=${ref}";
+        };
+      };
+
+  # Create a hydra job for a PR
+  jobOfPR = id: info: {
+    name = if info.draft then "draft-${id}" else "pr-${id}";
+    value = makeJob {
+      description = "PR ${id}: ${info.title}";
+      flake = "git+${giteaHost}/${repo}?ref=${info.head.ref}";
+      enabled = info.state == "open";
+    };
+  };
+
+  # some utility functions
+  # converts json to name/value dicts
+  attrsToList = l: builtins.attrValues (builtins.mapAttrs (name: value: { inherit name value; }) l);
+  # wrapper function for reading json from file
+  readJSONFile = f: builtins.fromJSON (builtins.readFile f);
+  # remove null values from a set, in-case of branches that don't exist
+  mapFilter = f: l: builtins.filter (x: (x != null)) (map f l);
+
+  # Create job set from PRs and branches
+  jobs = makeSpec (
+    builtins.listToAttrs (map ({ name, value }: jobOfPR name value) (attrsToList prs))
+    // builtins.listToAttrs (mapFilter ({ name, value }: jobOfRef name value) (attrsToList refs))
+  );
+in
+{
+  jobsets = jobs;
+}
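This file implements a declarative jobset: Hydra evaluates it with the plugin-produced `pulls` and `branches` JSON files and installs the resulting `spec.json` as the project's jobsets, one per PR and branch. A simplified, runnable sketch of what `jobOfPR` yields for one made-up PR entry; the field names follow what the file above reads, but the values are invented.

# eval-sketch.nix — what jobOfPR produces for a hypothetical Gitea PR.
# Evaluate with: nix-instantiate --eval --strict eval-sketch.nix
let
  # invented PR metadata, shaped like what jobsets.nix reads
  info = {
    title = "add feature";
    draft = false;
    state = "open";
    head.ref = "feature-branch";
  };
  # trimmed-down version of jobOfPR from the file above
  jobOfPR = id: info: {
    name = if info.draft then "draft-${id}" else "pr-${id}";
    value = {
      description = "PR ${id}: ${info.title}";
      enabled = info.state == "open";
    };
  };
in
# => { name = "pr-7"; value = { description = "PR 7: add feature"; enabled = true; }; }
jobOfPR "7" info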
hydra/spec.json (new file; 35 lines)

@@ -0,0 +1,35 @@
+{
+  "enabled": 1,
+  "hidden": false,
+  "description": "ahuston-0's fork of hydra",
+  "nixexprinput": "nixexpr",
+  "nixexprpath": "hydra/jobsets.nix",
+  "checkinterval": 60,
+  "schedulingshares": 100,
+  "enableemail": false,
+  "emailoverride": "",
+  "keepnr": 3,
+  "type": 0,
+  "inputs": {
+    "nixexpr": {
+      "value": "ssh://gitea@nayeonie.com:2222/ahuston-0/hydra.git add-gitea-pulls",
+      "type": "git",
+      "emailresponsible": false
+    },
+    "nixpkgs": {
+      "value": "https://github.com/NixOS/nixpkgs nixos-unstable",
+      "type": "git",
+      "emailresponsible": false
+    },
+    "pulls": {
+      "type": "giteapulls",
+      "value": "nayeonie.com ahuston-0 hydra https",
+      "emailresponsible": false
+    },
+    "branches": {
+      "type": "gitea_refs",
+      "value": "nayeonie.com ahuston-0 hydra heads https -",
+      "emailresponsible": false
+    }
+  }
+}
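This spec is the static seed: it points Hydra at `hydra/jobsets.nix` (via the `nixexpr` input), and the `pulls` and `branches` inputs are supplied by the Gitea plugins as paths to JSON files. A sketch of that input's shape; the contents are hypothetical, with keys mirroring what jobsets.nix reads.

# input-sketch.nix — hypothetical shape of the JSON file handed to
# jobsets.nix through the "pulls" input (PR number -> metadata).
# Evaluate with: nix-instantiate --eval --strict input-sketch.nix
let
  pullsFile = builtins.toFile "pulls.json" ''
    { "1": { "title": "Test PR", "draft": false, "state": "open",
             "head": { "ref": "pr" } } }
  '';
  prs = builtins.fromJSON (builtins.readFile pullsFile);
in
map (n: "pr-${n}") (builtins.attrNames prs)  # => [ "pr-1" ]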
meson.build (14 changes)

@@ -12,20 +12,6 @@ nix_util_dep = dependency('nix-util', required: true)
 nix_store_dep = dependency('nix-store', required: true)
 nix_main_dep = dependency('nix-main', required: true)
 
-# Nix need extra flags not provided in its pkg-config files.
-nix_dep = declare_dependency(
-  dependencies: [
-    nix_util_dep,
-    nix_store_dep,
-    nix_main_dep,
-  ],
-  compile_args: [
-    '-include', 'nix/config-util.hh',
-    '-include', 'nix/config-store.hh',
-    '-include', 'nix/config-main.hh',
-  ],
-)
-
 pqxx_dep = dependency('libpqxx', required: true)
 
 prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
@@ -15,7 +15,6 @@
     systemd.services.hydra-send-stats.enable = false;
 
     services.postgresql.enable = true;
-    services.postgresql.package = pkgs.postgresql_12;
 
     # The following is to work around the following error from hydra-server:
     # [error] Caught exception in engine "Cannot determine local time zone"
@@ -468,7 +468,7 @@ in
           elif [[ $compression == zstd ]]; then
             compression="zstd --rm"
           fi
-          find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
+          find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
         '';
         startAt = "Sun 01:45";
       };
nixos-tests.nix (111 changes)

@@ -27,8 +27,7 @@ in
 {
 
   install = forEachSystem (system:
-    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-    simpleTest {
+    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
       name = "hydra-install";
       nodes.machine = hydraServer;
       testScript =
@@ -43,8 +42,7 @@ in
   });
 
   notifications = forEachSystem (system:
-    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-    simpleTest {
+    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
       name = "hydra-notifications";
       nodes.machine = {
         imports = [ hydraServer ];
@@ -56,7 +54,7 @@ in
        '';
        services.influxdb.enable = true;
      };
-      testScript = ''
+      testScript = { nodes, ... }: ''
        machine.wait_for_job("hydra-init")
 
        # Create an admin account and some other state.
@@ -87,7 +85,7 @@ in
 
        # Setup the project and jobset
        machine.succeed(
-           "su - hydra -c 'perl -I ${config.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
+           "su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
        )
 
        # Wait until hydra has build the job and
@@ -101,9 +99,10 @@ in
   });
 
   gitea = forEachSystem (system:
-    let pkgs = nixpkgs.legacyPackages.${system}; in
-    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-    makeTest {
+    let
+      pkgs = nixpkgs.legacyPackages.${system};
+    in
+    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
       name = "hydra-gitea";
       nodes.machine = { pkgs, ... }: {
         imports = [ hydraServer ];
@@ -145,10 +144,24 @@ in
        git -C /tmp/repo add .
        git config --global user.email test@localhost
        git config --global user.name test
+
+       # Create initial commit
        git -C /tmp/repo commit -m 'Initial import'
        git -C /tmp/repo remote add origin gitea@machine:root/repo
-       GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
+       export GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no'
        git -C /tmp/repo push origin master
+       git -C /tmp/repo log >&2
+
+       # Create PR branch
+       git -C /tmp/repo checkout -b pr
+       git -C /tmp/repo commit --allow-empty -m 'Additional change'
+       git -C /tmp/repo push origin pr
+       git -C /tmp/repo log >&2
+
+       # Create release branch
+       git -C /tmp/repo checkout -b release/release-1.0
+       git -C /tmp/repo commit --allow-empty -m 'Additional change'
+       git -C /tmp/repo push origin release/release-1.0
        git -C /tmp/repo log >&2
      '';
 
@@ -185,7 +198,7 @@ in
      cat >data.json <<EOF
      {
        "description": "Trivial",
-       "checkinterval": "60",
+       "checkinterval": "20",
        "enabled": "1",
        "visible": "1",
        "keepnr": "1",
@@ -199,7 +212,17 @@ in
        "gitea_repo_name": {"value": "repo", "type": "string"},
        "gitea_repo_owner": {"value": "root", "type": "string"},
        "gitea_status_repo": {"value": "git", "type": "string"},
-       "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
+       "gitea_http_url": {"value": "http://localhost:3001", "type": "string"},
+       "pulls": {
+         "type": "giteapulls",
+         "value": "localhost:3001 root repo http",
+         "emailresponsible": false
+       },
+       "releases": {
+         "type": "gitea_refs",
+         "value": "localhost:3001 root repo heads http - release",
+         "emailresponseible": false
+       }
      }
      }
      EOF
@@ -227,15 +250,41 @@ in
      };
 
      smallDrv = pkgs.writeText "jobset.nix" ''
-       { trivial = builtins.derivation {
-           name = "trivial";
-           system = "${system}";
-           builder = "/bin/sh";
-           allowSubstitutes = false;
-           preferLocalBuild = true;
-           args = ["-c" "echo success > $out; exit 0"];
-         };
-       }
+       { pulls, releases, ... }:
+
+       let
+         genDrv = name: builtins.derivation {
+           inherit name;
+           system = "${system}";
+           builder = "/bin/sh";
+           allowSubstitutes = false;
+           preferLocalBuild = true;
+           args = ["-c" "echo success > $out; exit 0"];
+         };
+
+         prs = builtins.fromJSON (builtins.readFile pulls);
+         prJobNames = map (n: "pr-''${n}") (builtins.attrNames prs);
+         prJobset = builtins.listToAttrs (
+           map (
+             name: {
+               inherit name;
+               value = genDrv name;
+             }
+           ) prJobNames
+         );
+         rels = builtins.fromJSON (builtins.readFile releases);
+         relJobNames = builtins.attrNames rels;
+         relJobset = builtins.listToAttrs (
+           map (
+             name: {
+               inherit name;
+               value = genDrv name;
+             }
+           ) relJobNames
+         );
+       in {
+         trivial = genDrv "trivial";
+       } // prJobset // relJobset
      '';
    in
    ''
@@ -279,18 +328,34 @@ in
        + '| jq .buildstatus | xargs test 0 -eq'
    )
 
+   machine.sleep(3)
+
    data = machine.succeed(
-       'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+       'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show master | head -n1 | awk "{print \\$2}")?sort=leastindex" '
        + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
        + f"-H 'Authorization: token ${api_token}'"
    )
 
    response = json.loads(data)
 
-   assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!"
+   assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
    assert response[0]['status'] == "success", "Expected finished status to be success!"
    assert response[1]['status'] == "pending", "Expected queued status to be pending!"
 
+   # giteapulls test
+
+   machine.succeed(
+       "curl --fail -X POST http://localhost:3001/api/v1/repos/root/repo/pulls "
+       + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+       + f"-H 'Authorization: token ${api_token}'"
+       + ' -d \'{"title":"Test PR", "base":"master", "head": "pr"}\'''
+   )
+
+   machine.wait_until_succeeds(
+       'curl -Lf -s http://localhost:3000/build/2 -H "Accept: application/json" '
+       + '| jq .buildstatus | xargs test 0 -eq'
+   )
+
    machine.shutdown()
 '';
  });
package.nix (25 changes)

@@ -8,11 +8,7 @@
 
 , perlPackages
 
-, nix-util
-, nix-store
-, nix-main
-, nix-cli
-, nix-perl-bindings
+, nixComponents
 , git
 
 , makeWrapper
@@ -65,7 +61,7 @@ let
     name = "hydra-perl-deps";
     paths = lib.closePropagation
       ([
-        nix-perl-bindings
+        nixComponents.nix-perl-bindings
         git
       ] ++ (with perlPackages; [
         AuthenSASL
@@ -93,6 +89,7 @@ let
         DateTime
         DBDPg
         DBDSQLite
+        DBIxClassHelpers
         DigestSHA1
         EmailMIME
         EmailSender
@@ -165,7 +162,7 @@ stdenv.mkDerivation (finalAttrs: {
     nukeReferences
     pkg-config
     mdbook
-    nix-cli
+    nixComponents.nix-cli
     perlDeps
     perl
     unzip
@@ -175,9 +172,9 @@ stdenv.mkDerivation (finalAttrs: {
     libpqxx
     openssl
     libxslt
-    nix-util
-    nix-store
-    nix-main
+    nixComponents.nix-util
+    nixComponents.nix-store
+    nixComponents.nix-main
     perlDeps
     perl
     boost
@@ -204,14 +201,14 @@ stdenv.mkDerivation (finalAttrs: {
     glibcLocales
     libressl.nc
     python3
-    nix-cli
+    nixComponents.nix-cli
   ];
 
   hydraPath = lib.makeBinPath (
     [
      subversion
      openssh
-      nix-cli
+      nixComponents.nix-cli
      coreutils
      findutils
      pixz
@@ -241,7 +238,7 @@ stdenv.mkDerivation (finalAttrs: {
   shellHook = ''
     pushd $(git rev-parse --show-toplevel) >/dev/null
 
-    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
+    PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
     PERL5LIB=$(pwd)/src/lib:$PERL5LIB
     export HYDRA_HOME="$(pwd)/src/"
     mkdir -p .hydra-data
@@ -272,7 +269,7 @@ stdenv.mkDerivation (finalAttrs: {
       --prefix PATH ':' $out/bin:$hydraPath \
       --set HYDRA_RELEASE ${version} \
       --set HYDRA_HOME $out/libexec/hydra \
-      --set NIX_RELEASE ${nix-cli.name or "unknown"} \
+      --set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
       --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
     done
   '';
@@ -1,8 +1,8 @@
 #include "db.hh"
 #include "hydra-config.hh"
-#include "pool.hh"
-#include "shared.hh"
-#include "signals.hh"
+#include <nix/util/pool.hh>
+#include <nix/main/shared.hh>
+#include <nix/util/signals.hh>
 
 #include <algorithm>
 #include <thread>
@@ -2,7 +2,8 @@ hydra_evaluator = executable('hydra-evaluator',
   'hydra-evaluator.cc',
   dependencies: [
     libhydra_dep,
-    nix_dep,
+    nix_util_dep,
+    nix_main_dep,
     pqxx_dep,
   ],
   install: true,
@@ -5,17 +5,20 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 
-#include "build-result.hh"
-#include "path.hh"
-#include "legacy-ssh-store.hh"
-#include "serve-protocol.hh"
+#include <nix/store/build-result.hh>
+#include <nix/store/path.hh>
+#include <nix/store/legacy-ssh-store.hh>
+#include <nix/store/serve-protocol.hh>
+#include <nix/store/serve-protocol-impl.hh>
 #include "state.hh"
-#include "current-process.hh"
-#include "processes.hh"
-#include "util.hh"
-#include "ssh.hh"
-#include "finally.hh"
-#include "url.hh"
+#include <nix/util/current-process.hh>
+#include <nix/util/processes.hh>
+#include <nix/util/util.hh>
+#include <nix/store/serve-protocol.hh>
+#include <nix/store/serve-protocol-impl.hh>
+#include <nix/store/ssh.hh>
+#include <nix/util/finally.hh>
+#include <nix/util/url.hh>
 
 using namespace nix;
 
@@ -36,6 +39,38 @@ bool ::Machine::isLocalhost() const
 
 namespace nix::build_remote {
 
+static std::unique_ptr<SSHMaster::Connection> openConnection(
+    ::Machine::ptr machine, SSHMaster & master)
+{
+    Strings command = {"nix-store", "--serve", "--write"};
+    if (machine->isLocalhost()) {
+        command.push_back("--builders");
+        command.push_back("");
+    } else {
+        auto remoteStore = machine->storeUri.params.find("remote-store");
+        if (remoteStore != machine->storeUri.params.end()) {
+            command.push_back("--store");
+            command.push_back(shellEscape(remoteStore->second));
+        }
+    }
+
+    auto ret = master.startCommand(std::move(command), {
+        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
+    });
+
+    // XXX: determine the actual max value we can use from /proc.
+
+    // FIXME: Should this be upstreamed into `startCommand` in Nix?
+
+    int pipesize = 1024 * 1024;
+
+    fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+    fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+    return ret;
+}
+
+
 static void copyClosureTo(
     ::Machine::Connection & conn,
     Store & destStore,
@@ -52,8 +87,8 @@ static void copyClosureTo(
     // FIXME: substitute output pollutes our build log
     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = conn.store->queryValidPaths(
-        closure, true, useSubstitutes);
+    auto present = conn.queryValidPaths(
+        destStore, true, closure, useSubstitutes);
 
     if (present.size() == closure.size()) return;
 
@@ -68,7 +103,12 @@ static void copyClosureTo(
     std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));
 
-    conn.store->addMultipleToStoreLegacy(destStore, missing);
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();
+
+    if (readInt(conn.from) != 1)
+        throw Error("remote machine failed to import closure");
 }
 
 
@@ -188,7 +228,7 @@ static BuildResult performBuild(
     counter & nrStepsBuilding
 )
 {
-    auto kont = conn.store->buildDerivationAsync(drvPath, drv, options);
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
 
     BuildResult result;
 
@@ -197,10 +237,7 @@ static BuildResult performBuild(
     startTime = time(0);
     {
         MaintainCount<counter> mc(nrStepsBuilding);
-        result = kont();
-        // Without proper call-once functions, we need to manually
-        // delete after calling.
-        kont = {};
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
     }
     stopTime = time(0);
 
@@ -216,7 +253,7 @@ static BuildResult performBuild(
 
     // If the protocol was too old to give us `builtOutputs`, initialize
     // it manually by introspecting the derivation.
-    if (GET_PROTOCOL_MINOR(conn.store->getProtocol()) < 6)
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
     {
         // If the remote is too old to handle CA derivations, we can’t get this
         // far anyways
@@ -249,25 +286,26 @@ static void copyPathFromRemote(
     const ValidPathInfo & info
 )
 {
     /* Receive the NAR from the remote and add it to the
        destination store. Meanwhile, extract all the info from the
        NAR that getBuildOutput() needs. */
     auto source2 = sinkToSource([&](Sink & sink)
     {
         /* Note: we should only send the command to dump the store
            path to the remote if the NAR is actually going to get read
           by the destination store, which won't happen if this path
           is already valid on the destination store. Since this
           lambda function only gets executed if someone tries to read
           from source2, we will send the command from here rather
           than outside the lambda. */
-        conn.store->narFromPath(info.path, [&](Source & source) {
-            TeeSource tee{source, sink};
-            extractNarData(tee, conn.store->printStorePath(info.path), narMembers);
-        });
-    });
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();
 
-    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
 }
 
 static void copyPathsFromRemote(
@@ -348,8 +386,19 @@ void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
 
 }
 
+/* Utility guard object to auto-release a semaphore on destruction. */
+template <typename T>
+class SemaphoreReleaser {
+public:
+    SemaphoreReleaser(T* s) : sem(s) {}
+    ~SemaphoreReleaser() { sem->release(); }
+
+private:
+    T* sem;
+};
+
 void State::buildRemote(ref<Store> destStore,
+    std::unique_ptr<MachineReservation> reservation,
     ::Machine::ptr machine, Step::ptr step,
     const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
@@ -366,39 +415,30 @@ void State::buildRemote(ref<Store> destStore,
 
     updateStep(ssConnecting);
 
-    // FIXME: rewrite to use Store.
-    ::Machine::Connection conn {
-        .machine = machine,
-        .store = [&]{
-            auto * pSpecified = std::get_if<StoreReference::Specified>(&machine->storeUri.variant);
-            if (!pSpecified || pSpecified->scheme != "ssh") {
-                throw Error("Currently, only (legacy-)ssh stores are supported!");
-            }
-
-            auto remoteStore = machine->openStore().dynamic_pointer_cast<LegacySSHStore>();
-            assert(remoteStore);
-
-            remoteStore->connPipeSize = 1024 * 1024;
-
-            if (machine->isLocalhost()) {
-                auto rp_new = remoteStore->remoteProgram.get();
-                rp_new.push_back("--builders");
-                rp_new.push_back("");
-                const_cast<nix::Setting<Strings> &>(remoteStore->remoteProgram).assign(rp_new);
-            }
-            remoteStore->extraSshArgs = {
-                "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
-            };
-            const_cast<nix::Setting<int> &>(remoteStore->logFD).assign(logFD.get());
-
-            return nix::ref{remoteStore};
-        }(),
-    };
+    auto storeRef = machine->completeStoreReference();
+
+    auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
+    if (!pSpecified || pSpecified->scheme != "ssh") {
+        throw Error("Currently, only (legacy-)ssh stores are supported!");
+    }
+
+    LegacySSHStoreConfig storeConfig {
+        pSpecified->scheme,
+        pSpecified->authority,
+        storeRef.params
+    };
+
+    auto master = storeConfig.createSSHMaster(
+        false, // no SSH master yet
+        logFD.get());
+
+    // FIXME: rewrite to use Store.
+    auto child = build_remote::openConnection(machine, master);
 
     {
         auto activeStepState(activeStep->state_.lock());
         if (activeStepState->cancelled) throw Error("step cancelled");
-        activeStepState->pid = conn.store->getConnectionPid();
+        activeStepState->pid = child->sshPid;
     }
 
     Finally clearPid([&]() {
@@ -413,12 +453,35 @@ void State::buildRemote(ref<Store> destStore,
        process. Meh. */
     });
 
+    ::Machine::Connection conn {
+        {
+            .to = child->in.get(),
+            .from = child->out.get(),
+            /* Handshake. */
+            .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+        },
+        /*.machine =*/ machine,
+    };
+
     Finally updateStats([&]() {
-        auto stats = conn.store->getConnectionStats();
-        bytesReceived += stats.bytesReceived;
-        bytesSent += stats.bytesSent;
+        bytesReceived += conn.from.read;
+        bytesSent += conn.to.written;
     });
 
+    constexpr ServeProto::Version our_version = 0x206;
+
+    try {
+        conn.remoteVersion = decltype(conn)::handshake(
+            conn.to,
+            conn.from,
+            our_version,
+            machine->storeUri.render());
+    } catch (EndOfFile & e) {
+        child->sshPid.wait();
+        std::string s = chomp(readFile(result.logFile));
+        throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
+    }
+
     {
         auto info(machine->state->connectInfo.lock());
         info->consecutiveFailures = 0;
@@ -475,6 +538,23 @@ void State::buildRemote(ref<Store> destStore,
         result.logFile = "";
     }
 
+    /* Throttle CPU-bound work. Opportunistically skip updating the current
+     * step, since this requires a DB roundtrip. */
+    if (!localWorkThrottler.try_acquire()) {
+        MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
+        updateStep(ssWaitingForLocalSlot);
+        localWorkThrottler.acquire();
+    }
+    SemaphoreReleaser releaser(&localWorkThrottler);
+
+    /* Once we've started copying outputs, release the machine reservation
+     * so further builds can happen. We do not release the machine earlier
+     * to avoid situations where the queue runner is bottlenecked on
+     * copying outputs and we end up building too many things that we
+     * haven't been able to allow copy slots for. */
+    reservation.reset();
+    wakeDispatcher();
+
     StorePathSet outputs;
     for (auto & [_, realisation] : buildResult.builtOutputs)
         outputs.insert(realisation.outPath);
@@ -487,7 +567,7 @@ void State::buildRemote(ref<Store> destStore,
 
         auto now1 = std::chrono::steady_clock::now();
 
-        auto infos = conn.store->queryPathInfosUncached(outputs);
+        auto infos = conn.queryPathInfos(*localStore, outputs);
 
         size_t totalNarSize = 0;
         for (auto & [_, info] : infos) totalNarSize += info.narSize;
@@ -522,11 +602,9 @@ void State::buildRemote(ref<Store> destStore,
             }
         }
 
-        /* Shut down the connection done by RAII.
-
-           Only difference is kill() instead of wait() (i.e. send signal
-           then wait())
-        */
+        /* Shut down the connection. */
+        child->in = -1;
+        child->sshPid.wait();
 
     } catch (Error & e) {
         /* Disable this machine until a certain period of time has
@@ -1,7 +1,7 @@
 #include "hydra-build-result.hh"
-#include "store-api.hh"
-#include "util.hh"
-#include "source-accessor.hh"
+#include <nix/store/store-api.hh>
+#include <nix/util/util.hh>
+#include <nix/util/source-accessor.hh>
 
 #include <regex>
 
@@ -2,8 +2,8 @@
 
 #include "state.hh"
 #include "hydra-build-result.hh"
-#include "finally.hh"
-#include "binary-cache-store.hh"
+#include <nix/util/finally.hh>
+#include <nix/store/binary-cache-store.hh>
 
 using namespace nix;
 
@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
 }
 
 
-void State::builder(MachineReservation::ptr reservation)
+void State::builder(std::unique_ptr<MachineReservation> reservation)
 {
     setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
 
@@ -35,22 +35,20 @@ void State::builder(MachineReservation::ptr reservation)
         activeSteps_.lock()->erase(activeStep);
     });
 
+    std::string machine = reservation->machine->storeUri.render();
+
     try {
         auto destStore = getDestStore();
-        res = doBuildStep(destStore, reservation, activeStep);
+        // Might release the reservation.
+        res = doBuildStep(destStore, std::move(reservation), activeStep);
     } catch (std::exception & e) {
         printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
-            localStore->printStorePath(reservation->step->drvPath),
-            reservation->machine->storeUri.render(),
+            localStore->printStorePath(activeStep->step->drvPath),
+            machine,
             e.what());
     }
 
-    /* Release the machine and wake up the dispatcher. */
-    assert(reservation.unique());
-    reservation = 0;
-    wakeDispatcher();
-
     /* If there was a temporary failure, retry the step after an
        exponentially increasing interval. */
     Step::ptr step = wstep.lock();
@@ -72,11 +70,11 @@ void State::builder(MachineReservation::ptr reservation)
 
 
 State::StepResult State::doBuildStep(nix::ref<Store> destStore,
-    MachineReservation::ptr reservation,
+    std::unique_ptr<MachineReservation> reservation,
     std::shared_ptr<ActiveStep> activeStep)
 {
-    auto & step(reservation->step);
-    auto & machine(reservation->machine);
+    auto step(reservation->step);
+    auto machine(reservation->machine);
 
     {
         auto step_(step->state.lock());
@@ -211,7 +209,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
     try {
         /* FIXME: referring builds may have conflicting timeouts. */
-        buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
+        buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
     } catch (Error & e) {
         if (activeStep->state_.lock()->cancelled) {
             printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -40,13 +40,15 @@ void State::dispatcher()
             printMsg(lvlDebug, "dispatcher woken up");
             nrDispatcherWakeups++;
 
-            auto now1 = std::chrono::steady_clock::now();
+            auto t_before_work = std::chrono::steady_clock::now();
 
             auto sleepUntil = doDispatch();
 
-            auto now2 = std::chrono::steady_clock::now();
+            auto t_after_work = std::chrono::steady_clock::now();
 
-            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+            prom.dispatcher_time_spent_running.Increment(
+                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
 
             /* Sleep until we're woken up (either because a runnable build
                is added, or because a build finishes). */
@@ -60,6 +62,10 @@ void State::dispatcher()
                 *dispatcherWakeup_ = false;
             }
 
+            auto t_after_sleep = std::chrono::steady_clock::now();
+            prom.dispatcher_time_spent_waiting.Increment(
+                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+
         } catch (std::exception & e) {
             printError("dispatcher: %s", e.what());
             sleep(1);
@@ -282,7 +288,7 @@ system_time State::doDispatch()
                 /* Make a slot reservation and start a thread to
                    do the build. */
                 auto builderThread = std::thread(&State::builder, this,
-                    std::make_shared<MachineReservation>(*this, step, mi.machine));
+                    std::make_unique<MachineReservation>(*this, step, mi.machine));
                 builderThread.detach(); // FIXME?
 
                 keepGoing = true;
|
|||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
|
|
||||||
#include "hash.hh"
|
#include <nix/util/hash.hh>
|
||||||
#include "derivations.hh"
|
#include <nix/store/derivations.hh>
|
||||||
#include "store-api.hh"
|
#include <nix/store/store-api.hh>
|
||||||
#include "nar-extractor.hh"
|
#include "nar-extractor.hh"
|
||||||
|
|
||||||
struct BuildProduct
|
struct BuildProduct
|
||||||
|
@@ -11,16 +11,16 @@
 
 #include <nlohmann/json.hpp>
 
-#include "signals.hh"
+#include <nix/util/signals.hh>
 #include "state.hh"
 #include "hydra-build-result.hh"
-#include "store-api.hh"
-#include "remote-store.hh"
+#include <nix/store/store-api.hh>
+#include <nix/store/remote-store.hh>
 
-#include "globals.hh"
+#include <nix/store/globals.hh>
 #include "hydra-config.hh"
-#include "s3-binary-cache-store.hh"
-#include "shared.hh"
+#include <nix/store/s3-binary-cache-store.hh>
+#include <nix/main/shared.hh>
 
 using namespace nix;
 using nlohmann::json;
@@ -70,10 +70,31 @@ State::PromMetrics::PromMetrics()
         .Register(*registry)
         .Add({})
     )
-    , queue_max_id(
-        prometheus::BuildGauge()
-            .Name("hydraqueuerunner_queue_max_build_id_info")
-            .Help("Maximum build record ID in the queue")
+    , dispatcher_time_spent_running(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_dispatcher_time_spent_running")
+            .Help("Time (in micros) spent running the dispatcher")
+            .Register(*registry)
+            .Add({})
+    )
+    , dispatcher_time_spent_waiting(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_dispatcher_time_spent_waiting")
+            .Help("Time (in micros) spent waiting for the dispatcher to obtain work")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_monitor_time_spent_running(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_monitor_time_spent_running")
+            .Help("Time (in micros) spent running the queue monitor")
+            .Register(*registry)
+            .Add({})
+    )
+    , queue_monitor_time_spent_waiting(
+        prometheus::BuildCounter()
+            .Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
+            .Help("Time (in micros) spent waiting for the queue monitor to obtain work")
             .Register(*registry)
             .Add({})
     )
@@ -85,6 +106,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
     : config(std::make_unique<HydraConfig>())
     , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
     , dbPool(config->getIntOption("max_db_connections", 128))
+    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
     , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
     , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
     , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
@@ -551,6 +573,7 @@ void State::dumpStatus(Connection & conn)
         {"nrActiveSteps", activeSteps_.lock()->size()},
         {"nrStepsBuilding", nrStepsBuilding.load()},
         {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
+        {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
         {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
         {"nrStepsWaiting", nrStepsWaiting.load()},
         {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
@@ -592,6 +615,7 @@ void State::dumpStatus(Connection & conn)
     }
 
     {
+        auto machines_json = json::object();
         auto machines_(machines.lock());
         for (auto & i : *machines_) {
             auto & m(i.second);
@@ -618,8 +642,9 @@ void State::dumpStatus(Connection & conn)
                 machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
                 machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
             }
-            statusJson["machines"][m->storeUri.render()] = machine;
+            machines_json[m->storeUri.render()] = machine;
         }
+        statusJson["machines"] = machines_json;
     }
 
     {
@@ -13,7 +13,9 @@ hydra_queue_runner = executable('hydra-queue-runner',
   srcs,
   dependencies: [
     libhydra_dep,
-    nix_dep,
+    nix_util_dep,
+    nix_store_dep,
+    nix_main_dep,
     pqxx_dep,
     prom_cpp_core_dep,
     prom_cpp_pull_dep,
@@ -1,6 +1,6 @@
 #include "nar-extractor.hh"

-#include "archive.hh"
+#include <nix/util/archive.hh>

 #include <unordered_set>


@@ -1,9 +1,9 @@
 #pragma once

-#include "source-accessor.hh"
-#include "types.hh"
-#include "serialise.hh"
-#include "hash.hh"
+#include <nix/util/source-accessor.hh>
+#include <nix/util/types.hh>
+#include <nix/util/serialise.hh>
+#include <nix/util/hash.hh>

 struct NarMemberData
 {
@@ -1,6 +1,8 @@
 #include "state.hh"
 #include "hydra-build-result.hh"
-#include "globals.hh"
+#include <nix/store/globals.hh>
+#include <nix/store/parsed-derivations.hh>
+#include <nix/util/thread-pool.hh>

 #include <cstring>

@@ -37,16 +39,21 @@ void State::queueMonitorLoop(Connection & conn)

     auto destStore = getDestStore();

-    unsigned int lastBuildId = 0;
-
     bool quit = false;
     while (!quit) {
+        auto t_before_work = std::chrono::steady_clock::now();

         localStore->clearPathInfoCache();

-        bool done = getQueuedBuilds(conn, destStore, lastBuildId);
+        bool done = getQueuedBuilds(conn, destStore);

         if (buildOne && buildOneDone) quit = true;

+        auto t_after_work = std::chrono::steady_clock::now();
+
+        prom.queue_monitor_time_spent_running.Increment(
+            std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+
         /* Sleep until we get notification from the database about an
            event. */
         if (done && !quit) {

@@ -56,12 +63,10 @@ void State::queueMonitorLoop(Connection & conn)
             conn.get_notifs();

             if (auto lowestId = buildsAdded.get()) {
-                lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
                 printMsg(lvlTalkative, "got notification: new builds added to the queue");
             }
             if (buildsRestarted.get()) {
                 printMsg(lvlTalkative, "got notification: builds restarted");
-                lastBuildId = 0; // check all builds
             }
             if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
                 printMsg(lvlTalkative, "got notification: builds cancelled or bumped");

@@ -71,6 +76,10 @@ void State::queueMonitorLoop(Connection & conn)
             printMsg(lvlTalkative, "got notification: jobset shares changed");
             processJobsetSharesChange(conn);
         }
+
+        auto t_after_sleep = std::chrono::steady_clock::now();
+        prom.queue_monitor_time_spent_waiting.Increment(
+            std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
     }

     exit(0);
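The instrumentation added to queueMonitorLoop() splits every iteration at two steady_clock timestamps, so the running and waiting counters together account for nearly all wall-clock time of the loop. A standalone sketch of that scheme, under the assumption that the two callables stand in for getQueuedBuilds() and the database-notification wait (the struct and names are illustrative):

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>

// Accumulates how long a loop spends working vs. sleeping, mirroring the
// queue_monitor_time_spent_{running,waiting} counters above.
struct InstrumentedLoop
{
    std::atomic<uint64_t> runningMicros{0}, waitingMicros{0};

    void run(const std::function<bool()> & work, const std::function<void()> & waitForWork)
    {
        using namespace std::chrono;
        bool quit = false;
        while (!quit) {
            auto beforeWork = steady_clock::now();
            quit = work();                     // e.g. check the queue for builds
            auto afterWork = steady_clock::now();
            runningMicros += duration_cast<microseconds>(afterWork - beforeWork).count();

            waitForWork();                     // e.g. block on a DB notification
            auto afterSleep = steady_clock::now();
            waitingMicros += duration_cast<microseconds>(afterSleep - afterWork).count();
        }
    }
};
```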
@@ -84,20 +93,18 @@ struct PreviousFailure : public std::exception {

 bool State::getQueuedBuilds(Connection & conn,
-    ref<Store> destStore, unsigned int & lastBuildId)
+    ref<Store> destStore)
 {
     prom.queue_checks_started.Increment();

-    printInfo("checking the queue for builds > %d...", lastBuildId);
+    printInfo("checking the queue for builds...");

     /* Grab the queued builds from the database, but don't process
        them yet (since we don't want a long-running transaction). */
     std::vector<BuildID> newIDs;
-    std::map<BuildID, Build::ptr> newBuildsByID;
+    std::unordered_map<BuildID, Build::ptr> newBuildsByID;
     std::multimap<StorePath, BuildID> newBuildsByPath;

-    unsigned int newLastBuildId = lastBuildId;
-
     {
         pqxx::work txn(conn);

@@ -106,17 +113,12 @@ bool State::getQueuedBuilds(Connection & conn,
             "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
             "globalPriority, priority from Builds "
             "inner join jobsets on builds.jobset_id = jobsets.id "
-            "where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
-            lastBuildId);
+            "where finished = 0 order by globalPriority desc, random()");

         for (auto const & row : res) {
             auto builds_(builds.lock());
             BuildID id = row["id"].as<BuildID>();
             if (buildOne && id != buildOne) continue;
-            if (id > newLastBuildId) {
-                newLastBuildId = id;
-                prom.queue_max_id.Set(id);
-            }
             if (builds_->count(id)) continue;

             auto build = std::make_shared<Build>(

@@ -318,15 +320,13 @@

         /* Stop after a certain time to allow priority bumps to be
            processed. */
-        if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
+        if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
             prom.queue_checks_early_exits.Increment();
             break;
         }
     }

     prom.queue_checks_finished.Increment();

-    lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
     return newBuildsByID.empty();
 }
@@ -405,6 +405,34 @@ void State::processQueueChange(Connection & conn)
 }


+std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
+    ref<Store> destStore,
+    const std::map<DrvOutput, std::optional<StorePath>> & paths)
+{
+    Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
+    ThreadPool tp;
+
+    for (auto & [output, maybeOutputPath] : paths) {
+        if (!maybeOutputPath) {
+            auto missing(missing_.lock());
+            missing->insert({output, maybeOutputPath});
+        } else {
+            tp.enqueue([&] {
+                if (!destStore->isValidPath(*maybeOutputPath)) {
+                    auto missing(missing_.lock());
+                    missing->insert({output, maybeOutputPath});
+                }
+            });
+        }
+    }
+
+    tp.process();
+
+    auto missing(missing_.lock());
+    return *missing;
+}
+
+
 Step::ptr State::createStep(ref<Store> destStore,
     Connection & conn, Build::ptr build, const StorePath & drvPath,
     Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
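getMissingRemotePaths() fans one store-validity query per output path onto a thread pool and collects the misses under a lock (nix's Sync wrapper). A rough standalone equivalent using only the standard library, where the key type, `isValidPath` callable, and one-thread-per-path structure are all illustrative simplifications of the pooled version above:

```cpp
#include <functional>
#include <map>
#include <mutex>
#include <optional>
#include <string>
#include <thread>
#include <vector>

// Returns the subset of `paths` that the (possibly remote, high-latency)
// store does not have. Queries run concurrently; the result map is guarded
// by a mutex, playing the role of Sync<...> in the code above.
std::map<int, std::optional<std::string>> findMissing(
    const std::map<int, std::optional<std::string>> & paths,
    const std::function<bool(const std::string &)> & isValidPath)
{
    std::map<int, std::optional<std::string>> missing;
    std::mutex missingMutex;
    std::vector<std::thread> workers;

    for (const auto & entry : paths) {
        if (!entry.second) {
            // No known output path at all: trivially missing.
            std::lock_guard<std::mutex> lock(missingMutex);
            missing.insert(entry);
        } else {
            workers.emplace_back([&, entry] {
                if (!isValidPath(*entry.second)) {
                    std::lock_guard<std::mutex> lock(missingMutex);
                    missing.insert(entry);
                }
            });
        }
    }

    for (auto & t : workers) t.join();
    return missing;
}
```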
@@ -463,14 +491,17 @@ Step::ptr State::createStep(ref<Store> destStore,
        it's not runnable yet, and other threads won't make it
        runnable while step->created == false. */
     step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
-    step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
+    {
+        auto parsedDrv = ParsedDerivation{drvPath, *step->drv};
+        step->drvOptions = std::make_unique<DerivationOptions>(DerivationOptions::fromParsedDerivation(parsedDrv));
+    }

-    step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
+    step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
     step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";

     step->systemType = step->drv->platform;
     {
-        StringSet features = step->requiredSystemFeatures = step->parsedDrv->getRequiredSystemFeatures();
+        StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
         if (step->preferLocalBuild)
             features.insert("local");
         if (!features.empty()) {

@@ -485,16 +516,15 @@

     /* Are all outputs valid? */
     auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
-    bool valid = true;
-    std::map<DrvOutput, std::optional<StorePath>> missing;
+    std::map<DrvOutput, std::optional<StorePath>> paths;
     for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
         auto outputHash = outputHashes.at(outputName);
-        if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
-            continue;
-        valid = false;
-        missing.insert({{outputHash, outputName}, maybeOutputPath});
+        paths.insert({{outputHash, outputName}, maybeOutputPath});
     }

+    auto missing = getMissingRemotePaths(destStore, paths);
+    bool valid = missing.empty();
+
     /* Try to copy the missing paths from the local store or from
        substitutes. */
     if (!missing.empty()) {
@@ -6,6 +6,8 @@
 #include <map>
 #include <memory>
 #include <queue>
+#include <regex>
+#include <semaphore>

 #include <prometheus/counter.h>
 #include <prometheus/gauge.h>

@@ -13,15 +15,18 @@

 #include "db.hh"

-#include "parsed-derivations.hh"
-#include "pathlocks.hh"
-#include "pool.hh"
-#include "build-result.hh"
-#include "store-api.hh"
-#include "sync.hh"
+#include <nix/store/derivations.hh>
+#include <nix/store/derivation-options.hh>
+#include <nix/store/pathlocks.hh>
+#include <nix/util/pool.hh>
+#include <nix/store/build-result.hh>
+#include <nix/store/store-api.hh>
+#include <nix/util/sync.hh>
 #include "nar-extractor.hh"
-#include "legacy-ssh-store.hh"
-#include "machines.hh"
+#include <nix/store/serve-protocol.hh>
+#include <nix/store/serve-protocol-impl.hh>
+#include <nix/store/serve-protocol-connection.hh>
+#include <nix/store/machines.hh>


 typedef unsigned int BuildID;

@@ -55,6 +60,7 @@ typedef enum {
     ssConnecting = 10,
     ssSendingInputs = 20,
     ssBuilding = 30,
+    ssWaitingForLocalSlot = 35,
     ssReceivingOutputs = 40,
     ssPostProcessing = 50,
 } StepState;

@@ -165,7 +171,7 @@ struct Step

     nix::StorePath drvPath;
     std::unique_ptr<nix::Derivation> drv;
-    std::unique_ptr<nix::ParsedDerivation> parsedDrv;
+    std::unique_ptr<nix::DerivationOptions> drvOptions;
     std::set<std::string> requiredSystemFeatures;
     bool preferLocalBuild;
     bool isDeterministic;

@@ -290,11 +296,9 @@ struct Machine : nix::Machine
     bool isLocalhost() const;

     // A connection to a machine
-    struct Connection {
+    struct Connection : nix::ServeProto::BasicClientConnection {
         // Backpointer to the machine
         ptr machine;
-        // Opened store
-        nix::ref<nix::LegacySSHStore> store;
     };
 };

@@ -352,6 +356,10 @@ private:
     typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

+    /* Throttler for CPU-bound local work. */
+    static constexpr unsigned int maxSupportedLocalWorkers = 1024;
+    std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;
+
     /* Various stats. */
     time_t startedAt;
     counter nrBuildsRead{0};
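The new localWorkThrottler is a C++20 counting semaphore: the template argument is a compile-time ceiling, while the constructor argument (here taken from `max_local_worker_threads`) is the runtime budget, and each CPU-bound section acquires a slot before running. A minimal sketch of that throttling pattern, with an illustrative budget of 8 and 32 competing tasks:

```cpp
#include <semaphore>
#include <thread>
#include <vector>

// Compile-time ceiling (template argument) vs. runtime budget (constructor
// argument): at most 8 of these tasks run their work section concurrently.
static constexpr unsigned int maxSupportedWorkers = 1024;
static std::counting_semaphore<maxSupportedWorkers> throttler(8);

void throttledTask()
{
    throttler.acquire();   // blocks while all slots are taken; a build step
                           // would report ssWaitingForLocalSlot here
    // ... CPU-bound local work ...
    throttler.release();
}

int main()
{
    std::vector<std::thread> threads;
    for (int i = 0; i < 32; i++) threads.emplace_back(throttledTask);
    for (auto & t : threads) t.join();
}
```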
@@ -361,6 +369,7 @@ private:
     counter nrStepsDone{0};
     counter nrStepsBuilding{0};
     counter nrStepsCopyingTo{0};
+    counter nrStepsWaitingForDownloadSlot{0};
     counter nrStepsCopyingFrom{0};
     counter nrStepsWaiting{0};
     counter nrUnsupportedSteps{0};

@@ -391,7 +400,6 @@ private:

     struct MachineReservation
     {
-        typedef std::shared_ptr<MachineReservation> ptr;
         State & state;
         Step::ptr step;
         Machine::ptr machine;

@@ -449,7 +457,12 @@ private:
         prometheus::Counter& queue_steps_created;
         prometheus::Counter& queue_checks_early_exits;
         prometheus::Counter& queue_checks_finished;
-        prometheus::Gauge& queue_max_id;
+
+        prometheus::Counter& dispatcher_time_spent_running;
+        prometheus::Counter& dispatcher_time_spent_waiting;
+
+        prometheus::Counter& queue_monitor_time_spent_running;
+        prometheus::Counter& queue_monitor_time_spent_waiting;

         PromMetrics();
     };

@@ -493,8 +506,7 @@ private:
     void queueMonitorLoop(Connection & conn);

     /* Check the queue for new builds. */
-    bool getQueuedBuilds(Connection & conn,
-        nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
+    bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);

     /* Handle cancellation, deletion and priority bumps. */
     void processQueueChange(Connection & conn);

@@ -502,6 +514,12 @@ private:
     BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
         const nix::StorePath & drvPath);

+    /* Returns paths missing from the remote store. Paths are processed in
+     * parallel to work around the possible latency of remote stores. */
+    std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
+        nix::ref<nix::Store> destStore,
+        const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);
+
     Step::ptr createStep(nix::ref<nix::Store> store,
         Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
         Build::ptr referringBuild, Step::ptr referringStep, std::set<nix::StorePath> & finishedDrvs,

@@ -531,16 +549,17 @@ private:

     void abortUnsupported();

-    void builder(MachineReservation::ptr reservation);
+    void builder(std::unique_ptr<MachineReservation> reservation);

     /* Perform the given build step. Return true if the step is to be
        retried. */
     enum StepResult { sDone, sRetry, sMaybeCancelled };
     StepResult doBuildStep(nix::ref<nix::Store> destStore,
-        MachineReservation::ptr reservation,
+        std::unique_ptr<MachineReservation> reservation,
         std::shared_ptr<ActiveStep> activeStep);

     void buildRemote(nix::ref<nix::Store> destStore,
+        std::unique_ptr<MachineReservation> reservation,
         Machine::ptr machine, Step::ptr step,
         const nix::ServeProto::BuildOptions & buildOptions,
         RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
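MachineReservation now travels as std::unique_ptr instead of a shared_ptr typedef: exactly one owner at a time, and the machine slot is released deterministically when the pointer is destroyed at the end of the owning call. A sketch of that ownership hand-off into a worker thread, with purely illustrative types:

```cpp
#include <cstdio>
#include <memory>
#include <thread>
#include <utility>

struct Reservation
{
    ~Reservation() { std::puts("slot released"); } // deterministic release
};

// Takes ownership by value; the reservation dies when the builder returns.
void builder(std::unique_ptr<Reservation> reservation)
{
    std::puts("building with reserved machine");
}

int main()
{
    auto r = std::make_unique<Reservation>();
    std::thread t(builder, std::move(r)); // ownership moves into the thread
    t.join();
}
```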
@@ -238,7 +238,7 @@ sub serveFile {
     # XSS hole.
     $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

-    $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
+    $c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
                                                   "store", "cat", "--store", getStoreUri(), "$path"]) };

     # Detect MIME type.

@@ -364,6 +364,21 @@ sub evals_GET {
     );
 }

+sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
+
+sub errors_GET {
+    my ($self, $c) = @_;
+
+    $c->stash->{template} = 'eval-error.tt';
+
+    my $jobsetName = $c->stash->{params}->{name};
+    $c->stash->{jobset} = $c->stash->{project}->jobsets->find(
+        { name => $jobsetName },
+        { '+columns' => { 'errormsg' => 'errormsg' } }
+    );
+
+    $self->status_ok($c, entity => $c->stash->{jobset});
+}
+
 # Redirect to the latest finished evaluation of this jobset.
 sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {

@@ -86,6 +86,17 @@ sub view_GET {
     );
 }

+sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
+
+sub errors_GET {
+    my ($self, $c) = @_;
+
+    $c->stash->{template} = 'eval-error.tt';
+
+    $c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });
+
+    $self->status_ok($c, entity => $c->stash->{eval});
+}
+
 sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
     my ($self, $c) = @_;

@@ -162,7 +162,7 @@ sub status_GET {
         { "buildsteps.busy" => { '!=', 0 } },
         { order_by => ["globalpriority DESC", "id"],
           join => "buildsteps",
-          columns => [@buildListColumns]
+          columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
         })]
     );
 }

@@ -37,7 +37,16 @@ sub buildDiff {

     my $n = 0;
     foreach my $build (@{$builds}) {
-        my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
+        my $aborted = $build->finished != 0 && (
+            # aborted
+            $build->buildstatus == 3
+            # cancelled
+            || $build->buildstatus == 4
+            # timeout
+            || $build->buildstatus == 7
+            # log limit exceeded
+            || $build->buildstatus == 10
+        );
         my $d;
         my $found = 0;
         while ($n < scalar(@{$builds2})) {

@@ -36,6 +36,7 @@ our @EXPORT = qw(
     jobsetOverview
     jobsetOverview_
     pathIsInsidePrefix
+    readIntoSocket
     readNixFile
     registerRoot
     restartBuilds

@@ -296,8 +297,7 @@ sub getEvals {

     my @evals = $evals_result_set->search(
         { hasnewbuilds => 1 },
-        { order_by => "$me.id DESC", rows => $rows, offset => $offset
-        , prefetch => { evaluationerror => [ ] } });
+        { order_by => "$me.id DESC", rows => $rows, offset => $offset });
     my @res = ();
     my $cache = {};
@@ -417,6 +417,16 @@ sub pathIsInsidePrefix {
     return $cur;
 }

+sub readIntoSocket{
+    my (%args) = @_;
+    my $sock;
+
+    eval {
+        open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
+    };
+
+    return $sock;
+}
src/lib/Hydra/Plugin/GiteaPulls.pm (new file, 84 lines)
@@ -0,0 +1,84 @@
+# Allow building based on Gitea pull requests.
+#
+# Example input:
+#   "pulls": {
+#     "type": "giteapulls",
+#     "value": "example.com alice repo"
+#     "emailresponsible": false
+#   }
+
+package Hydra::Plugin::GiteaPulls;
+
+use strict;
+use warnings;
+use parent 'Hydra::Plugin';
+use HTTP::Request;
+use LWP::UserAgent;
+use JSON::MaybeXS;
+use Hydra::Helper::CatalystUtils;
+use File::Temp;
+use POSIX qw(strftime);
+
+sub supportedInputTypes {
+    my ($self, $inputTypes) = @_;
+    $inputTypes->{'giteapulls'} = 'Open Gitea Pull Requests';
+}
+
+sub _iterate {
+    my ($url, $auth, $pulls, $ua) = @_;
+
+    my $req = HTTP::Request->new('GET', $url);
+    $req->header('Authorization' => 'token ' . $auth) if defined $auth;
+
+    my $res = $ua->request($req);
+    my $content = $res->decoded_content;
+    die "Error pulling from the gitea pulls API: $content\n"
+        unless $res->is_success;
+
+    my $pulls_list = decode_json $content;
+
+    foreach my $pull (@$pulls_list) {
+        $pulls->{$pull->{number}} = $pull;
+    }
+
+    # TODO Make Link header parsing more robust!!!
+    my @links = split ',', ($res->header("Link") // "");
+    my $next = "";
+    foreach my $link (@links) {
+        my ($url, $rel) = split ";", $link;
+        if (trim($rel) eq 'rel="next"') {
+            $next = substr trim($url), 1, -1;
+            last;
+        }
+    }
+    _iterate($next, $auth, $pulls, $ua) unless $next eq "";
+}
+
+sub fetchInput {
+    my ($self, $type, $name, $value, $project, $jobset) = @_;
+    return undef if $type ne "giteapulls";
+
+    my ($baseUrl, $owner, $repo, $proto) = split ' ', $value;
+    if (not defined $proto) { # the protocol handler is exposed as an option in order to do integration testing
+        $proto = "https"
+    }
+    my $auth = $self->{config}->{gitea_authorization}->{$owner};
+
+    my $ua = LWP::UserAgent->new();
+    my %pulls;
+    _iterate("$proto://$baseUrl/api/v1/repos/$owner/$repo/pulls?limit=100", $auth, \%pulls, $ua);
+
+    my $tempdir = File::Temp->newdir("gitea-pulls" . "XXXXX", TMPDIR => 1);
+    my $filename = "$tempdir/gitea-pulls.json";
+    open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
+    print $fh encode_json \%pulls;
+    close $fh;
+
+    my $storePath = trim(`nix-store --add "$filename"`
+        or die "cannot copy path $filename to the Nix store.\n");
+    chomp $storePath;
+    my $timestamp = time;
+    return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
+}
+
+1;
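The plugin's _iterate() walks Gitea's paginated API by chasing the rel="next" URL in the HTTP Link header, with an acknowledged TODO about robustness. The same parsing step sketched in C++, deliberately as naive as the Perl above (split on ',' and ';', strip the angle brackets); the function name is illustrative:

```cpp
#include <algorithm>
#include <cctype>
#include <optional>
#include <sstream>
#include <string>

static std::string trim(std::string s)
{
    auto notSpace = [](unsigned char c) { return !std::isspace(c); };
    s.erase(s.begin(), std::find_if(s.begin(), s.end(), notSpace));
    s.erase(std::find_if(s.rbegin(), s.rend(), notSpace).base(), s.end());
    return s;
}

// Given `Link: <https://host/api?page=2>; rel="next", <...>; rel="last"`,
// return the URL of the next page, or nothing when on the last page.
std::optional<std::string> nextPageUrl(const std::string & linkHeader)
{
    std::stringstream parts(linkHeader);
    std::string part;
    while (std::getline(parts, part, ',')) {
        auto sep = part.find(';');
        if (sep == std::string::npos) continue;
        std::string url = trim(part.substr(0, sep));
        std::string rel = trim(part.substr(sep + 1));
        if (rel == "rel=\"next\"" && url.size() >= 2)
            return url.substr(1, url.size() - 2); // strip '<' and '>'
    }
    return std::nullopt;
}
```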
src/lib/Hydra/Plugin/GiteaRefs.pm (new file, 129 lines)
@@ -0,0 +1,129 @@
+package Hydra::Plugin::GiteaRefs;
+
+use strict;
+use warnings;
+use parent 'Hydra::Plugin';
+use HTTP::Request;
+use LWP::UserAgent;
+use JSON::MaybeXS;
+use Hydra::Helper::CatalystUtils;
+use File::Temp;
+use POSIX qw(strftime);
+
+=head1 NAME
+
+GiteaRefs - Hydra plugin for retrieving the list of references (branches or
+tags) from Gitea following a certain naming scheme
+
+=head1 DESCRIPTION
+
+This plugin reads the list of branches or tags using Gitea's REST API. The name
+of the reference must follow a particular prefix. This list is stored in the
+nix-store and used as an input to declarative jobsets.
+
+=head1 CONFIGURATION
+
+The plugin doesn't require any dedicated configuration block, but it has to
+consult C<gitea_authorization> entry for obtaining the API token. In addition,
+
+The declarative project C<spec.json> file must contains an input such as
+
+   "pulls": {
+     "type": "gitea_refs",
+     "value": "[gitea_hostname] [owner] [repo] heads|tags [scheme] - [prefix]",
+     "emailresponsible": false
+   }
+
+In the above snippet, C<[gitea_hostname]> must be set to the hostname of the
+repository's Gitea instance.
+
+C<[owner]> is the repository owner and C<[repo]> is the repository name. Also
+note a literal C<->, which is placed there for the future use.
+
+C<heads|tags> denotes that one of these two is allowed, that is, the third
+position should hold either the C<heads> or the C<tags> keyword. In case of the former, the plugin
+will fetch all branches, while in case of the latter, it will fetch the tags.
+
+C<scheme> should be set to either https or http, depending on what the Gitea
+host supports.
+
+C<prefix> denotes the prefix the reference name must start with, in order to be
+included.
+
+For example, C<"value": "projects.blender.org blender blender heads https - blender-v/"> refers to
+L<https://projects.blender.org/blender/blender> repository, and will fetch all branches that
+begin with C<blender-v/>.
+
+=head1 USE
+
+The result is stored in the nix-store as a JSON I<map>, where the key is the
+name of the reference, while the value is the complete Gitea response. Thus,
+any of the values listed in
+L<https://docs.gitea.com/api#tag/repository/operation/repoListAllGitRefs> can be
+used to build the git input value in C<jobsets.nix>.
+
+=cut
+
+sub supportedInputTypes {
+    my ($self, $inputTypes) = @_;
+    $inputTypes->{'gitea_refs'} = 'Open Gitea Refs';
+}
+
+sub _iterate {
+    my ($url, $auth, $refs, $ua) = @_;
+    my $req = HTTP::Request->new('GET', $url);
+    $req->header('Accept' => 'application/json');
+    $req->header('Authorization' => $auth) if defined $auth;
+    my $res = $ua->request($req);
+    my $content = $res->decoded_content;
+    die "Error pulling from the gitea refs API: $content\n"
+        unless $res->is_success;
+    my $refs_list = decode_json $content;
+    # TODO Stream out the json instead
+    foreach my $ref (@$refs_list) {
+        my $ref_name = $ref->{ref};
+        $ref_name =~ s,^refs/(?:heads|tags)/,,o;
+        $refs->{$ref_name} = $ref;
+    }
+    # TODO Make Link header parsing more robust!!!
+    my @links = split ',', $res->header("Link");
+    my $next = "";
+    foreach my $link (@links) {
+        my ($url, $rel) = split ";", $link;
+        if (trim($rel) eq 'rel="next"') {
+            $next = substr trim($url), 1, -1;
+            last;
+        }
+    }
+    _iterate($next, $auth, $refs, $ua) unless $next eq "";
+}
+
+sub fetchInput {
+    my ($self, $input_type, $name, $value, $project, $jobset) = @_;
+    return undef if $input_type ne "gitea_refs";
+
+    my ($giteaHostname, $owner, $repo, $type, $scheme, $fut, $prefix) = split ' ', $value;
+    die "type field is neither 'heads' nor 'tags', but '$type'"
+        unless $type eq 'heads' or $type eq 'tags';
+    die "scheme field is neither 'https' nor 'http' but '$scheme'"
+        unless $scheme eq 'https' or $scheme eq 'http';
+
+    my $auth = $self->{config}->{gitea_authorization}->{$owner};
+    my $giteaEndpoint = "$scheme://$giteaHostname";
+    my %refs;
+    my $ua = LWP::UserAgent->new();
+    _iterate("$giteaEndpoint/api/v1/repos/$owner/$repo/git/refs/$type/$prefix?per_page=100", $auth, \%refs, $ua);
+    my $tempdir = File::Temp->newdir("gitea-refs" . "XXXXX", TMPDIR => 1);
+    my $filename = "$tempdir/gitea-refs.json";
+    open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
+    print $fh encode_json \%refs;
+    close $fh;
+    system("jq -S . < $filename > $tempdir/gitea-refs-sorted.json");
+    my $storePath = trim(qx{nix-store --add "$tempdir/gitea-refs-sorted.json"}
+        or die "cannot copy path $filename to the Nix store.\n");
+    chomp $storePath;
+    my $timestamp = time;
+    return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
+}
+
+1;
@@ -105,4 +105,6 @@ __PACKAGE__->add_column(
     "+id" => { retrieve_on_insert => 1 }
 );

+__PACKAGE__->mk_group_accessors('column' => 'has_error');
+
 1;

@@ -386,6 +386,8 @@ __PACKAGE__->add_column(
     "+id" => { retrieve_on_insert => 1 }
 );

+__PACKAGE__->mk_group_accessors('column' => 'has_error');
+
 sub supportsDynamicRunCommand {
     my ($self) = @_;

src/lib/Hydra/Schema/ResultSet/EvaluationErrors.pm (new file, 30 lines)
@@ -0,0 +1,30 @@
+package Hydra::Schema::ResultSet::EvaluationErrors;
+
+use strict;
+use utf8;
+use warnings;
+
+use parent 'DBIx::Class::ResultSet';
+
+use Storable qw(dclone);
+
+__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
+
+# Exclude expensive error message values unless explicitly requested, and
+# replace them with a summary field describing their presence/absence.
+sub search_rs {
+    my ( $class, $query, $attrs ) = @_;
+
+    if ($attrs) {
+        $attrs = dclone($attrs);
+    }
+
+    unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
+        $attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
+    }
+    unless (exists $attrs->{'+columns'}->{'errormsg'}) {
+        push @{ $attrs->{'remove_columns'} }, 'errormsg';
+    }
+
+    return $class->next::method($query, $attrs);
+}
src/lib/Hydra/Schema/ResultSet/Jobsets.pm (new file, 30 lines)
@@ -0,0 +1,30 @@
+package Hydra::Schema::ResultSet::Jobsets;
+
+use strict;
+use utf8;
+use warnings;
+
+use parent 'DBIx::Class::ResultSet';
+
+use Storable qw(dclone);
+
+__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
+
+# Exclude expensive error message values unless explicitly requested, and
+# replace them with a summary field describing their presence/absence.
+sub search_rs {
+    my ( $class, $query, $attrs ) = @_;
+
+    if ($attrs) {
+        $attrs = dclone($attrs);
+    }
+
+    unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
+        $attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
+    }
+    unless (exists $attrs->{'+columns'}->{'errormsg'}) {
+        push @{ $attrs->{'remove_columns'} }, 'errormsg';
+    }
+
+    return $class->next::method($query, $attrs);
+}
@@ -6,6 +6,7 @@ use base 'Catalyst::View::TT';
 use Template::Plugin::HTML;
 use Hydra::Helper::Nix;
 use Time::Seconds;
+use Digest::SHA qw(sha1_hex);

 __PACKAGE__->config(
     TEMPLATE_EXTENSION => '.tt',

@@ -25,8 +26,14 @@ __PACKAGE__->config(
     makeNameTextForJobset
     relativeDuration
     stripSSHUser
+    metricDivId
 /]);

+sub metricDivId {
+    my ($self, $c, $text) = @_;
+    return "metric-" . sha1_hex($text);
+}
+
 sub buildLogExists {
     my ($self, $c, $build) = @_;
     return 1 if defined $c->config->{log_prefix};
@@ -2,8 +2,8 @@

 #include <pqxx/pqxx>

-#include "environment-variables.hh"
-#include "util.hh"
+#include <nix/util/environment-variables.hh>
+#include <nix/util/util.hh>

 struct Connection : pqxx::connection

@@ -2,8 +2,8 @@

 #include <map>

-#include "file-system.hh"
-#include "util.hh"
+#include <nix/util/file-system.hh>
+#include <nix/util/util.hh>

 struct HydraConfig
 {
@@ -61,21 +61,7 @@ END;
 <td>[% IF step.busy != 0 || ((step.machine || step.starttime) && (step.status == 0 || step.status == 1 || step.status == 3 || step.status == 4 || step.status == 7)); INCLUDE renderMachineName machine=step.machine; ELSE; "<em>n/a</em>"; END %]</td>
 <td class="step-status">
 [% IF step.busy != 0 %]
-  [% IF step.busy == 1 %]
-    <strong>Preparing</strong>
-  [% ELSIF step.busy == 10 %]
-    <strong>Connecting</strong>
-  [% ELSIF step.busy == 20 %]
-    <strong>Sending inputs</strong>
-  [% ELSIF step.busy == 30 %]
-    <strong>Building</strong>
-  [% ELSIF step.busy == 40 %]
-    <strong>Receiving outputs</strong>
-  [% ELSIF step.busy == 50 %]
-    <strong>Post-processing</strong>
-  [% ELSE %]
-    <strong>Unknown state</strong>
-  [% END %]
+  [% INCLUDE renderBusyStatus %]
 [% ELSIF step.status == 0 %]
   [% IF step.isnondeterministic %]
     <span class="warn">Succeeded with non-determistic result</span>
@@ -91,6 +91,17 @@ BLOCK renderDuration;
 duration % 60 %]s[%
 END;

+BLOCK renderDrvInfo;
+  drvname = step.drvpath
+    .substr(11)                     # strip `/nix/store/`
+    .split('-').slice(1).join("-")  # strip hash part
+    .substr(0, -4);                 # strip `.drv`
+  IF drvname != releasename;
+    IF step.type == 0; action = "Build"; ELSE; action = "Substitution"; END;
+    IF drvname; %]<em> ([% action %] of [% drvname %])</em>[% END;
+  END;
+END;
+

 BLOCK renderBuildListHeader %]
 <table class="table table-striped table-condensed clickable-rows">
@@ -131,7 +142,12 @@ BLOCK renderBuildListBody;
   [% END %]
   <td><a class="row-link" href="[% link %]">[% build.id %]</a></td>
   [% IF !hideJobName %]
-    <td><a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</td>
+    <td>
+      <a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</a>
+      [% IF showStepName %]
+        [% INCLUDE renderDrvInfo step=build.buildsteps releasename=build.nixname %]
+      [% END %]
+    </td>
   [% END %]
   <td class="nowrap">[% t = showSchedulingInfo ? build.timestamp : build.stoptime; IF t; INCLUDE renderRelativeDate timestamp=(showSchedulingInfo ? build.timestamp : build.stoptime); ELSE; "-"; END %]</td>
   <td>[% !showSchedulingInfo and build.get_column('releasename') ? build.get_column('releasename') : build.nixname %]</td>
@@ -245,6 +261,27 @@ BLOCK renderBuildStatusIcon;
 END;

+BLOCK renderBusyStatus;
+  IF step.busy == 1 %]
+    <strong>Preparing</strong>
+  [% ELSIF step.busy == 10 %]
+    <strong>Connecting</strong>
+  [% ELSIF step.busy == 20 %]
+    <strong>Sending inputs</strong>
+  [% ELSIF step.busy == 30 %]
+    <strong>Building</strong>
+  [% ELSIF step.busy == 35 %]
+    <strong>Waiting to receive outputs</strong>
+  [% ELSIF step.busy == 40 %]
+    <strong>Receiving outputs</strong>
+  [% ELSIF step.busy == 50 %]
+    <strong>Post-processing</strong>
+  [% ELSE %]
+    <strong>Unknown state</strong>
+  [% END;
+END;
+

 BLOCK renderStatus;
   IF build.finished;
     buildstatus = build.buildstatus;
@@ -476,7 +513,7 @@ BLOCK renderEvals %]
       ELSE %]
         -
       [% END %]
-      [% IF eval.evaluationerror.errormsg %]
+      [% IF eval.evaluationerror.has_error %]
         <span class="badge badge-warning">Eval Errors</span>
       [% END %]
     </td>

@@ -602,7 +639,7 @@ BLOCK renderJobsetOverview %]
   <td>[% HTML.escape(j.description) %]</td>
   <td>[% IF j.lastcheckedtime;
       INCLUDE renderDateTime timestamp = j.lastcheckedtime;
-      IF j.errormsg || j.fetcherrormsg; %] <span class = 'badge badge-warning'>Error</span>[% END;
+      IF j.has_error || j.fetcherrormsg; %] <span class = 'badge badge-warning'>Error</span>[% END;
       ELSE; "-";
       END %]</td>
   [% IF j.get_column('nrtotal') > 0 %]
src/root/eval-error.tt (new file, 26 lines)
@@ -0,0 +1,26 @@
+[% PROCESS common.tt %]
+<!DOCTYPE html>
+
+<html lang="en">
+
+  <head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+    [% INCLUDE style.tt %]
+  </head>
+
+  <body>
+
+    <div class="tab-content tab-pane">
+      <div id="tabs-errors" class="">
+        [% IF jobset %]
+          <p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
+          <div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
+        [% ELSIF eval %]
+          <p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
+          <div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
+        [% END %]
+      </div>
+    </div>
+  </body>
+</html>
@@ -18,8 +18,7 @@

 <h3>Metric: <a [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]><tt>[%HTML.escape(metric.name)%]</tt></a></h3>

-[% id = "metric-" _ metric.name;
-   id = id.replace('\.', '_');
+[% id = metricDivId(metric.name);
    INCLUDE createChart dataUrl=c.uri_for('/job' project.name jobset.name job 'metric' metric.name); %]

 [% END %]
@@ -65,7 +65,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
 [% END %]

 [% IF aborted.size > 0 %]
-  <li class="nav-item"><a class="nav-link" href="#tabs-aborted" data-toggle="tab"><span class="text-warning">Aborted Jobs ([% aborted.size %])</span></a></li>
+  <li class="nav-item"><a class="nav-link" href="#tabs-aborted" data-toggle="tab"><span class="text-warning">Aborted / Timed out Jobs ([% aborted.size %])</span></a></li>
 [% END %]
 [% IF nowFail.size > 0 %]
   <li class="nav-item"><a class="nav-link" href="#tabs-now-fail" data-toggle="tab"><span class="text-warning">Newly Failing Jobs ([% nowFail.size %])</span></a></li>

@@ -90,7 +90,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
 [% END %]
 <li class="nav-item"><a class="nav-link" href="#tabs-inputs" data-toggle="tab">Inputs</a></li>

-[% IF eval.evaluationerror.errormsg %]
+[% IF eval.evaluationerror.has_error %]
   <li class="nav-item"><a class="nav-link" href="#tabs-errors" data-toggle="tab"><span class="text-warning">Evaluation Errors</span></a></li>
 [% END %]
 </ul>

@@ -108,13 +108,6 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),

 <div class="tab-content">

-[% IF eval.evaluationerror.errormsg %]
-  <div id="tabs-errors" class="tab-pane">
-    <p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
-    <div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
-  </div>
-[% END %]
-
 <div id="tabs-aborted" class="tab-pane">
   [% INCLUDE renderSome builds=aborted tabname="#tabs-aborted" %]
 </div>

@@ -172,10 +165,9 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
 [% END %]
 </div>

-[% IF eval.evaluationerror.errormsg %]
+[% IF eval.evaluationerror.has_error %]
   <div id="tabs-errors" class="tab-pane">
-    <p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
-    <div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
+    <iframe src="[% c.uri_for(c.controller('JobsetEval').action_for('errors'), [eval.id], params) %]" loading="lazy" frameBorder="0" width="100%"></iframe>
   </div>
 [% END %]
 </div>
@@ -61,7 +61,7 @@
 [% END %]

 <li class="nav-item"><a class="nav-link active" href="#tabs-evaluations" data-toggle="tab">Evaluations</a></li>
-[% IF jobset.errormsg || jobset.fetcherrormsg %]
+[% IF jobset.has_error || jobset.fetcherrormsg %]
   <li class="nav-item"><a class="nav-link" href="#tabs-errors" data-toggle="tab"><span class="text-warning">Evaluation Errors</span></a></li>
 [% END %]
 <li class="nav-item"><a class="nav-link" href="#tabs-jobs" data-toggle="tab">Jobs</a></li>

@@ -79,7 +79,7 @@
 <th>Last checked:</th>
 <td>
   [% IF jobset.lastcheckedtime %]
-    [% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.errormsg || jobset.fetcherrormsg %]<em class="text-warning">with errors!</em>[% ELSE %]<em>no errors</em>[% END %]
+    [% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.has_error || jobset.fetcherrormsg %]<em class="text-warning">with errors!</em>[% ELSE %]<em>no errors</em>[% END %]
   [% ELSE %]
     <em>never</em>
   [% END %]

@@ -117,10 +117,9 @@

 </div>

-[% IF jobset.errormsg || jobset.fetcherrormsg %]
+[% IF jobset.has_error || jobset.fetcherrormsg %]
   <div id="tabs-errors" class="tab-pane">
-    <p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
-    <div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
+    <iframe src="[% c.uri_for('/jobset' project.name jobset.name "errors") %]" loading="lazy" frameBorder="0" width="100%"></iframe>
   </div>
 [% END %]
@@ -10,31 +10,7 @@

 <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
 <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+[% INCLUDE style.tt %]
-<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-3.4.1.min.js") %]"></script>
-<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-ui-1.10.4.min.js") %]"></script>
-<script type="text/javascript" src="[% c.uri_for("/static/js/moment/moment-2.24.0.min.js") %]"></script>
-
-<meta name="viewport" content="width=device-width, initial-scale=1.0" />
-
-<link href="[% c.uri_for("/static/fontawesome/css/all.css") %]" rel="stylesheet" />
-<script type="text/javascript" src="[% c.uri_for("/static/js/popper.min.js") %]"></script>
-<script type="text/javascript" src="[% c.uri_for("/static/bootstrap/js/bootstrap.min.js") %]"></script>
-<link href="[% c.uri_for("/static/bootstrap/css/bootstrap.min.css") %]" rel="stylesheet" />
-
-<!-- hydra.css may need to be moved to before boostrap to make the @media rule work. -->
-<link rel="stylesheet" href="[% c.uri_for("/static/css/hydra.css") %]" type="text/css" />
-<link rel="stylesheet" href="[% c.uri_for("/static/css/rotated-th.css") %]" type="text/css" />
-
-<style>
-  .popover { max-width: 40%; }
-</style>
-
-<script type="text/javascript" src="[% c.uri_for("/static/js/bootbox.min.js") %]"></script>
-
-<link rel="stylesheet" href="[% c.uri_for("/static/css/tree.css") %]" type="text/css" />
-
-<script type="text/javascript" src="[% c.uri_for("/static/js/common.js") %]"></script>

 [% IF c.config.enable_google_login %]
   <meta name="google-signin-client_id" content="[% c.config.google_client_id %]">
@@ -6,10 +6,10 @@
 <thead>
 <tr>
 <th>Job</th>
-<th>System</th>
 <th>Build</th>
 <th>Step</th>
 <th>What</th>
+<th>Status</th>
 <th>Since</th>
 </tr>
 </thead>

@@ -40,10 +40,10 @@
 [% idle = 0 %]
 <tr>
 <td><tt>[% INCLUDE renderFullJobName project=step.project jobset=step.jobset job=step.job %]</tt></td>
-<td><tt>[% step.system %]</tt></td>
 <td><a href="[% c.uri_for('/build' step.build) %]">[% step.build %]</a></td>
 <td>[% IF step.busy >= 30 %]<a class="row-link" href="[% c.uri_for('/build' step.build 'nixlog' step.stepnr 'tail') %]">[% step.stepnr %]</a>[% ELSE; step.stepnr; END %]</td>
 <td><tt>[% step.drvpath.match('-(.*)').0 %]</tt></td>
+<td>[% INCLUDE renderBusyStatus %]</td>
 <td style="width: 10em">[% INCLUDE renderDuration duration = curTime - step.starttime %] </td>
 </tr>
 [% END %]
@@ -129,6 +129,12 @@ $(document).ready(function() {
       el.addClass("is-local");
     }
   });
+
+  [...document.getElementsByTagName("iframe")].forEach((element) => {
+    element.contentWindow.addEventListener("DOMContentLoaded", (_) => {
+      element.style.height = element.contentWindow.document.body.scrollHeight + 'px';
+    })
+  })
 });

 var tabsLoaded = {};
@@ -7,7 +7,7 @@

 [% ELSE %]

-[% INCLUDE renderBuildList builds=resource showSchedulingInfo=1 hideResultInfo=1 busy=1 %]
+[% INCLUDE renderBuildList builds=resource showSchedulingInfo=1 hideResultInfo=1 busy=1 showStepName=1 %]

 [% END %]

src/root/style.tt (new file)
@@ -0,0 +1,24 @@
+<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-3.4.1.min.js") %]"></script>
+<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-ui-1.10.4.min.js") %]"></script>
+<script type="text/javascript" src="[% c.uri_for("/static/js/moment/moment-2.24.0.min.js") %]"></script>
+
+<meta name="viewport" content="width=device-width, initial-scale=1.0" />
+
+<link href="[% c.uri_for("/static/fontawesome/css/all.css") %]" rel="stylesheet" />
+<script type="text/javascript" src="[% c.uri_for("/static/js/popper.min.js") %]"></script>
+<script type="text/javascript" src="[% c.uri_for("/static/bootstrap/js/bootstrap.min.js") %]"></script>
+<link href="[% c.uri_for("/static/bootstrap/css/bootstrap.min.css") %]" rel="stylesheet" />
+
+<!-- hydra.css may need to be moved to before boostrap to make the @media rule work. -->
+<link rel="stylesheet" href="[% c.uri_for("/static/css/hydra.css") %]" type="text/css" />
+<link rel="stylesheet" href="[% c.uri_for("/static/css/rotated-th.css") %]" type="text/css" />
+
+<style>
+  .popover { max-width: 40%; }
+</style>
+
+<script type="text/javascript" src="[% c.uri_for("/static/js/bootbox.min.js") %]"></script>
+
+<link rel="stylesheet" href="[% c.uri_for("/static/css/tree.css") %]" type="text/css" />
+
+<script type="text/javascript" src="[% c.uri_for("/static/js/common.js") %]"></script>
@@ -773,6 +773,9 @@ sub checkJobsetWrapped {
     my $jobsetChanged = 0;
     my %buildMap;

+    my @jobs;
+    push @jobs, $_ while defined($_ = $jobsIter->());
+
     $db->txn_do(sub {
         my $prevEval = getPrevJobsetEval($db, $jobset, 1);

@@ -796,7 +799,7 @@ sub checkJobsetWrapped {

         my @jobsWithConstituents;

-        while (defined(my $job = $jobsIter->())) {
+        foreach my $job (@jobs) {
             if ($jobsetsJobset) {
                 die "The .jobsets jobset must only have a single job named 'jobsets'"
                     unless $job->{attr} eq "jobsets";
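The evaluator previously consumed the job iterator lazily inside the loop; it now drains it into `@jobs` before the database transaction begins. A plausible motivation (my assumption, not stated in the diff) is that resolving constituent glob patterns such as `packages.*` needs the complete set of job names before any aggregate can be checked. A self-contained sketch of the pattern, with hypothetical data standing in for real evaluator output:

```perl
use strict;
use warnings;

# Hypothetical stand-in for the evaluator's job iterator: returns the
# next job hashref on each call, then undef when exhausted.
my @stream = ({ attr => 'packages.a' }, { attr => 'packages.b' }, { attr => 'ok_aggregate' });
my $jobsIter = sub { shift @stream };

# Drain the iterator first so every later pass sees the full job list.
my @jobs;
push @jobs, $_ while defined($_ = $jobsIter->());

# With the complete list available, a glob like "packages.*" can be
# resolved against all job names in a second pass.
my @matches = grep { $_->{attr} =~ /^packages\./ } @jobs;
printf "%d jobs, %d matching packages.*\n", scalar @jobs, scalar @matches;
```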
@@ -32,4 +32,9 @@ subtest "/jobset/PROJECT/JOBSET/evals" => sub {
     ok($jobsetevals->is_success, "The page showing the jobset evals returns 200.");
 };

+subtest "/jobset/PROJECT/JOBSET/errors" => sub {
+    my $jobsetevals = request(GET '/jobset/' . $project->name . '/' . $jobset->name . '/errors');
+    ok($jobsetevals->is_success, "The page showing the jobset eval errors returns 200.");
+};
+
 done_testing;
@@ -35,6 +35,10 @@ subtest "Fetching the eval's overview" => sub {
     is($fetch->code, 200, "channel page is 200");
 };

+subtest "Fetching the eval's errors" => sub {
+    my $fetch = request(GET '/eval/' . $eval->id . '/errors');
+    is($fetch->code, 200, "errors page is 200");
+};

 done_testing;
@@ -6,27 +6,55 @@ use Hydra::Helper::Exec;

 my $ctx = test_context();

-my $jobsetCtx = $ctx->makeJobset(
-    expression => 'constituents-broken.nix',
-);
-my $jobset = $jobsetCtx->{"jobset"};
-
-my ($res, $stdout, $stderr) = captureStdoutStderr(60,
-    ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
-);
-isnt($res, 0, "hydra-eval-jobset exits non-zero");
-ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
-like(
-    $stderr,
-    qr/aggregate job ‘mixed_aggregate’ failed with the error: "constituentA": does not exist/,
-    "The stderr record includes a relevant error message"
-);
-
-$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
-like(
-    $jobset->errormsg,
-    qr/aggregate job ‘mixed_aggregate’ failed with the error: "constituentA": does not exist/,
-    "The jobset records a relevant error message"
-);
+subtest "broken constituents expression" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-broken.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    isnt($res, 0, "hydra-eval-jobset exits non-zero");
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+    like(
+        $stderr,
+        qr/aggregate job 'mixed_aggregate' references non-existent job 'constituentA'/,
+        "The stderr record includes a relevant error message"
+    );
+
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/,
+        "The jobset records a relevant error message"
+    );
+};
+
+subtest "no matches" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-no-matches.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    isnt($res, 0, "hydra-eval-jobset exits non-zero");
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+    like(
+        $stderr,
+        qr/aggregate job 'non_match_aggregate' references constituent glob pattern 'tests\.\*' with no matches/,
+        "The stderr record includes a relevant error message"
+    );
+
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/in job ‘non_match_aggregate’:\ntests\.\*: constituent glob pattern had no matches/,
+        "The jobset records a relevant error message"
+    );
+};

 done_testing;
t/evaluator/evaluate-constituents-globbing.t (new file)
@@ -0,0 +1,138 @@
+use strict;
+use warnings;
+use Setup;
+use Test2::V0;
+use Hydra::Helper::Exec;
+use Data::Dumper;
+
+my $ctx = test_context();
+
+subtest "general glob testing" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-glob.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    is($res, 0, "hydra-eval-jobset exits zero");
+
+    my $builds = {};
+    for my $build ($jobset->builds) {
+        $builds->{$build->job} = $build;
+    }
+
+    subtest "basic globbing works" => sub {
+        ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
+        my @constituents = $builds->{"ok_aggregate"}->constituents->all;
+        is(2, scalar @constituents, "'ok_aggregate' has two constituents");
+
+        my @sortedConstituentNames = sort (map { $_->nixname } @constituents);
+
+        is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
+        is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
+    };
+
+    subtest "transitivity is OK" => sub {
+        ok(defined $builds->{"indirect_aggregate"}, "'indirect_aggregate' is part of the jobset evaluation");
+        my @constituents = $builds->{"indirect_aggregate"}->constituents->all;
+        is(1, scalar @constituents, "'indirect_aggregate' has one constituent");
+        is($constituents[0]->nixname, "direct_aggregate", "'indirect_aggregate' has 'direct_aggregate' as single constituent");
+    };
+};
+
+subtest "* selects all except current aggregate" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-glob-all.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+
+    subtest "no eval errors" => sub {
+        ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+        ok(
+            $stderr !~ "aggregate job ‘ok_aggregate’ has a constituent .* that doesn't correspond to a Hydra build",
+            "Catchall wildcard must not select itself as constituent"
+        );
+
+        $jobset->discard_changes; # refresh from DB
+        is(
+            $jobset->has_error,
+            0,
+            "eval-errors are empty"
+        );
+    };
+
+    my $builds = {};
+    for my $build ($jobset->builds) {
+        $builds->{$build->job} = $build;
+    }
+
+    subtest "two constituents" => sub {
+        ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
+        my @constituents = $builds->{"ok_aggregate"}->constituents->all;
+        is(2, scalar @constituents, "'ok_aggregate' has two constituents");
+
+        my @sortedConstituentNames = sort (map { $_->nixname } @constituents);
+
+        is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
+        is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
+    };
+};
+
+subtest "trivial cycle check" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-cycle.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+
+    ok(
+        $stderr =~ "Found dependency cycle between jobs 'indirect_aggregate' and 'ok_aggregate'",
+        "Dependency cycle error is on stderr"
+    );
+
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/Dependency cycle: indirect_aggregate <-> ok_aggregate/,
+        "eval-errors are non-empty"
+    );
+
+    is(0, $jobset->builds->count, "No builds should be scheduled");
+};
+
+subtest "cycle check with globbing" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-cycle-glob.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/aggregate job ‘indirect_aggregate’ failed with the error: Dependency cycle: indirect_aggregate <-> packages.constituentA/,
+        "eval error mentions packages.constituentA"
+    );
+
+    # on this branch of Hydra, hydra-eval-jobset fails hard if an aggregate
+    # job is broken.
+    is(0, $jobset->builds->count, "Zero jobs are scheduled");
+};
+
+done_testing;
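The fixtures below exercise `_hydraGlobConstituents`, which lets an aggregate name its constituents by glob pattern rather than by exact job name. The diff does not show the evaluator's matching code; as a rough illustration only (helper name and translation rule are mine), a glob such as `packages.*` can be turned into an anchored regex and matched against every job name collected during evaluation:

```perl
use strict;
use warnings;

# Hypothetical helper: translate a constituent glob into an anchored
# regex ('*' matches any run of characters, everything else is literal).
sub glob_to_regex {
    my ($glob) = @_;
    my $re = quotemeta($glob);
    $re =~ s/\\\*/.*/g;   # quotemeta turned '*' into '\*'; relax it back
    return qr/^$re$/;
}

my @jobNames = ("packages.constituentA", "packages.constituentB", "ok_aggregate");
my $pattern  = glob_to_regex("packages.*");
my @matches  = grep { $_ =~ $pattern } @jobNames;
print "matched: @matches\n";   # matched: packages.constituentA packages.constituentB
```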
t/jobs/config.nix (new file)
@@ -0,0 +1,14 @@
+rec {
+  path = "/nix/store/l9mg93sgx50y88p5rr6x1vib6j1rjsds-coreutils-9.1/bin";
+
+  mkDerivation = args:
+    derivation ({
+      system = builtins.currentSystem;
+      PATH = path;
+    } // args);
+  mkContentAddressedDerivation = args: mkDerivation ({
+    __contentAddressed = true;
+    outputHashMode = "recursive";
+    outputHashAlgo = "sha256";
+  } // args);
+}
t/jobs/constituents-cycle-glob.nix (new file)
@@ -0,0 +1,34 @@
+with import ./config.nix;
+{
+  packages.constituentA = mkDerivation {
+    name = "empty-dir-A";
+    builder = ./empty-dir-builder.sh;
+    _hydraAggregate = true;
+    _hydraGlobConstituents = true;
+    constituents = [ "*_aggregate" ];
+  };
+
+  packages.constituentB = mkDerivation {
+    name = "empty-dir-B";
+    builder = ./empty-dir-builder.sh;
+  };
+
+  ok_aggregate = mkDerivation {
+    name = "direct_aggregate";
+    _hydraAggregate = true;
+    _hydraGlobConstituents = true;
+    constituents = [
+      "packages.*"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+
+  indirect_aggregate = mkDerivation {
+    name = "indirect_aggregate";
+    _hydraAggregate = true;
+    constituents = [
+      "ok_aggregate"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+}
t/jobs/constituents-cycle.nix (new file)
@@ -0,0 +1,21 @@
+with import ./config.nix;
+{
+  ok_aggregate = mkDerivation {
+    name = "direct_aggregate";
+    _hydraAggregate = true;
+    _hydraGlobConstituents = true;
+    constituents = [
+      "indirect_aggregate"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+
+  indirect_aggregate = mkDerivation {
+    name = "indirect_aggregate";
+    _hydraAggregate = true;
+    constituents = [
+      "ok_aggregate"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+}
t/jobs/constituents-glob-all.nix (new file)
@@ -0,0 +1,22 @@
+with import ./config.nix;
+{
+  packages.constituentA = mkDerivation {
+    name = "empty-dir-A";
+    builder = ./empty-dir-builder.sh;
+  };
+
+  packages.constituentB = mkDerivation {
+    name = "empty-dir-B";
+    builder = ./empty-dir-builder.sh;
+  };
+
+  ok_aggregate = mkDerivation {
+    name = "direct_aggregate";
+    _hydraAggregate = true;
+    _hydraGlobConstituents = true;
+    constituents = [
+      "*"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+}
t/jobs/constituents-glob.nix (new file)
@@ -0,0 +1,31 @@
+with import ./config.nix;
+{
+  packages.constituentA = mkDerivation {
+    name = "empty-dir-A";
+    builder = ./empty-dir-builder.sh;
+  };
+
+  packages.constituentB = mkDerivation {
+    name = "empty-dir-B";
+    builder = ./empty-dir-builder.sh;
+  };
+
+  ok_aggregate = mkDerivation {
+    name = "direct_aggregate";
+    _hydraAggregate = true;
+    _hydraGlobConstituents = true;
+    constituents = [
+      "packages.*"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+
+  indirect_aggregate = mkDerivation {
+    name = "indirect_aggregate";
+    _hydraAggregate = true;
+    constituents = [
+      "ok_aggregate"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+}
t/jobs/constituents-no-matches.nix (new file)
@@ -0,0 +1,20 @@
+with import ./config.nix;
+{
+  non_match_aggregate = mkDerivation {
+    name = "mixed_aggregate";
+    _hydraAggregate = true;
+    _hydraGlobConstituents = true;
+    constituents = [
+      "tests.*"
+    ];
+    builder = ./empty-dir-builder.sh;
+  };
+
+  # Without a second job, Hydra doesn't attempt to create the jobset
+  # at all (the only job would be broken), so the constituent
+  # validation is never reached.
+  dummy = mkDerivation {
+    name = "dummy";
+    builder = ./empty-dir-builder.sh;
+  };
+}
t/jobs/declarative/project.json (new file)
@@ -0,0 +1,24 @@
+{
+    "enabled": 1,
+    "hidden": false,
+    "description": "declarative-jobset-example",
+    "nixexprinput": "src",
+    "nixexprpath": "declarative/generator.nix",
+    "checkinterval": 300,
+    "schedulingshares": 100,
+    "enableemail": false,
+    "emailoverride": "",
+    "keepnr": 3,
+    "inputs": {
+        "src": {
+            "type": "path",
+            "value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
+            "emailresponsible": false
+        },
+        "jobspath": {
+            "type": "string",
+            "value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
+            "emailresponsible": false
+        }
+    }
+}
@@ -14,7 +14,7 @@ our @EXPORT = qw(
 sub evalSucceeds {
     my ($jobset) = @_;
     my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-eval-jobset", $jobset->project->name, $jobset->name));
-    $jobset->discard_changes; # refresh from DB
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
     if ($res) {
         chomp $stdout; chomp $stderr;
         utf8::decode($stdout) or die "Invalid unicode in stdout.";

@@ -29,7 +29,7 @@ sub evalSucceeds {
 sub evalFails {
     my ($jobset) = @_;
     my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-eval-jobset", $jobset->project->name, $jobset->name));
-    $jobset->discard_changes; # refresh from DB
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
    if (!$res) {
         chomp $stdout; chomp $stderr;
         utf8::decode($stdout) or die "Invalid unicode in stdout.";
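These test helpers now pass `'+columns'` to `discard_changes`. The pattern suggests `errormsg` was dropped from the result class's default column set (presumably because it can be large and is now served from the dedicated errors endpoint), so a test that wants to inspect it must request it explicitly when refreshing the row. A small wrapper illustrating the idiom (the function name is mine, not from the diff):

```perl
# Hypothetical convenience wrapper around the idiom used above.
sub refresh_with_errormsg {
    my ($row) = @_;
    # discard_changes re-reads the row from the database; '+columns'
    # adds 'errormsg' back into the SELECT even though the result
    # class no longer fetches it by default.
    $row->discard_changes({ '+columns' => { 'errormsg' => 'errormsg' } });
    return $row->errormsg;
}
```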
@@ -22,11 +22,11 @@ is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/broken-constituent.nix

 like(
     $jobset->errormsg,
-    qr/^"does-not-exist": does not exist$/m,
+    qr/^does-not-exist: does not exist$/m,
     "Evaluating jobs/broken-constituent.nix should log an error for does-not-exist");
 like(
     $jobset->errormsg,
-    qr/^"does-not-evaluate": "error: assertion 'false' failed/m,
+    qr/^does-not-evaluate: error: assertion 'false' failed/m,
     "Evaluating jobs/broken-constituent.nix should log an error for does-not-evaluate");

 done_testing;
@@ -13,7 +13,7 @@ my $constituentBuildA = $builds->{"constituentA"};
 my $constituentBuildB = $builds->{"constituentB"};

 my $eval = $constituentBuildA->jobsetevals->first();
-is($eval->evaluationerror->errormsg, "");
+is($eval->evaluationerror->has_error, 0);

 subtest "Verifying the direct aggregate" => sub {
     my $aggBuild = $builds->{"direct_aggregate"};