6 Commits

Author SHA1 Message Date
678dd3203c add gitea refs to docs 2025-03-31 04:04:52 -04:00
73cad96b6f add gitea ref tests 2025-03-31 04:04:39 -04:00
9b649be8b5 fix api endpoint 2025-03-31 03:20:15 -04:00
4142d26c34 add gitea refs
Signed-off-by: ahuston-0 <aliceghuston@gmail.com>
2025-03-31 02:17:24 -04:00
48ced45675 add Gitea pulls docs entry
Signed-off-by: ahuston-0 <aliceghuston@gmail.com>
2025-03-31 02:15:46 -04:00
500ac83724 Add a plugin to poll Gitea pull requests
Based off the existing GithubPulls.pm and GitlabPulls.pm plugins.

Also adds an integration test for the new 'giteapulls' input type to
the existing 'gitea' test.
2025-03-31 02:15:46 -04:00
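The new plugin follows the same pattern as `GithubPulls.pm`: poll the Gitea REST API for open pull requests and hand the result to the evaluator as a JSON jobset input. A minimal sketch of that polling idea, assuming Gitea's `GET /api/v1/repos/{owner}/{repo}/pulls` endpoint (illustrative only, not the plugin's actual code):

```
use strict;
use warnings;
use LWP::UserAgent;
use JSON::MaybeXS qw(decode_json);

# Hypothetical helper: collect all open PRs for a repo, keyed by PR number,
# paging through the Gitea API until an empty page comes back.
sub fetchOpenPulls {
    my ($scheme, $host, $owner, $repo) = @_;
    my $ua = LWP::UserAgent->new(timeout => 30);
    my %pulls;
    my $page = 1;
    while (1) {
        my $url = "$scheme://$host/api/v1/repos/$owner/$repo/pulls?state=open&page=$page";
        my $res = $ua->get($url, 'Accept' => 'application/json');
        die "GET $url failed: " . $res->status_line unless $res->is_success;
        my $prs = decode_json($res->decoded_content);
        last unless @$prs;                     # empty page: done
        $pulls{ $_->{number} } = $_ for @$prs;
        $page++;
    }
    return \%pulls;
}
```

The `giteapulls` input value in the declarative spec further down (`nayeonie.com ahuston-0 hydra https`) appears to supply exactly these host, owner, repo, and scheme parameters.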
67 changed files with 411 additions and 1215 deletions


@ -1,10 +1,7 @@
name: "Test"
on:
pull_request:
merge_group:
push:
branches:
- master
jobs:
tests:
runs-on: ubuntu-latest

.gitignore (vendored)

@ -3,7 +3,6 @@
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
.hydra-data
result
result-*
outputs


@ -72,16 +72,17 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
```
$ nix build
$ nix-build
```
### Development Environment
You can use the provided shell.nix to get a working development environment:
```
$ nix develop
$ mesonConfigurePhase
$ ninja
$ nix-shell
$ autoreconfPhase
$ configurePhase # NOTE: not ./configure
$ make
```
### Executing Hydra During Development
@ -90,9 +91,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
can be done using [foreman](https://github.com/ddollar/foreman):
```
$ nix develop
$ nix-shell
$ # hack hack
$ ninja -C build
$ make
$ foreman start
```
@ -114,24 +115,22 @@ Start by following the steps in [Development Environment](#development-environme
Then, you can run the tests and the perlcritic linter together with:
```console
$ nix develop
$ ninja -C build test
$ nix-shell
$ make check
```
You can run a single test with:
```
$ nix develop
$ cd build
$ meson test --test-args=../t/Hydra/Event.t testsuite
$ nix-shell
$ yath test ./t/foo/bar.t
```
And you can run just perlcritic with:
```
$ nix develop
$ cd build
$ meson test perlcritic
$ nix-shell
$ make perlcritic
```
### JSON API


@ -63,7 +63,8 @@ following:
.. other configuration ..
location /hydra/ {
proxy_pass http://127.0.0.1:3000/;
proxy_pass http://127.0.0.1:3000;
proxy_redirect http://127.0.0.1:3000 https://example.com/hydra;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@ -73,9 +74,6 @@ following:
}
}
Note the trailing slash on the `proxy_pass` directive, which causes nginx to
strip off the `/hydra/` part of the URL before passing it to hydra (a request
for `/hydra/queue`, say, reaches hydra as `/queue`).
Populating a Cache
------------------


@ -11,6 +11,12 @@ $ cd hydra
To enter a shell in which all environment variables (such as `PERL5LIB`)
and dependencies can be found:
```console
$ nix-shell
```
or, when flakes are enabled:
```console
$ nix develop
```
@ -18,15 +24,15 @@ $ nix develop
To build Hydra, you should then do:
```console
$ mesonConfigurePhase
$ ninja
[nix-shell]$ autoreconfPhase
[nix-shell]$ configurePhase
[nix-shell]$ make -j$(nproc)
```
You start a local database, the webserver, and other components with
foreman:
```console
$ ninja -C build
$ foreman start
```
@ -41,11 +47,18 @@ $ ./src/script/hydra-server
You can run Hydra's test suite with the following:
```console
$ meson test
# to run as many tests as you have cores:
$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
[nix-shell]$ make check
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
[nix-shell]$ # or run yath directly:
[nix-shell]$ yath test
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ yath test -j $NIX_BUILD_CORES
```
When using `yath` instead of `make check`, ensure you have run `make`
in the root of the repository at least once.
**Warning**: Currently, the tests can fail
if run with high parallelism [due to an issue in
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
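If that bites you, capping the job count is a pragmatic workaround (a sketch; any small value works):

```console
[nix-shell]$ make check YATH_JOB_COUNT=4
```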
@ -62,7 +75,7 @@ will reload the page every time you save.
To build Hydra and its dependencies:
```console
$ nix build .#packages.x86_64-linux.default
$ nix-build release.nix -A build.x86_64-linux
```
## Development Tasks

flake.lock (generated)

@ -1,18 +1,27 @@
{
"nodes": {
"nix": {
"flake": false,
"inputs": {
"flake-compat": [],
"flake-parts": [],
"git-hooks-nix": [],
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-23-11": [],
"nixpkgs-regression": []
},
"locked": {
"lastModified": 1748154947,
"narHash": "sha256-rCpANMHFIlafta6J/G0ILRd+WNSnzv/lzi40Y8f1AR8=",
"lastModified": 1739899400,
"narHash": "sha256-q/RgA4bB7zWai4oPySq9mch7qH14IEeom2P64SXdqHs=",
"owner": "NixOS",
"repo": "nix",
"rev": "d761dad79c79af17aa476a29749bd9d69747548f",
"rev": "e310c19a1aeb1ce1ed4d41d5ab2d02db596e0918",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "2.29-maintenance",
"ref": "2.26-maintenance",
"repo": "nix",
"type": "github"
}
@ -20,11 +29,11 @@
"nix-eval-jobs": {
"flake": false,
"locked": {
"lastModified": 1748211873,
"narHash": "sha256-AJ22q6yWc1hPkqssXMxQqD6QUeJ6hbx52xWHhKsmuP0=",
"lastModified": 1739500569,
"narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
"owner": "nix-community",
"repo": "nix-eval-jobs",
"rev": "d9262e535e35454daebcebd434bdb9c1486bb998",
"rev": "4b392b284877d203ae262e16af269f702df036bc",
"type": "github"
},
"original": {
@ -35,16 +44,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1748124805,
"narHash": "sha256-8A7HjmnvCpDjmETrZY1QwzKunR63LiP7lHu1eA5q6JI=",
"lastModified": 1739461644,
"narHash": "sha256-1o1qR0KYozYGRrnqytSpAhVBYLNBHX+Lv6I39zGRzKM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "db1aed32009f408e4048c1dd0beaf714dd34ed93",
"rev": "97a719c9f0a07923c957cf51b20b329f9fb9d43f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05-small",
"ref": "nixos-24.11-small",
"repo": "nixpkgs",
"type": "github"
}


@ -1,12 +1,18 @@
{
description = "A Nix-based continuous build system";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11-small";
inputs.nix = {
url = "github:NixOS/nix/2.29-maintenance";
# We want to control the deps precisely
flake = false;
url = "github:NixOS/nix/2.26-maintenance";
inputs.nixpkgs.follows = "nixpkgs";
# hide nix dev tooling from our lock file
inputs.flake-parts.follows = "";
inputs.git-hooks-nix.follows = "";
inputs.nixpkgs-regression.follows = "";
inputs.nixpkgs-23-11.follows = "";
inputs.flake-compat.follows = "";
};
inputs.nix-eval-jobs = {
@ -24,27 +30,11 @@
# A Nixpkgs overlay that provides a 'hydra' package.
overlays.default = final: prev: {
nixDependenciesForHydra = final.lib.makeScope final.newScope
(import (nix + "/packaging/dependencies.nix") {
pkgs = final;
inherit (final) stdenv;
inputs = {};
});
nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope
(import (nix + "/packaging/components.nix") {
officialRelease = true;
inherit (final) lib;
pkgs = final;
src = nix;
maintainers = [ ];
});
nix-eval-jobs = final.callPackage nix-eval-jobs {
nixComponents = final.nixComponentsForHydra;
};
nix-eval-jobs = final.callPackage nix-eval-jobs {};
hydra = final.callPackage ./package.nix {
inherit (final.lib) fileset;
inherit (nixpkgs.lib) fileset;
rawSrc = self;
nixComponents = final.nixComponentsForHydra;
nix-perl-bindings = final.nixComponents.nix-perl-bindings;
};
};
@ -83,31 +73,21 @@
validate-openapi = hydraJobs.tests.validate-openapi.${system};
});
packages = forEachSystem (system: let
inherit (nixpkgs) lib;
pkgs = nixpkgs.legacyPackages.${system};
nixDependencies = lib.makeScope pkgs.newScope
(import (nix + "/packaging/dependencies.nix") {
inherit pkgs;
inherit (pkgs) stdenv;
inputs = {};
});
nixComponents = lib.makeScope nixDependencies.newScope
(import (nix + "/packaging/components.nix") {
officialRelease = true;
inherit lib pkgs;
src = nix;
maintainers = [ ];
});
in {
nix-eval-jobs = pkgs.callPackage nix-eval-jobs {
inherit nixComponents;
packages = forEachSystem (system: {
nix-eval-jobs = nixpkgs.legacyPackages.${system}.callPackage nix-eval-jobs {
nix = nix.packages.${system}.nix;
};
hydra = pkgs.callPackage ./package.nix {
hydra = nixpkgs.legacyPackages.${system}.callPackage ./package.nix {
inherit (nixpkgs.lib) fileset;
inherit nixComponents;
inherit (self.packages.${system}) nix-eval-jobs;
rawSrc = self;
inherit (nix.packages.${system})
nix-util
nix-store
nix-main
nix-cli
;
nix-perl-bindings = nix.hydraJobs.perlBindings.${system};
};
default = self.packages.${system}.hydra;
});


@ -1,90 +0,0 @@
{ pulls, branches, ... }:
let
# create the json spec for the jobset
makeSpec =
contents:
builtins.derivation {
name = "spec.json";
system = "x86_64-linux";
preferLocalBuild = true;
allowSubstitutes = false;
builder = "/bin/sh";
args = [
(builtins.toFile "builder.sh" ''
echo "$contents" > $out
'')
];
contents = builtins.toJSON contents;
};
prs = readJSONFile pulls;
refs = readJSONFile branches;
# template for creating a job
makeJob =
{
schedulingshares ? 10,
keepnr ? 3,
description,
flake,
enabled ? 1,
}:
{
inherit
description
flake
schedulingshares
keepnr
enabled
;
type = 1;
hidden = false;
checkinterval = 300; # every 5 minutes
enableemail = false;
emailoverride = "";
};
giteaHost = "ssh://gitea@nayeonie.com:2222";
repo = "ahuston-0/hydra";
# Create a hydra job for a branch
jobOfRef =
name:
{ ref, ... }:
if ((builtins.match "^refs/heads/(.*)$" ref) == null) then
null
else
{
name = builtins.replaceStrings [ "/" ] [ "-" ] "branch-${name}";
value = makeJob {
description = "Branch ${name}";
flake = "git+${giteaHost}/${repo}?ref=${ref}";
};
};
# Create a hydra job for a PR
jobOfPR = id: info: {
name = if info.draft then "draft-${id}" else "pr-${id}";
value = makeJob {
description = "PR ${id}: ${info.title}";
flake = "git+${giteaHost}/${repo}?ref=${info.head.ref}";
enabled = info.state == "open";
};
};
# some utility functions
# converts json to name/value dicts
attrsToList = l: builtins.attrValues (builtins.mapAttrs (name: value: { inherit name value; }) l);
# wrapper function for reading json from file
readJSONFile = f: builtins.fromJSON (builtins.readFile f);
# remove null values from a set, in case of branches that don't exist
mapFilter = f: l: builtins.filter (x: (x != null)) (map f l);
# Create job set from PRs and branches
jobs = makeSpec (
builtins.listToAttrs (map ({ name, value }: jobOfPR name value) (attrsToList prs))
// builtins.listToAttrs (mapFilter ({ name, value }: jobOfRef name value) (attrsToList refs))
);
in
{
jobsets = jobs;
}
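For orientation, `makeSpec` serializes the merged PR/branch attrset to JSON; a single open pull request yields an entry roughly like this (hypothetical PR number and title, filled in with `makeJob`'s defaults):

```
{
  "pr-7" = {
    description = "PR 7: add gitea refs";
    flake = "git+ssh://gitea@nayeonie.com:2222/ahuston-0/hydra?ref=add-gitea-pulls";
    enabled = 1;          # open PRs stay enabled
    type = 1;             # flake-based jobset
    hidden = false;
    checkinterval = 300;  # every 5 minutes
    schedulingshares = 10;
    keepnr = 3;
    enableemail = false;
    emailoverride = "";
  };
}
```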


@ -1,35 +0,0 @@
{
"enabled": 1,
"hidden": false,
"description": "ahuston-0's fork of hydra",
"nixexprinput": "nixexpr",
"nixexprpath": "hydra/jobsets.nix",
"checkinterval": 60,
"schedulingshares": 100,
"enableemail": false,
"emailoverride": "",
"keepnr": 3,
"type": 0,
"inputs": {
"nixexpr": {
"value": "ssh://gitea@nayeonie.com:2222/ahuston-0/hydra.git add-gitea-pulls",
"type": "git",
"emailresponsible": false
},
"nixpkgs": {
"value": "https://github.com/NixOS/nixpkgs nixos-unstable",
"type": "git",
"emailresponsible": false
},
"pulls": {
"type": "giteapulls",
"value": "nayeonie.com ahuston-0 hydra https",
"emailresponsible": false
},
"branches": {
"type": "gitea_refs",
"value": "nayeonie.com ahuston-0 hydra heads https -",
"emailresponsible": false
}
}
}


@ -12,6 +12,20 @@ nix_util_dep = dependency('nix-util', required: true)
nix_store_dep = dependency('nix-store', required: true)
nix_main_dep = dependency('nix-main', required: true)
# Nix needs extra flags not provided in its pkg-config files.
nix_dep = declare_dependency(
dependencies: [
nix_util_dep,
nix_store_dep,
nix_main_dep,
],
compile_args: [
'-include', 'nix/config-util.hh',
'-include', 'nix/config-store.hh',
'-include', 'nix/config-main.hh',
],
)
pqxx_dep = dependency('libpqxx', required: true)
prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)


@ -15,6 +15,7 @@
systemd.services.hydra-send-stats.enable = false;
services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_12;
# The following is to work around the following error from hydra-server:
# [error] Caught exception in engine "Cannot determine local time zone"


@ -228,8 +228,8 @@ in
nix.settings = {
trusted-users = [ "hydra-queue-runner" ];
keep-outputs = true;
keep-derivations = true;
gc-keep-outputs = true;
gc-keep-derivations = true;
};
services.hydra-dev.extraConfig =
@ -468,7 +468,7 @@ in
elif [[ $compression == zstd ]]; then
compression="zstd --rm"
fi
find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
'';
startAt = "Sun 01:45";
};


@ -27,7 +27,8 @@ in
{
install = forEachSystem (system:
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-install";
nodes.machine = hydraServer;
testScript =
@ -42,7 +43,8 @@ in
});
notifications = forEachSystem (system:
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-notifications";
nodes.machine = {
imports = [ hydraServer ];
@ -54,7 +56,7 @@ in
'';
services.influxdb.enable = true;
};
testScript = { nodes, ... }: ''
testScript = ''
machine.wait_for_job("hydra-init")
# Create an admin account and some other state.
@ -85,7 +87,7 @@ in
# Setup the project and jobset
machine.succeed(
"su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
"su - hydra -c 'perl -I ${config.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
)
# Wait until hydra has built the job and
@ -99,10 +101,9 @@ in
});
gitea = forEachSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
in
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
let pkgs = nixpkgs.legacyPackages.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
name = "hydra-gitea";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];


@ -8,7 +8,11 @@
, perlPackages
, nixComponents
, nix-util
, nix-store
, nix-main
, nix-cli
, nix-perl-bindings
, git
, makeWrapper
@ -61,7 +65,7 @@ let
name = "hydra-perl-deps";
paths = lib.closePropagation
([
nixComponents.nix-perl-bindings
nix-perl-bindings
git
] ++ (with perlPackages; [
AuthenSASL
@ -89,7 +93,6 @@ let
DateTime
DBDPg
DBDSQLite
DBIxClassHelpers
DigestSHA1
EmailMIME
EmailSender
@ -162,7 +165,7 @@ stdenv.mkDerivation (finalAttrs: {
nukeReferences
pkg-config
mdbook
nixComponents.nix-cli
nix-cli
perlDeps
perl
unzip
@ -172,9 +175,9 @@ stdenv.mkDerivation (finalAttrs: {
libpqxx
openssl
libxslt
nixComponents.nix-util
nixComponents.nix-store
nixComponents.nix-main
nix-util
nix-store
nix-main
perlDeps
perl
boost
@ -201,14 +204,14 @@ stdenv.mkDerivation (finalAttrs: {
glibcLocales
libressl.nc
python3
nixComponents.nix-cli
nix-cli
];
hydraPath = lib.makeBinPath (
[
subversion
openssh
nixComponents.nix-cli
nix-cli
coreutils
findutils
pixz
@ -238,7 +241,7 @@ stdenv.mkDerivation (finalAttrs: {
shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null
PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
export HYDRA_HOME="$(pwd)/src/"
mkdir -p .hydra-data
@ -269,7 +272,7 @@ stdenv.mkDerivation (finalAttrs: {
--prefix PATH ':' $out/bin:$hydraPath \
--set HYDRA_RELEASE ${version} \
--set HYDRA_HOME $out/libexec/hydra \
--set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
--set NIX_RELEASE ${nix-cli.name or "unknown"} \
--set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
done
'';
@ -277,8 +280,5 @@ stdenv.mkDerivation (finalAttrs: {
dontStrip = true;
meta.description = "Build of Hydra on ${stdenv.system}";
passthru = {
inherit perlDeps;
nix = nixComponents.nix-cli;
};
passthru = { inherit perlDeps; };
})


@ -1,8 +1,8 @@
#include "db.hh"
#include "hydra-config.hh"
#include <nix/util/pool.hh>
#include <nix/main/shared.hh>
#include <nix/util/signals.hh>
#include "pool.hh"
#include "shared.hh"
#include "signals.hh"
#include <algorithm>
#include <thread>


@ -2,8 +2,7 @@ hydra_evaluator = executable('hydra-evaluator',
'hydra-evaluator.cc',
dependencies: [
libhydra_dep,
nix_util_dep,
nix_main_dep,
nix_dep,
pqxx_dep,
],
install: true,


@ -5,20 +5,17 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <nix/store/build-result.hh>
#include <nix/store/path.hh>
#include <nix/store/legacy-ssh-store.hh>
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include "build-result.hh"
#include "path.hh"
#include "legacy-ssh-store.hh"
#include "serve-protocol.hh"
#include "state.hh"
#include <nix/util/current-process.hh>
#include <nix/util/processes.hh>
#include <nix/util/util.hh>
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/ssh.hh>
#include <nix/util/finally.hh>
#include <nix/util/url.hh>
#include "current-process.hh"
#include "processes.hh"
#include "util.hh"
#include "ssh.hh"
#include "finally.hh"
#include "url.hh"
using namespace nix;
@ -39,38 +36,6 @@ bool ::Machine::isLocalhost() const
namespace nix::build_remote {
static std::unique_ptr<SSHMaster::Connection> openConnection(
::Machine::ptr machine, SSHMaster & master)
{
Strings command = {"nix-store", "--serve", "--write"};
if (machine->isLocalhost()) {
command.push_back("--builders");
command.push_back("");
} else {
auto remoteStore = machine->storeUri.params.find("remote-store");
if (remoteStore != machine->storeUri.params.end()) {
command.push_back("--store");
command.push_back(escapeShellArgAlways(remoteStore->second));
}
}
auto ret = master.startCommand(std::move(command), {
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
});
// XXX: determine the actual max value we can use from /proc.
// FIXME: Should this be upstreamed into `startCommand` in Nix?
int pipesize = 1024 * 1024;
fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
return ret;
}
static void copyClosureTo(
::Machine::Connection & conn,
Store & destStore,
@ -87,8 +52,8 @@ static void copyClosureTo(
// FIXME: substitute output pollutes our build log
/* Get back the set of paths that are already valid on the remote
host. */
auto present = conn.queryValidPaths(
destStore, true, closure, useSubstitutes);
auto present = conn.store->queryValidPaths(
closure, true, useSubstitutes);
if (present.size() == closure.size()) return;
@ -103,12 +68,7 @@ static void copyClosureTo(
std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
std::chrono::seconds(600));
conn.to << ServeProto::Command::ImportPaths;
destStore.exportPaths(missing, conn.to);
conn.to.flush();
if (readInt(conn.from) != 1)
throw Error("remote machine failed to import closure");
conn.store->addMultipleToStoreLegacy(destStore, missing);
}
@ -228,7 +188,7 @@ static BuildResult performBuild(
counter & nrStepsBuilding
)
{
conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
auto kont = conn.store->buildDerivationAsync(drvPath, drv, options);
BuildResult result;
@ -237,7 +197,10 @@ static BuildResult performBuild(
startTime = time(0);
{
MaintainCount<counter> mc(nrStepsBuilding);
result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
result = kont();
// Without proper call-once functions, we need to manually
// delete after calling.
kont = {};
}
stopTime = time(0);
@ -253,7 +216,7 @@ static BuildResult performBuild(
// If the protocol was too old to give us `builtOutputs`, initialize
// it manually by introspecting the derivation.
if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
if (GET_PROTOCOL_MINOR(conn.store->getProtocol()) < 6)
{
// If the remote is too old to handle CA derivations, we can't get this
// far anyways
@ -298,11 +261,10 @@ static void copyPathFromRemote(
lambda function only gets executed if someone tries to read
from source2, we will send the command from here rather
than outside the lambda. */
conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
conn.to.flush();
TeeSource tee(conn.from, sink);
extractNarData(tee, localStore.printStorePath(info.path), narMembers);
conn.store->narFromPath(info.path, [&](Source & source) {
TeeSource tee{source, sink};
extractNarData(tee, conn.store->printStorePath(info.path), narMembers);
});
});
destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
@ -386,19 +348,8 @@ void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
}
/* Utility guard object to auto-release a semaphore on destruction. */
template <typename T>
class SemaphoreReleaser {
public:
SemaphoreReleaser(T* s) : sem(s) {}
~SemaphoreReleaser() { sem->release(); }
private:
T* sem;
};
void State::buildRemote(ref<Store> destStore,
std::unique_ptr<MachineReservation> reservation,
::Machine::ptr machine, Step::ptr step,
const ServeProto::BuildOptions & buildOptions,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
@ -415,30 +366,39 @@ void State::buildRemote(ref<Store> destStore,
updateStep(ssConnecting);
auto storeRef = machine->completeStoreReference();
auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
// FIXME: rewrite to use Store.
::Machine::Connection conn {
.machine = machine,
.store = [&]{
auto * pSpecified = std::get_if<StoreReference::Specified>(&machine->storeUri.variant);
if (!pSpecified || pSpecified->scheme != "ssh") {
throw Error("Currently, only (legacy-)ssh stores are supported!");
}
LegacySSHStoreConfig storeConfig {
pSpecified->scheme,
pSpecified->authority,
storeRef.params
auto remoteStore = machine->openStore().dynamic_pointer_cast<LegacySSHStore>();
assert(remoteStore);
remoteStore->connPipeSize = 1024 * 1024;
if (machine->isLocalhost()) {
auto rp_new = remoteStore->remoteProgram.get();
rp_new.push_back("--builders");
rp_new.push_back("");
const_cast<nix::Setting<Strings> &>(remoteStore->remoteProgram).assign(rp_new);
}
remoteStore->extraSshArgs = {
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
};
const_cast<nix::Setting<int> &>(remoteStore->logFD).assign(logFD.get());
auto master = storeConfig.createSSHMaster(
false, // no SSH master yet
logFD.get());
// FIXME: rewrite to use Store.
auto child = build_remote::openConnection(machine, master);
return nix::ref{remoteStore};
}(),
};
{
auto activeStepState(activeStep->state_.lock());
if (activeStepState->cancelled) throw Error("step cancelled");
activeStepState->pid = child->sshPid;
activeStepState->pid = conn.store->getConnectionPid();
}
Finally clearPid([&]() {
@ -453,35 +413,12 @@ void State::buildRemote(ref<Store> destStore,
process. Meh. */
});
::Machine::Connection conn {
{
.to = child->in.get(),
.from = child->out.get(),
/* Handshake. */
.remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
},
/*.machine =*/ machine,
};
Finally updateStats([&]() {
bytesReceived += conn.from.read;
bytesSent += conn.to.written;
auto stats = conn.store->getConnectionStats();
bytesReceived += stats.bytesReceived;
bytesSent += stats.bytesSent;
});
constexpr ServeProto::Version our_version = 0x206;
try {
conn.remoteVersion = decltype(conn)::handshake(
conn.to,
conn.from,
our_version,
machine->storeUri.render());
} catch (EndOfFile & e) {
child->sshPid.wait();
std::string s = chomp(readFile(result.logFile));
throw Error("cannot connect to %1%: %2%", machine->storeUri.render(), s);
}
{
auto info(machine->state->connectInfo.lock());
info->consecutiveFailures = 0;
@ -538,23 +475,6 @@ void State::buildRemote(ref<Store> destStore,
result.logFile = "";
}
/* Throttle CPU-bound work. Opportunistically skip updating the current
* step, since this requires a DB roundtrip. */
if (!localWorkThrottler.try_acquire()) {
MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
updateStep(ssWaitingForLocalSlot);
localWorkThrottler.acquire();
}
SemaphoreReleaser releaser(&localWorkThrottler);
/* Once we've started copying outputs, release the machine reservation
* so further builds can happen. We do not release the machine earlier
* to avoid situations where the queue runner is bottlenecked on
* copying outputs and we end up building too many things that we
* haven't been able to allow copy slots for. */
reservation.reset();
wakeDispatcher();
StorePathSet outputs;
for (auto & [_, realisation] : buildResult.builtOutputs)
outputs.insert(realisation.outPath);
@ -567,7 +487,7 @@ void State::buildRemote(ref<Store> destStore,
auto now1 = std::chrono::steady_clock::now();
auto infos = conn.queryPathInfos(*localStore, outputs);
auto infos = conn.store->queryPathInfosUncached(outputs);
size_t totalNarSize = 0;
for (auto & [_, info] : infos) totalNarSize += info.narSize;
@ -602,9 +522,11 @@ void State::buildRemote(ref<Store> destStore,
}
}
/* Shut down the connection. */
child->in = -1;
child->sshPid.wait();
/* Shutting down the connection is handled by RAII.
The only difference is kill() instead of wait() (i.e. send a
signal, then wait()).
*/
} catch (Error & e) {
/* Disable this machine until a certain period of time has


@ -1,7 +1,7 @@
#include "hydra-build-result.hh"
#include <nix/store/store-api.hh>
#include <nix/util/util.hh>
#include <nix/util/source-accessor.hh>
#include "store-api.hh"
#include "util.hh"
#include "source-accessor.hh"
#include <regex>


@ -2,8 +2,8 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/util/finally.hh>
#include <nix/store/binary-cache-store.hh>
#include "finally.hh"
#include "binary-cache-store.hh"
using namespace nix;
@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
}
void State::builder(std::unique_ptr<MachineReservation> reservation)
void State::builder(MachineReservation::ptr reservation)
{
setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
@ -35,20 +35,22 @@ void State::builder(std::unique_ptr<MachineReservation> reservation)
activeSteps_.lock()->erase(activeStep);
});
std::string machine = reservation->machine->storeUri.render();
try {
auto destStore = getDestStore();
// Might release the reservation.
res = doBuildStep(destStore, std::move(reservation), activeStep);
res = doBuildStep(destStore, reservation, activeStep);
} catch (std::exception & e) {
printMsg(lvlError, "uncaught exception building %s on %s: %s",
localStore->printStorePath(activeStep->step->drvPath),
machine,
localStore->printStorePath(reservation->step->drvPath),
reservation->machine->storeUri.render(),
e.what());
}
}
/* Release the machine and wake up the dispatcher. */
assert(reservation.unique());
reservation = 0;
wakeDispatcher();
/* If there was a temporary failure, retry the step after an
exponentially increasing interval. */
Step::ptr step = wstep.lock();
@ -70,11 +72,11 @@ void State::builder(std::unique_ptr<MachineReservation> reservation)
State::StepResult State::doBuildStep(nix::ref<Store> destStore,
std::unique_ptr<MachineReservation> reservation,
MachineReservation::ptr reservation,
std::shared_ptr<ActiveStep> activeStep)
{
auto step(reservation->step);
auto machine(reservation->machine);
auto & step(reservation->step);
auto & machine(reservation->machine);
{
auto step_(step->state.lock());
@ -209,7 +211,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
try {
/* FIXME: referring builds may have conflicting timeouts. */
buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
} catch (Error & e) {
if (activeStep->state_.lock()->cancelled) {
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);


@ -40,15 +40,13 @@ void State::dispatcher()
printMsg(lvlDebug, "dispatcher woken up");
nrDispatcherWakeups++;
auto t_before_work = std::chrono::steady_clock::now();
auto now1 = std::chrono::steady_clock::now();
auto sleepUntil = doDispatch();
auto t_after_work = std::chrono::steady_clock::now();
auto now2 = std::chrono::steady_clock::now();
prom.dispatcher_time_spent_running.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
/* Sleep until we're woken up (either because a runnable build
is added, or because a build finishes). */
@ -62,10 +60,6 @@ void State::dispatcher()
*dispatcherWakeup_ = false;
}
auto t_after_sleep = std::chrono::steady_clock::now();
prom.dispatcher_time_spent_waiting.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
} catch (std::exception & e) {
printError("dispatcher: %s", e.what());
sleep(1);
@ -134,8 +128,6 @@ system_time State::doDispatch()
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
size_t numRequiredSystemFeatures;
size_t numRevDeps;
BuildID lowestBuildID;
StepInfo(Step::ptr step, Step::State & step_) : step(step)
@ -144,8 +136,6 @@ system_time State::doDispatch()
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
numRequiredSystemFeatures = step->requiredSystemFeatures.size();
numRevDeps = step_.rdeps.size();
lowestBuildID = step_.lowestBuildID;
}
};
@ -198,8 +188,6 @@ system_time State::doDispatch()
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
a.lowestBuildID < b.lowestBuildID;
});
@ -294,7 +282,7 @@ system_time State::doDispatch()
/* Make a slot reservation and start a thread to
do the build. */
auto builderThread = std::thread(&State::builder, this,
std::make_unique<MachineReservation>(*this, step, mi.machine));
std::make_shared<MachineReservation>(*this, step, mi.machine));
builderThread.detach(); // FIXME?
keepGoing = true;


@ -2,9 +2,9 @@
#include <memory>
#include <nix/util/hash.hh>
#include <nix/store/derivations.hh>
#include <nix/store/store-api.hh>
#include "hash.hh"
#include "derivations.hh"
#include "store-api.hh"
#include "nar-extractor.hh"
struct BuildProduct


@ -11,16 +11,16 @@
#include <nlohmann/json.hpp>
#include <nix/util/signals.hh>
#include "signals.hh"
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/store-open.hh>
#include <nix/store/remote-store.hh>
#include "store-api.hh"
#include "remote-store.hh"
#include <nix/store/globals.hh>
#include "globals.hh"
#include "hydra-config.hh"
#include <nix/store/s3-binary-cache-store.hh>
#include <nix/main/shared.hh>
#include "s3-binary-cache-store.hh"
#include "shared.hh"
using namespace nix;
using nlohmann::json;
@ -70,31 +70,10 @@ State::PromMetrics::PromMetrics()
.Register(*registry)
.Add({})
)
, dispatcher_time_spent_running(
prometheus::BuildCounter()
.Name("hydraqueuerunner_dispatcher_time_spent_running")
.Help("Time (in micros) spent running the dispatcher")
.Register(*registry)
.Add({})
)
, dispatcher_time_spent_waiting(
prometheus::BuildCounter()
.Name("hydraqueuerunner_dispatcher_time_spent_waiting")
.Help("Time (in micros) spent waiting for the dispatcher to obtain work")
.Register(*registry)
.Add({})
)
, queue_monitor_time_spent_running(
prometheus::BuildCounter()
.Name("hydraqueuerunner_queue_monitor_time_spent_running")
.Help("Time (in micros) spent running the queue monitor")
.Register(*registry)
.Add({})
)
, queue_monitor_time_spent_waiting(
prometheus::BuildCounter()
.Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
.Help("Time (in micros) spent waiting for the queue monitor to obtain work")
, queue_max_id(
prometheus::BuildGauge()
.Name("hydraqueuerunner_queue_max_build_id_info")
.Help("Maximum build record ID in the queue")
.Register(*registry)
.Add({})
)
@ -106,7 +85,6 @@ State::State(std::optional<std::string> metricsAddrOpt)
: config(std::make_unique<HydraConfig>())
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
, dbPool(config->getIntOption("max_db_connections", 128))
, localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
@ -573,7 +551,6 @@ void State::dumpStatus(Connection & conn)
{"nrActiveSteps", activeSteps_.lock()->size()},
{"nrStepsBuilding", nrStepsBuilding.load()},
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
{"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
{"nrStepsWaiting", nrStepsWaiting.load()},
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
@ -615,7 +592,6 @@ void State::dumpStatus(Connection & conn)
}
{
auto machines_json = json::object();
auto machines_(machines.lock());
for (auto & i : *machines_) {
auto & m(i.second);
@ -642,9 +618,8 @@ void State::dumpStatus(Connection & conn)
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
}
machines_json[m->storeUri.render()] = machine;
statusJson["machines"][m->storeUri.render()] = machine;
}
statusJson["machines"] = machines_json;
}
{
@ -703,7 +678,6 @@ void State::dumpStatus(Connection & conn)
: 0.0},
};
#if NIX_WITH_S3_SUPPORT
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) {
auto & s3Stats = s3Store->getS3Stats();
@ -729,7 +703,6 @@ void State::dumpStatus(Connection & conn)
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
};
}
#endif
}
{
@ -832,7 +805,7 @@ void State::run(BuildID buildOne)
<< metricsAddr << "/metrics (port " << exposerPort << ")"
<< std::endl;
Store::Config::Params localParams;
Store::Params localParams;
localParams["max-connections"] = "16";
localParams["max-connection-age"] = "600";
localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);


@ -13,9 +13,7 @@ hydra_queue_runner = executable('hydra-queue-runner',
srcs,
dependencies: [
libhydra_dep,
nix_util_dep,
nix_store_dep,
nix_main_dep,
nix_dep,
pqxx_dep,
prom_cpp_core_dep,
prom_cpp_pull_dep,


@ -1,6 +1,6 @@
#include "nar-extractor.hh"
#include <nix/util/archive.hh>
#include "archive.hh"
#include <unordered_set>


@ -1,9 +1,9 @@
#pragma once
#include <nix/util/source-accessor.hh>
#include <nix/util/types.hh>
#include <nix/util/serialise.hh>
#include <nix/util/hash.hh>
#include "source-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
struct NarMemberData
{


@ -1,8 +1,6 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/globals.hh>
#include <nix/store/parsed-derivations.hh>
#include <nix/util/thread-pool.hh>
#include "globals.hh"
#include <cstring>
@ -39,21 +37,16 @@ void State::queueMonitorLoop(Connection & conn)
auto destStore = getDestStore();
unsigned int lastBuildId = 0;
bool quit = false;
while (!quit) {
auto t_before_work = std::chrono::steady_clock::now();
localStore->clearPathInfoCache();
bool done = getQueuedBuilds(conn, destStore);
bool done = getQueuedBuilds(conn, destStore, lastBuildId);
if (buildOne && buildOneDone) quit = true;
auto t_after_work = std::chrono::steady_clock::now();
prom.queue_monitor_time_spent_running.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
/* Sleep until we get notification from the database about an
event. */
if (done && !quit) {
@ -63,10 +56,12 @@ void State::queueMonitorLoop(Connection & conn)
conn.get_notifs();
if (auto lowestId = buildsAdded.get()) {
lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
printMsg(lvlTalkative, "got notification: new builds added to the queue");
}
if (buildsRestarted.get()) {
printMsg(lvlTalkative, "got notification: builds restarted");
lastBuildId = 0; // check all builds
}
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
@ -76,10 +71,6 @@ void State::queueMonitorLoop(Connection & conn)
printMsg(lvlTalkative, "got notification: jobset shares changed");
processJobsetSharesChange(conn);
}
auto t_after_sleep = std::chrono::steady_clock::now();
prom.queue_monitor_time_spent_waiting.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
}
exit(0);
@ -93,18 +84,20 @@ struct PreviousFailure : public std::exception {
bool State::getQueuedBuilds(Connection & conn,
ref<Store> destStore)
ref<Store> destStore, unsigned int & lastBuildId)
{
prom.queue_checks_started.Increment();
printInfo("checking the queue for builds...");
printInfo("checking the queue for builds > %d...", lastBuildId);
/* Grab the queued builds from the database, but don't process
them yet (since we don't want a long-running transaction). */
std::vector<BuildID> newIDs;
std::unordered_map<BuildID, Build::ptr> newBuildsByID;
std::map<BuildID, Build::ptr> newBuildsByID;
std::multimap<StorePath, BuildID> newBuildsByPath;
unsigned int newLastBuildId = lastBuildId;
{
pqxx::work txn(conn);
@ -113,12 +106,17 @@ bool State::getQueuedBuilds(Connection & conn,
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
"globalPriority, priority from Builds "
"inner join jobsets on builds.jobset_id = jobsets.id "
"where finished = 0 order by globalPriority desc, random()");
"where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
lastBuildId);
for (auto const & row : res) {
auto builds_(builds.lock());
BuildID id = row["id"].as<BuildID>();
if (buildOne && id != buildOne) continue;
if (id > newLastBuildId) {
newLastBuildId = id;
prom.queue_max_id.Set(id);
}
if (builds_->count(id)) continue;
auto build = std::make_shared<Build>(
@ -320,13 +318,15 @@ bool State::getQueuedBuilds(Connection & conn,
/* Stop after a certain time to allow priority bumps to be
processed. */
if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
prom.queue_checks_early_exits.Increment();
break;
}
}
prom.queue_checks_finished.Increment();
lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
return newBuildsByID.empty();
}
@ -405,34 +405,6 @@ void State::processQueueChange(Connection & conn)
}
std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
ref<Store> destStore,
const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
ThreadPool tp;
for (auto & [output, maybeOutputPath] : paths) {
if (!maybeOutputPath) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
} else {
tp.enqueue([&] {
if (!destStore->isValidPath(*maybeOutputPath)) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
}
});
}
}
tp.process();
auto missing(missing_.lock());
return *missing;
}
Step::ptr State::createStep(ref<Store> destStore,
Connection & conn, Build::ptr build, const StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
@ -491,23 +463,14 @@ Step::ptr State::createStep(ref<Store> destStore,
it's not runnable yet, and other threads won't make it
runnable while step->created == false. */
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
{
auto parsedOpt = StructuredAttrs::tryParse(step->drv->env);
try {
step->drvOptions = std::make_unique<DerivationOptions>(
DerivationOptions::fromStructuredAttrs(step->drv->env, parsedOpt ? &*parsedOpt : nullptr));
} catch (Error & e) {
e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
throw;
}
}
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";
step->systemType = step->drv->platform;
{
StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
StringSet features = step->requiredSystemFeatures = step->parsedDrv->getRequiredSystemFeatures();
if (step->preferLocalBuild)
features.insert("local");
if (!features.empty()) {
@ -522,15 +485,16 @@ Step::ptr State::createStep(ref<Store> destStore,
/* Are all outputs valid? */
auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
std::map<DrvOutput, std::optional<StorePath>> paths;
bool valid = true;
std::map<DrvOutput, std::optional<StorePath>> missing;
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
auto outputHash = outputHashes.at(outputName);
paths.insert({{outputHash, outputName}, maybeOutputPath});
if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
continue;
valid = false;
missing.insert({{outputHash, outputName}, maybeOutputPath});
}
auto missing = getMissingRemotePaths(destStore, paths);
bool valid = missing.empty();
/* Try to copy the missing paths from the local store or from
substitutes. */
if (!missing.empty()) {


@ -6,8 +6,6 @@
#include <map>
#include <memory>
#include <queue>
#include <regex>
#include <semaphore>
#include <prometheus/counter.h>
#include <prometheus/gauge.h>
@ -15,18 +13,15 @@
#include "db.hh"
#include <nix/store/derivations.hh>
#include <nix/store/derivation-options.hh>
#include <nix/store/pathlocks.hh>
#include <nix/util/pool.hh>
#include <nix/store/build-result.hh>
#include <nix/store/store-api.hh>
#include <nix/util/sync.hh>
#include "parsed-derivations.hh"
#include "pathlocks.hh"
#include "pool.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "sync.hh"
#include "nar-extractor.hh"
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/serve-protocol-connection.hh>
#include <nix/store/machines.hh>
#include "legacy-ssh-store.hh"
#include "machines.hh"
typedef unsigned int BuildID;
@ -60,7 +55,6 @@ typedef enum {
ssConnecting = 10,
ssSendingInputs = 20,
ssBuilding = 30,
ssWaitingForLocalSlot = 35,
ssReceivingOutputs = 40,
ssPostProcessing = 50,
} StepState;
@ -171,8 +165,8 @@ struct Step
nix::StorePath drvPath;
std::unique_ptr<nix::Derivation> drv;
std::unique_ptr<nix::DerivationOptions> drvOptions;
nix::StringSet requiredSystemFeatures;
std::unique_ptr<nix::ParsedDerivation> parsedDrv;
std::set<std::string> requiredSystemFeatures;
bool preferLocalBuild;
bool isDeterministic;
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
@ -296,9 +290,11 @@ struct Machine : nix::Machine
bool isLocalhost() const;
// A connection to a machine
struct Connection : nix::ServeProto::BasicClientConnection {
struct Connection {
// Backpointer to the machine
ptr machine;
// Opened store
nix::ref<nix::LegacySSHStore> store;
};
};
@ -356,10 +352,6 @@ private:
typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr
/* Throttler for CPU-bound local work. */
static constexpr unsigned int maxSupportedLocalWorkers = 1024;
std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;
/* Various stats. */
time_t startedAt;
counter nrBuildsRead{0};
@ -369,7 +361,6 @@ private:
counter nrStepsDone{0};
counter nrStepsBuilding{0};
counter nrStepsCopyingTo{0};
counter nrStepsWaitingForDownloadSlot{0};
counter nrStepsCopyingFrom{0};
counter nrStepsWaiting{0};
counter nrUnsupportedSteps{0};
@ -400,6 +391,7 @@ private:
struct MachineReservation
{
typedef std::shared_ptr<MachineReservation> ptr;
State & state;
Step::ptr step;
Machine::ptr machine;
@ -457,12 +449,7 @@ private:
prometheus::Counter& queue_steps_created;
prometheus::Counter& queue_checks_early_exits;
prometheus::Counter& queue_checks_finished;
prometheus::Counter& dispatcher_time_spent_running;
prometheus::Counter& dispatcher_time_spent_waiting;
prometheus::Counter& queue_monitor_time_spent_running;
prometheus::Counter& queue_monitor_time_spent_waiting;
prometheus::Gauge& queue_max_id;
PromMetrics();
};
@ -506,7 +493,8 @@ private:
void queueMonitorLoop(Connection & conn);
/* Check the queue for new builds. */
bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);
bool getQueuedBuilds(Connection & conn,
nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
/* Handle cancellation, deletion and priority bumps. */
void processQueueChange(Connection & conn);
@ -514,12 +502,6 @@ private:
BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
const nix::StorePath & drvPath);
/* Returns paths missing from the remote store. Paths are processed in
* parallel to work around the possible latency of remote stores. */
std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
nix::ref<nix::Store> destStore,
const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);
Step::ptr createStep(nix::ref<nix::Store> store,
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<nix::StorePath> & finishedDrvs,
@ -549,17 +531,16 @@ private:
void abortUnsupported();
void builder(std::unique_ptr<MachineReservation> reservation);
void builder(MachineReservation::ptr reservation);
/* Perform the given build step. Return true if the step is to be
retried. */
enum StepResult { sDone, sRetry, sMaybeCancelled };
StepResult doBuildStep(nix::ref<nix::Store> destStore,
std::unique_ptr<MachineReservation> reservation,
MachineReservation::ptr reservation,
std::shared_ptr<ActiveStep> activeStep);
void buildRemote(nix::ref<nix::Store> destStore,
std::unique_ptr<MachineReservation> reservation,
Machine::ptr machine, Step::ptr step,
const nix::ServeProto::BuildOptions & buildOptions,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,


@ -238,7 +238,7 @@ sub serveFile {
# XSS hole.
$c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');
$c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
"store", "cat", "--store", getStoreUri(), "$path"]) };
# Detect MIME type.


@ -364,21 +364,6 @@ sub evals_GET {
);
}
sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
sub errors_GET {
my ($self, $c) = @_;
$c->stash->{template} = 'eval-error.tt';
my $jobsetName = $c->stash->{params}->{name};
$c->stash->{jobset} = $c->stash->{project}->jobsets->find(
{ name => $jobsetName },
{ '+columns' => { 'errormsg' => 'errormsg' } }
);
$self->status_ok($c, entity => $c->stash->{jobset});
}
# Redirect to the latest finished evaluation of this jobset.
sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {

View File

@ -76,9 +76,7 @@ sub view_GET {
$c->stash->{removed} = $diff->{removed};
$c->stash->{unfinished} = $diff->{unfinished};
$c->stash->{aborted} = $diff->{aborted};
$c->stash->{totalAborted} = $diff->{totalAborted};
$c->stash->{totalFailed} = $diff->{totalFailed};
$c->stash->{totalQueued} = $diff->{totalQueued};
$c->stash->{failed} = $diff->{failed};
$c->stash->{full} = ($c->req->params->{full} || "0") eq "1";
@ -88,17 +86,6 @@ sub view_GET {
);
}
sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
sub errors_GET {
my ($self, $c) = @_;
$c->stash->{template} = 'eval-error.tt';
$c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });
$self->status_ok($c, entity => $c->stash->{eval});
}
sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
my ($self, $c) = @_;


@ -162,7 +162,7 @@ sub status_GET {
{ "buildsteps.busy" => { '!=', 0 } },
{ order_by => ["globalpriority DESC", "id"],
join => "buildsteps",
columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
columns => [@buildListColumns]
})]
);
}


@ -32,26 +32,12 @@ sub buildDiff {
removed => [],
unfinished => [],
aborted => [],
# These summary counters cut across the categories to determine whether
# actions such as "Restart all failed" or "Bump queue" are available.
totalAborted => 0,
totalFailed => 0,
totalQueued => 0,
failed => [],
};
my $n = 0;
foreach my $build (@{$builds}) {
my $aborted = $build->finished != 0 && (
# aborted
$build->buildstatus == 3
# cancelled
|| $build->buildstatus == 4
# timeout
|| $build->buildstatus == 7
# log limit exceeded
|| $build->buildstatus == 10
);
my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
my $d;
my $found = 0;
while ($n < scalar(@{$builds2})) {
@ -85,15 +71,8 @@ sub buildDiff {
} else {
push @{$ret->{new}}, $build if !$found;
}
if ($build->finished != 0 && $build->buildstatus != 0) {
if ($aborted) {
++$ret->{totalAborted};
} else {
++$ret->{totalFailed};
}
} elsif ($build->finished == 0) {
++$ret->{totalQueued};
if (defined $build->buildstatus && $build->buildstatus != 0) {
push @{$ret->{failed}}, $build;
}
}


@ -36,7 +36,6 @@ our @EXPORT = qw(
jobsetOverview
jobsetOverview_
pathIsInsidePrefix
readIntoSocket
readNixFile
registerRoot
restartBuilds
@ -297,7 +296,8 @@ sub getEvals {
my @evals = $evals_result_set->search(
{ hasnewbuilds => 1 },
{ order_by => "$me.id DESC", rows => $rows, offset => $offset });
{ order_by => "$me.id DESC", rows => $rows, offset => $offset
, prefetch => { evaluationerror => [ ] } });
my @res = ();
my $cache = {};
@ -417,16 +417,6 @@ sub pathIsInsidePrefix {
return $cur;
}
sub readIntoSocket{
my (%args) = @_;
my $sock;
eval {
open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
};
return $sock;
}


@ -105,6 +105,4 @@ __PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);
__PACKAGE__->mk_group_accessors('column' => 'has_error');
1;


@ -386,8 +386,6 @@ __PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);
__PACKAGE__->mk_group_accessors('column' => 'has_error');
sub supportsDynamicRunCommand {
my ($self) = @_;


@ -1,30 +0,0 @@
package Hydra::Schema::ResultSet::EvaluationErrors;
use strict;
use utf8;
use warnings;
use parent 'DBIx::Class::ResultSet';
use Storable qw(dclone);
__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
# Exclude expensive error message values unless explicitly requested, and
# replace them with a summary field describing their presence/absence.
sub search_rs {
my ( $class, $query, $attrs ) = @_;
if ($attrs) {
$attrs = dclone($attrs);
}
unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
$attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
}
unless (exists $attrs->{'+columns'}->{'errormsg'}) {
push @{ $attrs->{'remove_columns'} }, 'errormsg';
}
return $class->next::method($query, $attrs);
}
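For context, the resultset removed here transparently swapped the heavy `errormsg` column for a cheap `has_error` flag on every search, so list pages stayed inexpensive while callers could still opt back in. A sketch of the access pattern it enabled (hypothetical `$schema` and `$evalId`):

```
# Default search: errormsg is dropped from the SELECT and summarized as
# has_error = (errormsg != '').
my $ev = $schema->resultset('EvaluationErrors')->find($evalId);
warn "eval had errors\n" if $ev->has_error;

# Explicitly requesting the column restores the full message:
my $full = $schema->resultset('EvaluationErrors')->search(
    { 'me.id' => $evalId },
    { '+columns' => { errormsg => 'errormsg' } }
)->single;
print $full->errormsg if $full->has_error;
```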


@ -1,30 +0,0 @@
package Hydra::Schema::ResultSet::Jobsets;
use strict;
use utf8;
use warnings;
use parent 'DBIx::Class::ResultSet';
use Storable qw(dclone);
__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
# Exclude expensive error message values unless explicitly requested, and
# replace them with a summary field describing their presence/absence.
sub search_rs {
my ( $class, $query, $attrs ) = @_;
if ($attrs) {
$attrs = dclone($attrs);
}
unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
$attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
}
unless (exists $attrs->{'+columns'}->{'errormsg'}) {
push @{ $attrs->{'remove_columns'} }, 'errormsg';
}
return $class->next::method($query, $attrs);
}


@ -6,7 +6,6 @@ use base 'Catalyst::View::TT';
use Template::Plugin::HTML;
use Hydra::Helper::Nix;
use Time::Seconds;
use Digest::SHA qw(sha1_hex);
__PACKAGE__->config(
TEMPLATE_EXTENSION => '.tt',
@ -26,14 +25,8 @@ __PACKAGE__->config(
makeNameTextForJobset
relativeDuration
stripSSHUser
metricDivId
/]);
sub metricDivId {
my ($self, $c, $text) = @_;
return "metric-" . sha1_hex($text);
}
sub buildLogExists {
my ($self, $c, $build) = @_;
return 1 if defined $c->config->{log_prefix};


@ -2,8 +2,8 @@
#include <pqxx/pqxx>
#include <nix/util/environment-variables.hh>
#include <nix/util/util.hh>
#include "environment-variables.hh"
#include "util.hh"
struct Connection : pqxx::connection


@ -2,8 +2,8 @@
#include <map>
#include <nix/util/file-system.hh>
#include <nix/util/util.hh>
#include "file-system.hh"
#include "util.hh"
struct HydraConfig
{


@ -61,7 +61,21 @@ END;
<td>[% IF step.busy != 0 || ((step.machine || step.starttime) && (step.status == 0 || step.status == 1 || step.status == 3 || step.status == 4 || step.status == 7)); INCLUDE renderMachineName machine=step.machine; ELSE; "<em>n/a</em>"; END %]</td>
<td class="step-status">
[% IF step.busy != 0 %]
[% INCLUDE renderBusyStatus %]
[% IF step.busy == 1 %]
<strong>Preparing</strong>
[% ELSIF step.busy == 10 %]
<strong>Connecting</strong>
[% ELSIF step.busy == 20 %]
<strong>Sending inputs</strong>
[% ELSIF step.busy == 30 %]
<strong>Building</strong>
[% ELSIF step.busy == 40 %]
<strong>Receiving outputs</strong>
[% ELSIF step.busy == 50 %]
<strong>Post-processing</strong>
[% ELSE %]
<strong>Unknown state</strong>
[% END %]
[% ELSIF step.status == 0 %]
[% IF step.isnondeterministic %]
<span class="warn">Succeeded with non-determistic result</span>


@ -91,17 +91,6 @@ BLOCK renderDuration;
duration % 60 %]s[%
END;
BLOCK renderDrvInfo;
drvname = step.drvpath
.substr(11) # strip `/nix/store/`
.split('-').slice(1).join("-") # strip hash part
.substr(0, -4); # strip `.drv`
IF drvname != releasename;
IF step.type == 0; action = "Build"; ELSE; action = "Substitution"; END;
IF drvname; %]<em> ([% action %] of [% drvname %])</em>[% END;
END;
END;
BLOCK renderBuildListHeader %]
<table class="table table-striped table-condensed clickable-rows">
@ -142,12 +131,7 @@ BLOCK renderBuildListBody;
[% END %]
<td><a class="row-link" href="[% link %]">[% build.id %]</a></td>
[% IF !hideJobName %]
<td>
<a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</a>
[% IF showStepName %]
[% INCLUDE renderDrvInfo step=build.buildsteps releasename=build.nixname %]
[% END %]
</td>
<td><a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</td>
[% END %]
<td class="nowrap">[% t = showSchedulingInfo ? build.timestamp : build.stoptime; IF t; INCLUDE renderRelativeDate timestamp=(showSchedulingInfo ? build.timestamp : build.stoptime); ELSE; "-"; END %]</td>
<td>[% !showSchedulingInfo and build.get_column('releasename') ? build.get_column('releasename') : build.nixname %]</td>
@ -261,27 +245,6 @@ BLOCK renderBuildStatusIcon;
END;
BLOCK renderBusyStatus;
IF step.busy == 1 %]
<strong>Preparing</strong>
[% ELSIF step.busy == 10 %]
<strong>Connecting</strong>
[% ELSIF step.busy == 20 %]
<strong>Sending inputs</strong>
[% ELSIF step.busy == 30 %]
<strong>Building</strong>
[% ELSIF step.busy == 35 %]
<strong>Waiting to receive outputs</strong>
[% ELSIF step.busy == 40 %]
<strong>Receiving outputs</strong>
[% ELSIF step.busy == 50 %]
<strong>Post-processing</strong>
[% ELSE %]
<strong>Unknown state</strong>
[% END;
END;
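This removed renderBusyStatus block and the copy inlined into the step table earlier in the diff encode the same busy-code labels, except that the block here also handles code 35 ("Waiting to receive outputs"), which the inlined copy drops. A table-driven sketch of the mapping, using the codes exactly as they appear in the template:

```
use strict;
use warnings;

my %busy_label = (
    1  => "Preparing",
    10 => "Connecting",
    20 => "Sending inputs",
    30 => "Building",
    35 => "Waiting to receive outputs",
    40 => "Receiving outputs",
    50 => "Post-processing",
);

# Unrecognised codes fall back to "Unknown state", as in the template.
sub busy_status {
    my ($busy) = @_;
    return $busy_label{$busy} // "Unknown state";
}

print busy_status(30), "\n";   # Building
```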
BLOCK renderStatus;
IF build.finished;
buildstatus = build.buildstatus;
@ -513,7 +476,7 @@ BLOCK renderEvals %]
ELSE %]
-
[% END %]
[% IF eval.evaluationerror.has_error %]
[% IF eval.evaluationerror.errormsg %]
<span class="badge badge-warning">Eval Errors</span>
[% END %]
</td>
@ -639,7 +602,7 @@ BLOCK renderJobsetOverview %]
<td>[% HTML.escape(j.description) %]</td>
<td>[% IF j.lastcheckedtime;
INCLUDE renderDateTime timestamp = j.lastcheckedtime;
IF j.has_error || j.fetcherrormsg; %]&nbsp;<span class = 'badge badge-warning'>Error</span>[% END;
IF j.errormsg || j.fetcherrormsg; %]&nbsp;<span class = 'badge badge-warning'>Error</span>[% END;
ELSE; "-";
END %]</td>
[% IF j.get_column('nrtotal') > 0 %]
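This hunk, like several below, replaces a has_error accessor with direct tests of the errormsg and fetcherrormsg columns. Assuming has_error was a thin wrapper over those columns, the check being swapped in is equivalent to something like this sketch (a plain hashref stands in for the DBIx::Class row):

```
use strict;
use warnings;

# Stand-in for the jobset row; only the two error columns matter here.
my $jobset = { errormsg => "", fetcherrormsg => "could not fetch input" };

sub jobset_has_error {
    my ($j) = @_;
    return (($j->{errormsg} // "") ne "")
        || (($j->{fetcherrormsg} // "") ne "");
}

print jobset_has_error($jobset) ? "show Error badge\n" : "no errors\n";
```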

View File

@ -1,26 +0,0 @@
[% PROCESS common.tt %]
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />
[% INCLUDE style.tt %]
</head>
<body>
<div class="tab-content tab-pane">
<div id="tabs-errors" class="">
[% IF eval %]
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
[% ELSIF jobset %]
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
[% END %]
</div>
</div>
</body>
</html>

View File

@ -18,7 +18,8 @@
<h3>Metric: <a [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]><tt>[%HTML.escape(metric.name)%]</tt></a></h3>
[% id = metricDivId(metric.name);
[% id = "metric-" _ metric.name;
id = id.replace('\.', '_');
INCLUDE createChart dataUrl=c.uri_for('/job' project.name jobset.name job 'metric' metric.name); %]
[% END %]

View File

@ -48,16 +48,16 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
<a class="nav-link dropdown-toggle" data-toggle="dropdown" href="#">Actions</a>
<div class="dropdown-menu">
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('create_jobset'), [eval.id]) %]">Create a jobset from this evaluation</a>
[% IF totalQueued > 0 %]
[% IF unfinished.size > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('cancel'), [eval.id]) %]">Cancel all scheduled builds</a>
[% END %]
[% IF totalFailed > 0 %]
[% IF aborted.size > 0 || stillFail.size > 0 || nowFail.size > 0 || failed.size > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('restart_failed'), [eval.id]) %]">Restart all failed builds</a>
[% END %]
[% IF totalAborted > 0 %]
[% IF aborted.size > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('restart_aborted'), [eval.id]) %]">Restart all aborted builds</a>
[% END %]
[% IF totalQueued > 0 %]
[% IF unfinished.size > 0 %]
<a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('bump'), [eval.id]) %]">Bump builds to front of queue</a>
[% END %]
</div>
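The totalQueued/totalFailed/totalAborted flags dropped in this hunk presumably summarised the per-category build lists that the replacement conditions inspect directly; under that assumption they could be recomputed like so (list contents are sample data):

```
use strict;
use warnings;

my @unfinished = (101, 102);   # builds still queued or running
my @aborted    = ();
my @stillFail  = (103);
my @nowFail    = ();
my @failed     = (104);

# Hypothetical reconstruction of the removed counters; totalFailed
# mirrors the four lists the replacement condition checks.
my $totalQueued  = scalar @unfinished;
my $totalAborted = scalar @aborted;
my $totalFailed  = @aborted + @stillFail + @nowFail + @failed;

print "queued=$totalQueued aborted=$totalAborted failed=$totalFailed\n";
```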
@ -65,7 +65,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
[% END %]
[% IF aborted.size > 0 %]
<li class="nav-item"><a class="nav-link" href="#tabs-aborted" data-toggle="tab"><span class="text-warning">Aborted / Timed out Jobs ([% aborted.size %])</span></a></li>
<li class="nav-item"><a class="nav-link" href="#tabs-aborted" data-toggle="tab"><span class="text-warning">Aborted Jobs ([% aborted.size %])</span></a></li>
[% END %]
[% IF nowFail.size > 0 %]
<li class="nav-item"><a class="nav-link" href="#tabs-now-fail" data-toggle="tab"><span class="text-warning">Newly Failing Jobs ([% nowFail.size %])</span></a></li>
@ -90,7 +90,7 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
[% END %]
<li class="nav-item"><a class="nav-link" href="#tabs-inputs" data-toggle="tab">Inputs</a></li>
[% IF eval.evaluationerror.has_error %]
[% IF eval.evaluationerror.errormsg %]
<li class="nav-item"><a class="nav-link" href="#tabs-errors" data-toggle="tab"><span class="text-warning">Evaluation Errors</span></a></li>
[% END %]
</ul>
@ -108,6 +108,13 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
<div class="tab-content">
[% IF eval.evaluationerror.errormsg %]
<div id="tabs-errors" class="tab-pane">
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
</div>
[% END %]
<div id="tabs-aborted" class="tab-pane">
[% INCLUDE renderSome builds=aborted tabname="#tabs-aborted" %]
</div>
@ -165,9 +172,10 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
[% END %]
</div>
[% IF eval.evaluationerror.has_error %]
[% IF eval.evaluationerror.errormsg %]
<div id="tabs-errors" class="tab-pane">
<iframe src="[% c.uri_for(c.controller('JobsetEval').action_for('errors'), [eval.id], params) %]" loading="lazy" frameBorder="0" width="100%"></iframe>
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(eval.evaluationerror.errortime || eval.timestamp) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(eval.evaluationerror.errormsg) %]</pre></div></div>
</div>
[% END %]
</div>

View File

@ -61,7 +61,7 @@
[% END %]
<li class="nav-item"><a class="nav-link active" href="#tabs-evaluations" data-toggle="tab">Evaluations</a></li>
[% IF jobset.has_error || jobset.fetcherrormsg %]
[% IF jobset.errormsg || jobset.fetcherrormsg %]
<li class="nav-item"><a class="nav-link" href="#tabs-errors" data-toggle="tab"><span class="text-warning">Evaluation Errors</span></a></li>
[% END %]
<li class="nav-item"><a class="nav-link" href="#tabs-jobs" data-toggle="tab">Jobs</a></li>
@ -79,7 +79,7 @@
<th>Last checked:</th>
<td>
[% IF jobset.lastcheckedtime %]
[% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.has_error || jobset.fetcherrormsg %]<em class="text-warning">with errors!</em>[% ELSE %]<em>no errors</em>[% END %]
[% INCLUDE renderDateTime timestamp = jobset.lastcheckedtime %], [% IF jobset.errormsg || jobset.fetcherrormsg %]<em class="text-warning">with errors!</em>[% ELSE %]<em>no errors</em>[% END %]
[% ELSE %]
<em>never</em>
[% END %]
@ -117,9 +117,10 @@
</div>
[% IF jobset.has_error || jobset.fetcherrormsg %]
[% IF jobset.errormsg || jobset.fetcherrormsg %]
<div id="tabs-errors" class="tab-pane">
<iframe src="[% c.uri_for('/jobset' project.name jobset.name "errors") %]" loading="lazy" frameBorder="0" width="100%"></iframe>
<p>Errors occurred at [% INCLUDE renderDateTime timestamp=(jobset.errortime || jobset.lastcheckedtime) %].</p>
<div class="card bg-light"><div class="card-body"><pre>[% HTML.escape(jobset.fetcherrormsg || jobset.errormsg) %]</pre></div></div>
</div>
[% END %]

View File

@ -10,7 +10,31 @@
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=Edge" />
[% INCLUDE style.tt %]
<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-3.4.1.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-ui-1.10.4.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/js/moment/moment-2.24.0.min.js") %]"></script>
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link href="[% c.uri_for("/static/fontawesome/css/all.css") %]" rel="stylesheet" />
<script type="text/javascript" src="[% c.uri_for("/static/js/popper.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/bootstrap/js/bootstrap.min.js") %]"></script>
<link href="[% c.uri_for("/static/bootstrap/css/bootstrap.min.css") %]" rel="stylesheet" />
<!-- hydra.css may need to be moved to before bootstrap to make the @media rule work. -->
<link rel="stylesheet" href="[% c.uri_for("/static/css/hydra.css") %]" type="text/css" />
<link rel="stylesheet" href="[% c.uri_for("/static/css/rotated-th.css") %]" type="text/css" />
<style>
.popover { max-width: 40%; }
</style>
<script type="text/javascript" src="[% c.uri_for("/static/js/bootbox.min.js") %]"></script>
<link rel="stylesheet" href="[% c.uri_for("/static/css/tree.css") %]" type="text/css" />
<script type="text/javascript" src="[% c.uri_for("/static/js/common.js") %]"></script>
[% IF c.config.enable_google_login %]
<meta name="google-signin-client_id" content="[% c.config.google_client_id %]">

View File

@ -6,10 +6,10 @@
<thead>
<tr>
<th>Job</th>
<th>System</th>
<th>Build</th>
<th>Step</th>
<th>What</th>
<th>Status</th>
<th>Since</th>
</tr>
</thead>
@ -17,7 +17,7 @@
[% name = m.key ? stripSSHUser(m.key) : "localhost" %]
<thead>
<tr>
<th colspan="7">
<th colspan="6">
<tt [% IF m.value.disabled %]style="text-decoration: line-through;"[% END %]>[% INCLUDE renderMachineName machine=m.key %]</tt>
[% IF m.value.systemTypes %]
<span class="muted" style="font-weight: normal;">
@ -40,10 +40,10 @@
[% idle = 0 %]
<tr>
<td><tt>[% INCLUDE renderFullJobName project=step.project jobset=step.jobset job=step.job %]</tt></td>
<td><tt>[% step.system %]</tt></td>
<td><a href="[% c.uri_for('/build' step.build) %]">[% step.build %]</a></td>
<td>[% IF step.busy >= 30 %]<a class="row-link" href="[% c.uri_for('/build' step.build 'nixlog' step.stepnr 'tail') %]">[% step.stepnr %]</a>[% ELSE; step.stepnr; END %]</td>
<td><tt>[% step.drvpath.match('-(.*)').0 %]</tt></td>
<td>[% INCLUDE renderBusyStatus %]</td>
<td style="width: 10em">[% INCLUDE renderDuration duration = curTime - step.starttime %] </td>
</tr>
[% END %]

View File

@ -129,12 +129,6 @@ $(document).ready(function() {
el.addClass("is-local");
}
});
[...document.getElementsByTagName("iframe")].forEach((element) => {
element.contentWindow.addEventListener("DOMContentLoaded", (_) => {
element.style.height = element.contentWindow.document.body.scrollHeight + 'px';
})
})
});
var tabsLoaded = {};

View File

@ -7,7 +7,7 @@
[% ELSE %]
[% INCLUDE renderBuildList builds=resource showSchedulingInfo=1 hideResultInfo=1 busy=1 showStepName=1 %]
[% INCLUDE renderBuildList builds=resource showSchedulingInfo=1 hideResultInfo=1 busy=1 %]
[% END %]

View File

@ -1,24 +0,0 @@
<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-3.4.1.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/js/jquery/jquery-ui-1.10.4.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/js/moment/moment-2.24.0.min.js") %]"></script>
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link href="[% c.uri_for("/static/fontawesome/css/all.css") %]" rel="stylesheet" />
<script type="text/javascript" src="[% c.uri_for("/static/js/popper.min.js") %]"></script>
<script type="text/javascript" src="[% c.uri_for("/static/bootstrap/js/bootstrap.min.js") %]"></script>
<link href="[% c.uri_for("/static/bootstrap/css/bootstrap.min.css") %]" rel="stylesheet" />
<!-- hydra.css may need to be moved to before bootstrap to make the @media rule work. -->
<link rel="stylesheet" href="[% c.uri_for("/static/css/hydra.css") %]" type="text/css" />
<link rel="stylesheet" href="[% c.uri_for("/static/css/rotated-th.css") %]" type="text/css" />
<style>
.popover { max-width: 40%; }
</style>
<script type="text/javascript" src="[% c.uri_for("/static/js/bootbox.min.js") %]"></script>
<link rel="stylesheet" href="[% c.uri_for("/static/css/tree.css") %]" type="text/css" />
<script type="text/javascript" src="[% c.uri_for("/static/js/common.js") %]"></script>

View File

@ -372,7 +372,6 @@ sub evalJobs {
or die "cannot find the input containing the job expression\n";
@cmd = ("nix-eval-jobs",
"--option", "restrict-eval", "true",
"<" . $nixExprInputName . "/" . $nixExprPath . ">",
inputsToArgs($inputInfo));
}
@ -774,9 +773,6 @@ sub checkJobsetWrapped {
my $jobsetChanged = 0;
my %buildMap;
my @jobs;
push @jobs, $_ while defined($_ = $jobsIter->());
$db->txn_do(sub {
my $prevEval = getPrevJobsetEval($db, $jobset, 1);
@ -800,7 +796,7 @@ sub checkJobsetWrapped {
my @jobsWithConstituents;
foreach my $job (@jobs) {
while (defined(my $job = $jobsIter->())) {
if ($jobsetsJobset) {
die "The .jobsets jobset must only have a single job named 'jobsets'"
unless $job->{attr} eq "jobsets";
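This hunk switches hydra-eval-jobset from draining the jobs iterator into an array before the transaction to streaming jobs one at a time inside it. A self-contained sketch of the two consumption styles, with a closure standing in for the nix-eval-jobs iterator:

```
use strict;
use warnings;

# Stand-in iterator: returns the next job hashref, undef when exhausted.
my @queue     = map { { attr => "job$_" } } 1 .. 3;
my $jobs_iter = sub { shift @queue };

# Removed style: materialise every job up front (O(n) memory).
#   my @jobs;
#   push @jobs, $_ while defined($_ = $jobs_iter->());

# Added style: keep only the current job in memory.
while (defined(my $job = $jobs_iter->())) {
    print "processing $job->{attr}\n";
}
```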

View File

@ -32,9 +32,4 @@ subtest "/jobset/PROJECT/JOBSET/evals" => sub {
ok($jobsetevals->is_success, "The page showing the jobset evals returns 200.");
};
subtest "/jobset/PROJECT/JOBSET/errors" => sub {
my $jobsetevals = request(GET '/jobset/' . $project->name . '/' . $jobset->name . '/errors');
ok($jobsetevals->is_success, "The page showing the jobset eval errors returns 200.");
};
done_testing;

View File

@ -35,10 +35,6 @@ subtest "Fetching the eval's overview" => sub {
is($fetch->code, 200, "channel page is 200");
};
subtest "Fetching the eval's overview" => sub {
my $fetch = request(GET '/eval/' . $eval->id, '/errors');
is($fetch->code, 200, "errors page is 200");
};
done_testing;

View File

@ -25,10 +25,7 @@ subtest "empty diff" => sub {
removed => [],
unfinished => [],
aborted => [],
totalAborted => 0,
totalFailed => 0,
totalQueued => 0,
failed => [],
},
"empty list of jobs returns empty diff"
);
@ -51,7 +48,12 @@ subtest "2 different jobs" => sub {
"succeed_with_failed is a new job"
);
is($ret->{totalFailed}, 1, "total failed jobs is 1");
is(scalar(@{$ret->{failed}}), 1, "list of failed jobs is 1 element long");
is(
$ret->{failed}[0]->get_column('id'),
$builds->{"succeed_with_failed"}->get_column('id'),
"succeed_with_failed is a failed job"
);
is(
$ret->{removed},
@ -68,9 +70,9 @@ subtest "2 different jobs" => sub {
subtest "failed job with no previous history" => sub {
my $ret = buildDiff([$builds->{"fails"}], []);
is($ret->{totalFailed}, 1, "total failed jobs is 1");
is(scalar(@{$ret->{failed}}), 1, "list of failed jobs is 1 element long");
is(
$ret->{new}[0]->get_column('id'),
$ret->{failed}[0]->get_column('id'),
$builds->{"fails"}->get_column('id'),
"fails is a failed job"
);
@ -91,6 +93,7 @@ subtest "not-yet-built job with no previous history" => sub {
is($ret->{removed}, [], "removed");
is($ret->{unfinished}, [], "unfinished");
is($ret->{aborted}, [], "aborted");
is($ret->{failed}, [], "failed");
is(scalar(@{$ret->{new}}), 1, "list of new jobs is 1 element long");
is(
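Across these hunks the test drops the total* counters and re-states its assertions in terms of the failed list. A hedged sketch of the empty-diff shape the updated assertions imply, listing only the keys visible in the test:

```
use strict;
use warnings;
use Test2::V0;

my $empty = {
    new        => [],
    removed    => [],
    unfinished => [],
    aborted    => [],
    failed     => [],
};

is(scalar @{ $empty->{failed} }, 0, "no failed builds in an empty diff");
done_testing;
```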

View File

@ -6,55 +6,27 @@ use Hydra::Helper::Exec;
my $ctx = test_context();
subtest "broken constituents expression" => sub {
my $jobsetCtx = $ctx->makeJobset(
my $jobsetCtx = $ctx->makeJobset(
expression => 'constituents-broken.nix',
);
my $jobset = $jobsetCtx->{"jobset"};
);
my $jobset = $jobsetCtx->{"jobset"};
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
);
isnt($res, 0, "hydra-eval-jobset exits non-zero");
ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
like(
);
isnt($res, 0, "hydra-eval-jobset exits non-zero");
ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
like(
$stderr,
qr/aggregate job 'mixed_aggregate' references non-existent job 'constituentA'/,
qr/aggregate job mixed_aggregate failed with the error: "constituentA": does not exist/,
"The stderr record includes a relevant error message"
);
);
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
like(
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
like(
$jobset->errormsg,
qr/aggregate job mixed_aggregate failed with the error: constituentA: does not exist/,
qr/aggregate job mixed_aggregate failed with the error: "constituentA": does not exist/,
"The jobset records a relevant error message"
);
};
subtest "no matches" => sub {
my $jobsetCtx = $ctx->makeJobset(
expression => 'constituents-no-matches.nix',
);
my $jobset = $jobsetCtx->{"jobset"};
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
);
isnt($res, 0, "hydra-eval-jobset exits non-zero");
ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
like(
$stderr,
qr/aggregate job 'non_match_aggregate' references constituent glob pattern 'tests\.\*' with no matches/,
"The stderr record includes a relevant error message"
);
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
like(
$jobset->errormsg,
qr/aggregate job non_match_aggregate failed with the error: tests\.\*: constituent glob pattern had no matches/,
qr/in job non_match_aggregate:\ntests\.\*: constituent glob pattern had no matches/,
"The jobset records a relevant error message"
);
};
);
done_testing;

View File

@ -1,138 +0,0 @@
use strict;
use warnings;
use Setup;
use Test2::V0;
use Hydra::Helper::Exec;
use Data::Dumper;
my $ctx = test_context();
subtest "general glob testing" => sub {
my $jobsetCtx = $ctx->makeJobset(
expression => 'constituents-glob.nix',
);
my $jobset = $jobsetCtx->{"jobset"};
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
);
is($res, 0, "hydra-eval-jobset exits zero");
my $builds = {};
for my $build ($jobset->builds) {
$builds->{$build->job} = $build;
}
subtest "basic globbing works" => sub {
ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
my @constituents = $builds->{"ok_aggregate"}->constituents->all;
is(2, scalar @constituents, "'ok_aggregate' has two constituents");
my @sortedConstituentNames = sort (map { $_->nixname } @constituents);
is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
};
subtest "transitivity is OK" => sub {
ok(defined $builds->{"indirect_aggregate"}, "'indirect_aggregate' is part of the jobset evaluation");
my @constituents = $builds->{"indirect_aggregate"}->constituents->all;
is(1, scalar @constituents, "'indirect_aggregate' has one constituent");
is($constituents[0]->nixname, "direct_aggregate", "'indirect_aggregate' has 'direct_aggregate' as single constituent");
};
};
subtest "* selects all except current aggregate" => sub {
my $jobsetCtx = $ctx->makeJobset(
expression => 'constituents-glob-all.nix',
);
my $jobset = $jobsetCtx->{"jobset"};
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
);
subtest "no eval errors" => sub {
ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
ok(
$stderr !~ "aggregate job ok_aggregate has a constituent .* that doesn't correspond to a Hydra build",
"Catchall wildcard must not select itself as constituent"
);
$jobset->discard_changes; # refresh from DB
is(
$jobset->has_error,
0,
"eval-errors non-empty"
);
};
my $builds = {};
for my $build ($jobset->builds) {
$builds->{$build->job} = $build;
}
subtest "two constituents" => sub {
ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
my @constituents = $builds->{"ok_aggregate"}->constituents->all;
is(2, scalar @constituents, "'ok_aggregate' has two constituents");
my @sortedConstituentNames = sort (map { $_->nixname } @constituents);
is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
};
};
subtest "trivial cycle check" => sub {
my $jobsetCtx = $ctx->makeJobset(
expression => 'constituents-cycle.nix',
);
my $jobset = $jobsetCtx->{"jobset"};
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
);
ok(
$stderr =~ "Found dependency cycle between jobs 'indirect_aggregate' and 'ok_aggregate'",
"Dependency cycle error is on stderr"
);
ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
like(
$jobset->errormsg,
qr/Dependency cycle: indirect_aggregate <-> ok_aggregate/,
"eval-errors non-empty"
);
is(0, $jobset->builds->count, "No builds should be scheduled");
};
subtest "cycle check with globbing" => sub {
my $jobsetCtx = $ctx->makeJobset(
expression => 'constituents-cycle-glob.nix',
);
my $jobset = $jobsetCtx->{"jobset"};
my ($res, $stdout, $stderr) = captureStdoutStderr(60,
("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
);
ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
like(
$jobset->errormsg,
qr/aggregate job indirect_aggregate failed with the error: Dependency cycle: indirect_aggregate <-> packages.constituentA/,
"packages.constituentA error missing"
);
# on this branch of Hydra, hydra-eval-jobset fails hard if an aggregate
# job is broken.
is(0, $jobset->builds->count, "Zero jobs are scheduled");
};
done_testing;
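The deleted test file above exercised glob-style constituent selection ("packages.*", a catch-all "*" that must not select the aggregate itself, and cycle detection). A minimal illustration of the glob matching those tests describe; the glob-to-regex translation here is an assumption for demonstration, not Hydra's actual evaluator code:

```
use strict;
use warnings;

# Anchored glob match: escape regex metacharacters, let '*' match any run.
sub glob_to_re {
    my ($glob) = @_;
    my $re = join '.*', map { quotemeta } split(/\*/, $glob, -1);
    return qr/^$re$/;
}

my @jobs = qw(packages.constituentA packages.constituentB ok_aggregate);
my $re   = glob_to_re("packages.*");

# A catch-all pattern must not pick the aggregate job itself.
my @constituents = grep { $_ =~ $re && $_ ne "ok_aggregate" } @jobs;
print "@constituents\n";   # packages.constituentA packages.constituentB
```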

View File

@ -1,14 +0,0 @@
rec {
path = "/nix/store/l9mg93sgx50y88p5rr6x1vib6j1rjsds-coreutils-9.1/bin";
mkDerivation = args:
derivation ({
system = builtins.currentSystem;
PATH = path;
} // args);
mkContentAddressedDerivation = args: mkDerivation ({
__contentAddressed = true;
outputHashMode = "recursive";
outputHashAlgo = "sha256";
} // args);
}

View File

@ -1,34 +0,0 @@
with import ./config.nix;
{
packages.constituentA = mkDerivation {
name = "empty-dir-A";
builder = ./empty-dir-builder.sh;
_hydraAggregate = true;
_hydraGlobConstituents = true;
constituents = [ "*_aggregate" ];
};
packages.constituentB = mkDerivation {
name = "empty-dir-B";
builder = ./empty-dir-builder.sh;
};
ok_aggregate = mkDerivation {
name = "direct_aggregate";
_hydraAggregate = true;
_hydraGlobConstituents = true;
constituents = [
"packages.*"
];
builder = ./empty-dir-builder.sh;
};
indirect_aggregate = mkDerivation {
name = "indirect_aggregate";
_hydraAggregate = true;
constituents = [
"ok_aggregate"
];
builder = ./empty-dir-builder.sh;
};
}

View File

@ -1,21 +0,0 @@
with import ./config.nix;
{
ok_aggregate = mkDerivation {
name = "direct_aggregate";
_hydraAggregate = true;
_hydraGlobConstituents = true;
constituents = [
"indirect_aggregate"
];
builder = ./empty-dir-builder.sh;
};
indirect_aggregate = mkDerivation {
name = "indirect_aggregate";
_hydraAggregate = true;
constituents = [
"ok_aggregate"
];
builder = ./empty-dir-builder.sh;
};
}

View File

@ -1,22 +0,0 @@
with import ./config.nix;
{
packages.constituentA = mkDerivation {
name = "empty-dir-A";
builder = ./empty-dir-builder.sh;
};
packages.constituentB = mkDerivation {
name = "empty-dir-B";
builder = ./empty-dir-builder.sh;
};
ok_aggregate = mkDerivation {
name = "direct_aggregate";
_hydraAggregate = true;
_hydraGlobConstituents = true;
constituents = [
"*"
];
builder = ./empty-dir-builder.sh;
};
}

View File

@ -1,31 +0,0 @@
with import ./config.nix;
{
packages.constituentA = mkDerivation {
name = "empty-dir-A";
builder = ./empty-dir-builder.sh;
};
packages.constituentB = mkDerivation {
name = "empty-dir-B";
builder = ./empty-dir-builder.sh;
};
ok_aggregate = mkDerivation {
name = "direct_aggregate";
_hydraAggregate = true;
_hydraGlobConstituents = true;
constituents = [
"packages.*"
];
builder = ./empty-dir-builder.sh;
};
indirect_aggregate = mkDerivation {
name = "indirect_aggregate";
_hydraAggregate = true;
constituents = [
"ok_aggregate"
];
builder = ./empty-dir-builder.sh;
};
}

View File

@ -1,20 +0,0 @@
with import ./config.nix;
{
non_match_aggregate = mkDerivation {
name = "mixed_aggregate";
_hydraAggregate = true;
_hydraGlobConstituents = true;
constituents = [
"tests.*"
];
builder = ./empty-dir-builder.sh;
};
# Without a second job no jobset is attempted to be created
# (the only job would be broken)
# and thus the constituent validation is never reached.
dummy = mkDerivation {
name = "dummy";
builder = ./empty-dir-builder.sh;
};
}

View File

@ -1,24 +0,0 @@
{
"enabled": 1,
"hidden": false,
"description": "declarative-jobset-example",
"nixexprinput": "src",
"nixexprpath": "declarative/generator.nix",
"checkinterval": 300,
"schedulingshares": 100,
"enableemail": false,
"emailoverride": "",
"keepnr": 3,
"inputs": {
"src": {
"type": "path",
"value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
"emailresponsible": false
},
"jobspath": {
"type": "string",
"value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
"emailresponsible": false
}
}
}
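The deleted file above is a declarative jobset spec. For reference, a sketch that emits an equivalent spec from Perl using only core modules; the field names are exactly those of the deleted JSON:

```
use strict;
use warnings;
use JSON::PP;

my $spec = {
    enabled          => 1,
    hidden           => JSON::PP::false,
    description      => "declarative-jobset-example",
    nixexprinput     => "src",
    nixexprpath      => "declarative/generator.nix",
    checkinterval    => 300,
    schedulingshares => 100,
    enableemail      => JSON::PP::false,
    emailoverride    => "",
    keepnr           => 3,
};

print JSON::PP->new->canonical->pretty->encode($spec);
```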

View File

@ -14,7 +14,7 @@ our @EXPORT = qw(
sub evalSucceeds {
my ($jobset) = @_;
my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-eval-jobset", $jobset->project->name, $jobset->name));
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
$jobset->discard_changes; # refresh from DB
if ($res) {
chomp $stdout; chomp $stderr;
utf8::decode($stdout) or die "Invalid unicode in stdout.";
@ -29,7 +29,7 @@ sub evalSucceeds {
sub evalFails {
my ($jobset) = @_;
my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-eval-jobset", $jobset->project->name, $jobset->name));
$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
$jobset->discard_changes; # refresh from DB
if (!$res) {
chomp $stdout; chomp $stderr;
utf8::decode($stdout) or die "Invalid unicode in stdout.";

View File

@ -22,11 +22,11 @@ is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/broken-constituent.nix
like(
$jobset->errormsg,
qr/^does-not-exist: does not exist$/m,
qr/^"does-not-exist": does not exist$/m,
"Evaluating jobs/broken-constituent.nix should log an error for does-not-exist");
like(
$jobset->errormsg,
qr/^does-not-evaluate: error: assertion 'false' failed/m,
qr/^"does-not-evaluate": "error: assertion 'false' failed/m,
"Evaluating jobs/broken-constituent.nix should log an error for does-not-evaluate");
done_testing;

View File

@ -13,7 +13,7 @@ my $constituentBuildA = $builds->{"constituentA"};
my $constituentBuildB = $builds->{"constituentB"};
my $eval = $constituentBuildA->jobsetevals->first();
is($eval->evaluationerror->has_error, 0);
is($eval->evaluationerror->errormsg, "");
subtest "Verifying the direct aggregate" => sub {
my $aggBuild = $builds->{"direct_aggregate"};