Merge remote-tracking branch 'origin/master' into add-gitea-pulls

commit ac47bb8607

.gitignore (vendored, 1 change)
@@ -3,6 +3,7 @@
 /src/sql/hydra-postgresql.sql
 /src/sql/hydra-sqlite.sql
 /src/sql/tmp.sqlite
+.hydra-data
 result
 result-*
 outputs
README.md (27 changes)
@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
 You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
 
 ```
-$ nix-build
+$ nix build
 ```
 
 ### Development Environment
 
 You can use the provided shell.nix to get a working development environment:
 ```
-$ nix-shell
-$ autoreconfPhase
-$ configurePhase # NOTE: not ./configure
-$ make
+$ nix develop
+$ mesonConfigurePhase
+$ ninja
 ```
 
 ### Executing Hydra During Development
@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
 can be done using [foreman](https://github.com/ddollar/foreman):
 
 ```
-$ nix-shell
+$ nix develop
 $ # hack hack
-$ make
+$ ninja -C build
 $ foreman start
 ```
 
@@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environme
 Then, you can run the tests and the perlcritic linter together with:
 
 ```console
-$ nix-shell
-$ make check
+$ nix develop
+$ ninja -C build test
 ```
 
 You can run a single test with:
 
 ```
-$ nix-shell
-$ yath test ./t/foo/bar.t
+$ nix develop
+$ cd build
+$ meson test --test-args=../t/Hydra/Event.t testsuite
 ```
 
 And you can run just perlcritic with:
 
 ```
-$ nix-shell
-$ make perlcritic
+$ nix develop
+$ cd build
+$ meson test perlcritic
 ```
 
 ### JSON API
@@ -11,12 +11,6 @@ $ cd hydra
 To enter a shell in which all environment variables (such as `PERL5LIB`)
 and dependencies can be found:
 
-```console
-$ nix-shell
-```
-
-or when flakes are enabled:
-
 ```console
 $ nix develop
 ```
@@ -24,15 +18,15 @@ $ nix develop
 To build Hydra, you should then do:
 
 ```console
-[nix-shell]$ autoreconfPhase
-[nix-shell]$ configurePhase
-[nix-shell]$ make -j$(nproc)
+$ mesonConfigurePhase
+$ ninja
 ```
 
 You start a local database, the webserver, and other components with
 foreman:
 
 ```console
+$ ninja -C build
 $ foreman start
 ```
 
@@ -47,18 +41,11 @@ $ ./src/script/hydra-server
 You can run Hydra's test suite with the following:
 
 ```console
-[nix-shell]$ make check
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
-[nix-shell]$ # or run yath directly:
-[nix-shell]$ yath test
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ yath test -j $NIX_BUILD_CORES
+$ meson test
+# to run as many tests as you have cores:
+$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
 ```
 
-When using `yath` instead of `make check`, ensure you have run `make`
-in the root of the repository at least once.
-
 **Warning**: Currently, the tests can fail
 if run with high parallelism [due to an issue in
 `Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
@@ -75,7 +62,7 @@ will reload the page every time you save.
 To build Hydra and its dependencies:
 
 ```console
-$ nix-build release.nix -A build.x86_64-linux
+$ nix build .#packages.x86_64-linux.default
 ```
 
 ## Development Tasks
flake.lock (generated, 6 changes)
@@ -29,11 +29,11 @@
     "nix-eval-jobs": {
       "flake": false,
       "locked": {
-        "lastModified": 1739500569,
-        "narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
+        "lastModified": 1743008255,
+        "narHash": "sha256-Lo4KFBNcY8tmBuCmEr2XV0IUZtxXHmbXPNLkov/QSU0=",
         "owner": "nix-community",
         "repo": "nix-eval-jobs",
-        "rev": "4b392b284877d203ae262e16af269f702df036bc",
+        "rev": "f7418fc1fa45b96d37baa95ff3c016dd5be3876b",
         "type": "github"
       },
       "original": {
@@ -15,7 +15,6 @@
   systemd.services.hydra-send-stats.enable = false;
 
   services.postgresql.enable = true;
-  services.postgresql.package = pkgs.postgresql_12;
 
   # The following is to work around the following error from hydra-server:
   # [error] Caught exception in engine "Cannot determine local time zone"
@@ -468,7 +468,7 @@ in
       elif [[ $compression == zstd ]]; then
         compression="zstd --rm"
       fi
-      find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
+      find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
     '';
     startAt = "Sun 01:45";
   };
@@ -241,7 +241,7 @@ stdenv.mkDerivation (finalAttrs: {
   shellHook = ''
     pushd $(git rev-parse --show-toplevel) >/dev/null
 
-    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
+    PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
     PERL5LIB=$(pwd)/src/lib:$PERL5LIB
     export HYDRA_HOME="$(pwd)/src/"
     mkdir -p .hydra-data
@@ -9,10 +9,13 @@
 #include "path.hh"
 #include "legacy-ssh-store.hh"
-#include "serve-protocol.hh"
-#include "serve-protocol-impl.hh"
 #include "state.hh"
 #include "current-process.hh"
 #include "processes.hh"
 #include "util.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
+#include "ssh.hh"
+#include "finally.hh"
 #include "url.hh"
@@ -36,6 +39,38 @@ bool ::Machine::isLocalhost() const
 
 namespace nix::build_remote {
 
+static std::unique_ptr<SSHMaster::Connection> openConnection(
+    ::Machine::ptr machine, SSHMaster & master)
+{
+    Strings command = {"nix-store", "--serve", "--write"};
+    if (machine->isLocalhost()) {
+        command.push_back("--builders");
+        command.push_back("");
+    } else {
+        auto remoteStore = machine->storeUri.params.find("remote-store");
+        if (remoteStore != machine->storeUri.params.end()) {
+            command.push_back("--store");
+            command.push_back(shellEscape(remoteStore->second));
+        }
+    }
+
+    auto ret = master.startCommand(std::move(command), {
+        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
+    });
+
+    // XXX: determine the actual max value we can use from /proc.
+    // FIXME: Should this be upstreamed into `startCommand` in Nix?
+    int pipesize = 1024 * 1024;
+    fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+    fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+    return ret;
+}
+
+
 static void copyClosureTo(
     ::Machine::Connection & conn,
     Store & destStore,
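Side note on the `F_SETPIPE_SZ` calls above: on Linux this fcntl(2) operation resizes a pipe's kernel buffer, and the "XXX" comment refers to the unprivileged ceiling in /proc/sys/fs/pipe-max-size. A minimal standalone sketch of the same idea (not Hydra's code):

```cpp
// Grow a pipe's kernel buffer on Linux. fcntl(2) with F_SETPIPE_SZ takes the
// requested size as a plain int and returns the size actually granted
// (rounded up by the kernel), or -1 on failure, e.g. when an unprivileged
// caller exceeds /proc/sys/fs/pipe-max-size.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    int fds[2];
    if (pipe(fds) != 0) return 1;

    int granted = fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);
    if (granted < 0) std::perror("F_SETPIPE_SZ");
    else std::printf("pipe buffer is now %d bytes\n", granted);

    close(fds[0]);
    close(fds[1]);
    return 0;
}
```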
@@ -52,8 +87,8 @@ static void copyClosureTo(
     // FIXME: substitute output pollutes our build log
     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = conn.store->queryValidPaths(
-        closure, true, useSubstitutes);
+    auto present = conn.queryValidPaths(
+        destStore, true, closure, useSubstitutes);
 
     if (present.size() == closure.size()) return;
 
@@ -68,7 +103,12 @@ static void copyClosureTo(
     std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));
 
-    conn.store->addMultipleToStoreLegacy(destStore, missing);
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();
+
+    if (readInt(conn.from) != 1)
+        throw Error("remote machine failed to import closure");
 }
 
@@ -188,7 +228,7 @@ static BuildResult performBuild(
     counter & nrStepsBuilding
 )
 {
-    auto kont = conn.store->buildDerivationAsync(drvPath, drv, options);
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
 
     BuildResult result;
 
@@ -197,10 +237,7 @@ static BuildResult performBuild(
     startTime = time(0);
     {
         MaintainCount<counter> mc(nrStepsBuilding);
-        result = kont();
-        // Without proper call-once functions, we need to manually
-        // delete after calling.
-        kont = {};
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
     }
     stopTime = time(0);
 
@@ -216,7 +253,7 @@ static BuildResult performBuild(
 
     // If the protocol was too old to give us `builtOutputs`, initialize
     // it manually by introspecting the derivation.
-    if (GET_PROTOCOL_MINOR(conn.store->getProtocol()) < 6)
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
     {
         // If the remote is too old to handle CA derivations, we can’t get this
        // far anyways
@@ -249,25 +286,26 @@ static void copyPathFromRemote(
     const ValidPathInfo & info
 )
 {
-    /* Receive the NAR from the remote and add it to the
-       destination store. Meanwhile, extract all the info from the
-       NAR that getBuildOutput() needs. */
-    auto source2 = sinkToSource([&](Sink & sink)
-    {
-        /* Note: we should only send the command to dump the store
-           path to the remote if the NAR is actually going to get read
-           by the destination store, which won't happen if this path
-           is already valid on the destination store. Since this
-           lambda function only gets executed if someone tries to read
-           from source2, we will send the command from here rather
-           than outside the lambda. */
-        conn.store->narFromPath(info.path, [&](Source & source) {
-            TeeSource tee{source, sink};
-            extractNarData(tee, conn.store->printStorePath(info.path), narMembers);
-        });
-    });
-
-    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+    /* Receive the NAR from the remote and add it to the
+       destination store. Meanwhile, extract all the info from the
+       NAR that getBuildOutput() needs. */
+    auto source2 = sinkToSource([&](Sink & sink)
+    {
+        /* Note: we should only send the command to dump the store
+           path to the remote if the NAR is actually going to get read
+           by the destination store, which won't happen if this path
+           is already valid on the destination store. Since this
+           lambda function only gets executed if someone tries to read
+           from source2, we will send the command from here rather
+           than outside the lambda. */
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();
+
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
 }
 
 static void copyPathsFromRemote(
@@ -366,39 +404,30 @@ void State::buildRemote(ref<Store> destStore,
 
     updateStep(ssConnecting);
 
-    // FIXME: rewrite to use Store.
-    ::Machine::Connection conn {
-        .machine = machine,
-        .store = [&]{
-            auto * pSpecified = std::get_if<StoreReference::Specified>(&machine->storeUri.variant);
-            if (!pSpecified || pSpecified->scheme != "ssh") {
-                throw Error("Currently, only (legacy-)ssh stores are supported!");
-            }
-
-            auto remoteStore = machine->openStore().dynamic_pointer_cast<LegacySSHStore>();
-            assert(remoteStore);
-
-            remoteStore->connPipeSize = 1024 * 1024;
-
-            if (machine->isLocalhost()) {
-                auto rp_new = remoteStore->remoteProgram.get();
-                rp_new.push_back("--builders");
-                rp_new.push_back("");
-                const_cast<nix::Setting<Strings> &>(remoteStore->remoteProgram).assign(rp_new);
-            }
-            remoteStore->extraSshArgs = {
-                "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
-            };
-            const_cast<nix::Setting<int> &>(remoteStore->logFD).assign(logFD.get());
-
-            return nix::ref{remoteStore};
-        }(),
-    };
+    auto storeRef = machine->completeStoreReference();
+
+    auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
+    if (!pSpecified || pSpecified->scheme != "ssh") {
+        throw Error("Currently, only (legacy-)ssh stores are supported!");
+    }
+
+    LegacySSHStoreConfig storeConfig {
+        pSpecified->scheme,
+        pSpecified->authority,
+        storeRef.params
+    };
+
+    auto master = storeConfig.createSSHMaster(
+        false, // no SSH master yet
+        logFD.get());
+
+    // FIXME: rewrite to use Store.
+    auto child = build_remote::openConnection(machine, master);
 
     {
         auto activeStepState(activeStep->state_.lock());
         if (activeStepState->cancelled) throw Error("step cancelled");
-        activeStepState->pid = conn.store->getConnectionPid();
+        activeStepState->pid = child->sshPid;
     }
 
     Finally clearPid([&]() {
@@ -413,12 +442,35 @@ void State::buildRemote(ref<Store> destStore,
        process. Meh. */
     });
 
+    ::Machine::Connection conn {
+        {
+            .to = child->in.get(),
+            .from = child->out.get(),
+            /* Handshake. */
+            .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+        },
+        /*.machine =*/ machine,
+    };
+
     Finally updateStats([&]() {
-        auto stats = conn.store->getConnectionStats();
-        bytesReceived += stats.bytesReceived;
-        bytesSent += stats.bytesSent;
+        bytesReceived += conn.from.read;
+        bytesSent += conn.to.written;
     });
 
+    constexpr ServeProto::Version our_version = 0x206;
+
+    try {
+        conn.remoteVersion = decltype(conn)::handshake(
+            conn.to,
+            conn.from,
+            our_version,
+            machine->storeUri.render());
+    } catch (EndOfFile & e) {
+        child->sshPid.wait();
+        std::string s = chomp(readFile(result.logFile));
+        throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
+    }
+
     {
         auto info(machine->state->connectInfo.lock());
         info->consecutiveFailures = 0;
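Side note on the handshake above: Nix's serve protocol packs the major and minor version into a single word, so `our_version = 0x206` advertises protocol 2.6, and the earlier `GET_PROTOCOL_MINOR(conn.remoteVersion) < 6` check tests the negotiated minor version. A minimal sketch of the decoding (mask semantics as in Nix's serve-protocol.hh):

```cpp
// Decode a serve-protocol version word. Nix defines
//   GET_PROTOCOL_MAJOR(x) as ((x) & 0xff00)
//   GET_PROTOCOL_MINOR(x) as ((x) & 0x00ff)
// so 0x206 carries major 2 (after shifting the masked byte) and minor 6.
#include <cstdio>

int main()
{
    unsigned version = 0x206;
    std::printf("major: %u, minor: %u\n",
        (version & 0xff00) >> 8,  // 2
        version & 0x00ff);        // 6
    return 0;
}
```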
@@ -487,7 +539,7 @@ void State::buildRemote(ref<Store> destStore,
 
     auto now1 = std::chrono::steady_clock::now();
 
-    auto infos = conn.store->queryPathInfosUncached(outputs);
+    auto infos = conn.queryPathInfos(*localStore, outputs);
 
     size_t totalNarSize = 0;
     for (auto & [_, info] : infos) totalNarSize += info.narSize;
@@ -522,11 +574,9 @@ void State::buildRemote(ref<Store> destStore,
         }
     }
 
-    /* Shut down the connection done by RAII.
-
-       Only difference is kill() instead of wait() (i.e. send signal
-       then wait())
-    */
+    /* Shut down the connection. */
+    child->in = -1;
+    child->sshPid.wait();
 
 } catch (Error & e) {
     /* Disable this machine until a certain period of time has
@@ -20,7 +20,9 @@
 #include "store-api.hh"
 #include "sync.hh"
 #include "nar-extractor.hh"
-#include "legacy-ssh-store.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
+#include "serve-protocol-connection.hh"
 #include "machines.hh"
 
@@ -290,11 +292,9 @@ struct Machine : nix::Machine
     bool isLocalhost() const;
 
     // A connection to a machine
-    struct Connection {
+    struct Connection : nix::ServeProto::BasicClientConnection {
         // Backpointer to the machine
         ptr machine;
-        // Opened store
-        nix::ref<nix::LegacySSHStore> store;
     };
 };
 
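Side note: the change relies on `nix::ServeProto::BasicClientConnection` supplying the members used throughout this diff (`conn.to`, `conn.from`, `conn.remoteVersion`, plus helpers such as `handshake`, `queryValidPaths`, `putBuildDerivationRequest`, and `queryPathInfos`). A rough sketch of that shape, inferred from the usage here rather than copied from the authoritative definition in Nix's serve-protocol-connection.hh:

```cpp
// Hypothetical outline only; field and method names mirror how this diff
// uses the class, not a verbatim copy of the Nix header.
struct BasicClientConnectionShape
{
    nix::FdSink to;        // writes travel to the remote `nix-store --serve`
    nix::FdSource from;    // reads come back from the remote process
    nix::ServeProto::Version remoteVersion; // set by handshake()

    // Typed helpers wrapping individual serve-protocol round trips,
    // e.g. queryValidPaths(), putBuildDerivationRequest(), queryPathInfos().
};
```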
@@ -6,6 +6,7 @@ use base 'Catalyst::View::TT';
 use Template::Plugin::HTML;
 use Hydra::Helper::Nix;
 use Time::Seconds;
+use Digest::SHA qw(sha1_hex);
 
 __PACKAGE__->config(
     TEMPLATE_EXTENSION => '.tt',
@@ -25,8 +26,14 @@ __PACKAGE__->config(
         makeNameTextForJobset
         relativeDuration
         stripSSHUser
+        metricDivId
     /]);
 
+sub metricDivId {
+    my ($self, $c, $text) = @_;
+    return "metric-" . sha1_hex($text);
+}
+
 sub buildLogExists {
     my ($self, $c, $build) = @_;
     return 1 if defined $c->config->{log_prefix};
|
||||
|
||||
<h3>Metric: <a [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]><tt>[%HTML.escape(metric.name)%]</tt></a></h3>
|
||||
|
||||
[% id = "metric-" _ metric.name;
|
||||
id = id.replace('\.', '_');
|
||||
[% id = metricDivId(metric.name);
|
||||
INCLUDE createChart dataUrl=c.uri_for('/job' project.name jobset.name job 'metric' metric.name); %]
|
||||
|
||||
[% END %]
|
||||
|
@ -773,6 +773,9 @@ sub checkJobsetWrapped {
|
||||
my $jobsetChanged = 0;
|
||||
my %buildMap;
|
||||
|
||||
my @jobs;
|
||||
push @jobs, $_ while defined($_ = $jobsIter->());
|
||||
|
||||
$db->txn_do(sub {
|
||||
my $prevEval = getPrevJobsetEval($db, $jobset, 1);
|
||||
|
||||
@@ -796,7 +799,7 @@ sub checkJobsetWrapped {
 
         my @jobsWithConstituents;
 
-        while (defined(my $job = $jobsIter->())) {
+        foreach my $job (@jobs) {
             if ($jobsetsJobset) {
                 die "The .jobsets jobset must only have a single job named 'jobsets'"
                     unless $job->{attr} eq "jobsets";
@@ -6,27 +6,55 @@ use Hydra::Helper::Exec;
 
 my $ctx = test_context();
 
-my $jobsetCtx = $ctx->makeJobset(
-    expression => 'constituents-broken.nix',
-);
-my $jobset = $jobsetCtx->{"jobset"};
+subtest "broken constituents expression" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-broken.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
 
-my ($res, $stdout, $stderr) = captureStdoutStderr(60,
-    ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
-);
-isnt($res, 0, "hydra-eval-jobset exits non-zero");
-ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
-like(
-    $stderr,
-    qr/aggregate job ‘mixed_aggregate’ failed with the error: "constituentA": does not exist/,
-    "The stderr record includes a relevant error message"
-);
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    isnt($res, 0, "hydra-eval-jobset exits non-zero");
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+    like(
+        $stderr,
+        qr/aggregate job 'mixed_aggregate' references non-existent job 'constituentA'/,
+        "The stderr record includes a relevant error message"
+    );
 
-$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
-like(
-    $jobset->errormsg,
-    qr/aggregate job ‘mixed_aggregate’ failed with the error: "constituentA": does not exist/,
-    "The jobset records a relevant error message"
-);
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/,
+        "The jobset records a relevant error message"
+    );
+};
+
+subtest "no matches" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-no-matches.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    isnt($res, 0, "hydra-eval-jobset exits non-zero");
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+    like(
+        $stderr,
+        qr/aggregate job 'non_match_aggregate' references constituent glob pattern 'tests\.\*' with no matches/,
+        "The stderr record includes a relevant error message"
+    );
+
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
-        qr/aggregate job ‘non_match_aggregate’ failed with the error: tests\.\*: constituent glob pattern had no matches/,
+        qr/in job ‘non_match_aggregate’:\ntests\.\*: constituent glob pattern had no matches/,
+        "The jobset records a relevant error message"
+    );
+};
 
 done_testing;
t/evaluator/evaluate-constituents-globbing.t (new file, 138 lines)

use strict;
use warnings;
use Setup;
use Test2::V0;
use Hydra::Helper::Exec;
use Data::Dumper;

my $ctx = test_context();

subtest "general glob testing" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-glob.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );
    is($res, 0, "hydra-eval-jobset exits zero");

    my $builds = {};
    for my $build ($jobset->builds) {
        $builds->{$build->job} = $build;
    }

    subtest "basic globbing works" => sub {
        ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
        my @constituents = $builds->{"ok_aggregate"}->constituents->all;
        is(2, scalar @constituents, "'ok_aggregate' has two constituents");

        my @sortedConstituentNames = sort (map { $_->nixname } @constituents);

        is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
        is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
    };

    subtest "transitivity is OK" => sub {
        ok(defined $builds->{"indirect_aggregate"}, "'indirect_aggregate' is part of the jobset evaluation");
        my @constituents = $builds->{"indirect_aggregate"}->constituents->all;
        is(1, scalar @constituents, "'indirect_aggregate' has one constituent");
        is($constituents[0]->nixname, "direct_aggregate", "'indirect_aggregate' has 'direct_aggregate' as single constituent");
    };
};

subtest "* selects all except current aggregate" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-glob-all.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );

    subtest "no eval errors" => sub {
        ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
        ok(
            $stderr !~ "aggregate job ‘ok_aggregate’ has a constituent .* that doesn't correspond to a Hydra build",
            "Catchall wildcard must not select itself as constituent"
        );

        $jobset->discard_changes; # refresh from DB
        is(
            $jobset->errormsg,
            "",
            "eval-errors non-empty"
        );
    };

    my $builds = {};
    for my $build ($jobset->builds) {
        $builds->{$build->job} = $build;
    }

    subtest "two constituents" => sub {
        ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
        my @constituents = $builds->{"ok_aggregate"}->constituents->all;
        is(2, scalar @constituents, "'ok_aggregate' has two constituents");

        my @sortedConstituentNames = sort (map { $_->nixname } @constituents);

        is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
        is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
    };
};

subtest "trivial cycle check" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-cycle.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );

    ok(
        $stderr =~ "Found dependency cycle between jobs 'indirect_aggregate' and 'ok_aggregate'",
        "Dependency cycle error is on stderr"
    );

    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");

    $jobset->discard_changes; # refresh from DB
    like(
        $jobset->errormsg,
        qr/Dependency cycle: indirect_aggregate <-> ok_aggregate/,
        "eval-errors non-empty"
    );

    is(0, $jobset->builds->count, "No builds should be scheduled");
};

subtest "cycle check with globbing" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-cycle-glob.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );

    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");

    $jobset->discard_changes; # refresh from DB
    like(
        $jobset->errormsg,
        qr/aggregate job ‘indirect_aggregate’ failed with the error: Dependency cycle: indirect_aggregate <-> packages.constituentA/,
        "packages.constituentA error missing"
    );

    # on this branch of Hydra, hydra-eval-jobset fails hard if an aggregate
    # job is broken.
    is(0, $jobset->builds->count, "Zero jobs are scheduled");
};

done_testing;
t/jobs/config.nix (new file, 14 lines)

rec {
  path = "/nix/store/l9mg93sgx50y88p5rr6x1vib6j1rjsds-coreutils-9.1/bin";

  mkDerivation = args:
    derivation ({
      system = builtins.currentSystem;
      PATH = path;
    } // args);
  mkContentAddressedDerivation = args: mkDerivation ({
    __contentAddressed = true;
    outputHashMode = "recursive";
    outputHashAlgo = "sha256";
  } // args);
}
t/jobs/constituents-cycle-glob.nix (new file, 34 lines)

with import ./config.nix;
{
  packages.constituentA = mkDerivation {
    name = "empty-dir-A";
    builder = ./empty-dir-builder.sh;
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [ "*_aggregate" ];
  };

  packages.constituentB = mkDerivation {
    name = "empty-dir-B";
    builder = ./empty-dir-builder.sh;
  };

  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "packages.*"
    ];
    builder = ./empty-dir-builder.sh;
  };

  indirect_aggregate = mkDerivation {
    name = "indirect_aggregate";
    _hydraAggregate = true;
    constituents = [
      "ok_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };
}
t/jobs/constituents-cycle.nix (new file, 21 lines)

with import ./config.nix;
{
  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "indirect_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };

  indirect_aggregate = mkDerivation {
    name = "indirect_aggregate";
    _hydraAggregate = true;
    constituents = [
      "ok_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };
}
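Side note: the two derivations above each list the other as a constituent, which is exactly the cycle the "trivial cycle check" subtest expects hydra-eval-jobset to report. A minimal sketch of such a check (not Hydra's implementation) using a depth-first search:

```cpp
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::vector<std::string>>;

// Returns true if a cycle is reachable from `node`: re-entering a node that
// is still on the current DFS path means the constituent graph loops.
static bool hasCycle(const Graph & g, const std::string & node,
    std::set<std::string> & onPath, std::set<std::string> & done)
{
    if (done.count(node)) return false;
    if (!onPath.insert(node).second) return true;
    if (auto it = g.find(node); it != g.end())
        for (auto & dep : it->second)
            if (hasCycle(g, dep, onPath, done)) return true;
    onPath.erase(node);
    done.insert(node);
    return false;
}

int main()
{
    // Mirrors constituents-cycle.nix: each aggregate names the other.
    Graph g = {
        {"ok_aggregate", {"indirect_aggregate"}},
        {"indirect_aggregate", {"ok_aggregate"}},
    };
    std::set<std::string> onPath, done;
    std::cout << (hasCycle(g, "ok_aggregate", onPath, done) ? "cycle" : "acyclic") << "\n";
    return 0;
}
```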
t/jobs/constituents-glob-all.nix (new file, 22 lines)

with import ./config.nix;
{
  packages.constituentA = mkDerivation {
    name = "empty-dir-A";
    builder = ./empty-dir-builder.sh;
  };

  packages.constituentB = mkDerivation {
    name = "empty-dir-B";
    builder = ./empty-dir-builder.sh;
  };

  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "*"
    ];
    builder = ./empty-dir-builder.sh;
  };
}
t/jobs/constituents-glob.nix (new file, 31 lines)

with import ./config.nix;
{
  packages.constituentA = mkDerivation {
    name = "empty-dir-A";
    builder = ./empty-dir-builder.sh;
  };

  packages.constituentB = mkDerivation {
    name = "empty-dir-B";
    builder = ./empty-dir-builder.sh;
  };

  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "packages.*"
    ];
    builder = ./empty-dir-builder.sh;
  };

  indirect_aggregate = mkDerivation {
    name = "indirect_aggregate";
    _hydraAggregate = true;
    constituents = [
      "ok_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };
}
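Side note: with `_hydraGlobConstituents = true`, a constituent string such as "packages.*" is treated as a pattern over job names. A minimal sketch of glob matching with POSIX fnmatch(3), as an illustration rather than Hydra's implementation:

```cpp
// With default flags, '*' matches any run of characters, '.' included, so
// "packages.*" matches "packages.constituentA" but not "ok_aggregate".
#include <fnmatch.h>
#include <cstdio>

int main()
{
    const char * pattern = "packages.*";
    const char * jobs[] = { "packages.constituentA", "packages.constituentB", "ok_aggregate" };
    for (auto job : jobs)
        std::printf("%-22s %s\n", job,
            fnmatch(pattern, job, 0) == 0 ? "matches" : "no match");
    return 0;
}
```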
t/jobs/constituents-no-matches.nix (new file, 20 lines)

with import ./config.nix;
{
  non_match_aggregate = mkDerivation {
    name = "mixed_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "tests.*"
    ];
    builder = ./empty-dir-builder.sh;
  };

  # Without a second job no jobset is attempted to be created
  # (the only job would be broken)
  # and thus the constituent validation is never reached.
  dummy = mkDerivation {
    name = "dummy";
    builder = ./empty-dir-builder.sh;
  };
}
t/jobs/declarative/project.json (new file, 24 lines)

{
    "enabled": 1,
    "hidden": false,
    "description": "declarative-jobset-example",
    "nixexprinput": "src",
    "nixexprpath": "declarative/generator.nix",
    "checkinterval": 300,
    "schedulingshares": 100,
    "enableemail": false,
    "emailoverride": "",
    "keepnr": 3,
    "inputs": {
        "src": {
            "type": "path",
            "value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
            "emailresponsible": false
        },
        "jobspath": {
            "type": "string",
            "value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
            "emailresponsible": false
        }
    }
}
@@ -22,11 +22,11 @@ is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/broken-constituent.nix
 
 like(
     $jobset->errormsg,
-    qr/^"does-not-exist": does not exist$/m,
+    qr/^does-not-exist: does not exist$/m,
     "Evaluating jobs/broken-constituent.nix should log an error for does-not-exist");
 like(
     $jobset->errormsg,
-    qr/^"does-not-evaluate": "error: assertion 'false' failed/m,
+    qr/^does-not-evaluate: error: assertion 'false' failed/m,
     "Evaluating jobs/broken-constituent.nix should log an error for does-not-evaluate");
 
 done_testing;