merge/upstream #2

Open
ahuston-0 wants to merge 21 commits from merge/upstream into add-gitea-pulls
26 changed files with 819 additions and 144 deletions

.gitignore vendored
View File

@@ -3,6 +3,7 @@
 /src/sql/hydra-postgresql.sql
 /src/sql/hydra-sqlite.sql
 /src/sql/tmp.sqlite
+.hydra-data
 result
 result-*
 outputs

View File

@@ -72,17 +72,16 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_

 You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
 ```
-$ nix-build
+$ nix build
 ```

 ### Development Environment

 You can use the provided shell.nix to get a working development environment:
 ```
-$ nix-shell
-$ autoreconfPhase
-$ configurePhase # NOTE: not ./configure
-$ make
+$ nix develop
+$ mesonConfigurePhase
+$ ninja
 ```

 ### Executing Hydra During Development
@@ -91,9 +90,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
 can be done using [foreman](https://github.com/ddollar/foreman):

 ```
-$ nix-shell
+$ nix develop
 $ # hack hack
-$ make
+$ ninja -C build
 $ foreman start
 ```
@@ -115,22 +114,24 @@ Start by following the steps in [Development Environment](#development-environme

 Then, you can run the tests and the perlcritic linter together with:

 ```console
-$ nix-shell
-$ make check
+$ nix develop
+$ ninja -C build test
 ```

 You can run a single test with:

 ```
-$ nix-shell
-$ yath test ./t/foo/bar.t
+$ nix develop
+$ cd build
+$ meson test --test-args=../t/Hydra/Event.t testsuite
 ```

 And you can run just perlcritic with:

 ```
-$ nix-shell
-$ make perlcritic
+$ nix develop
+$ cd build
+$ meson test perlcritic
 ```

 ### JSON API

View File

@@ -11,12 +11,6 @@ $ cd hydra
 To enter a shell in which all environment variables (such as `PERL5LIB`)
 and dependencies can be found:

-```console
-$ nix-shell
-```
-
-or when flakes are enabled:
-
 ```console
 $ nix develop
 ```
@@ -24,15 +18,15 @@ $ nix develop

 To build Hydra, you should then do:

 ```console
-[nix-shell]$ autoreconfPhase
-[nix-shell]$ configurePhase
-[nix-shell]$ make -j$(nproc)
+$ mesonConfigurePhase
+$ ninja
 ```

 You start a local database, the webserver, and other components with
 foreman:

 ```console
+$ ninja -C build
 $ foreman start
 ```
@@ -47,18 +41,11 @@ $ ./src/script/hydra-server
 You can run Hydra's test suite with the following:

 ```console
-[nix-shell]$ make check
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
-[nix-shell]$ # or run yath directly:
-[nix-shell]$ yath test
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ yath test -j $NIX_BUILD_CORES
+$ meson test
+# to run as many tests as you have cores:
+$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
 ```

-When using `yath` instead of `make check`, ensure you have run `make`
-in the root of the repository at least once.
-
 **Warning**: Currently, the tests can fail
 if run with high parallelism [due to an issue in
 `Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
@@ -75,7 +62,7 @@ will reload the page every time you save.

 To build Hydra and its dependencies:

 ```console
-$ nix-build release.nix -A build.x86_64-linux
+$ nix build .#packages.x86_64-linux.default
 ```

 ## Development Tasks

View File

@@ -92,6 +92,23 @@ Sets Gitea CI status

 - `gitea_authorization.<repo-owner>`

+## Gitea pulls
+
+Create jobs based on open Gitea pull requests
+
+### Configuration options
+
+- `gitea_authorization.<repo-owner>`
+
+## Gitea refs
+
+Hydra plugin for retrieving the list of references (branches or tags) from
+Gitea following a certain naming scheme.
+
+### Configuration options
+
+- `gitea_authorization.<repo-owner>`
+
 ## GitHub pulls

 Create jobs based on open GitHub pull requests
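All three Gitea plugins (status, pulls, refs) resolve their API token from the same `gitea_authorization.<repo-owner>` setting. A minimal sketch of that lookup as the plugins later in this diff perform it (the owner key and token value here are invented for illustration):

```perl
use strict;
use warnings;

# Hypothetical parsed hydra.conf fragment; inside a real plugin the
# same hash arrives as $self->{config}.
my $config = { gitea_authorization => { root => "example-token" } };

my $owner = "root";    # repository owner, taken from the input value
my $auth  = $config->{gitea_authorization}->{$owner};
# The plugins pass this as an Authorization header on Gitea API requests.
print "token for $owner: $auth\n" if defined $auth;
```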

flake.lock generated
View File

@@ -29,11 +29,11 @@
     "nix-eval-jobs": {
       "flake": false,
       "locked": {
-        "lastModified": 1739500569,
-        "narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
+        "lastModified": 1743008255,
+        "narHash": "sha256-Lo4KFBNcY8tmBuCmEr2XV0IUZtxXHmbXPNLkov/QSU0=",
         "owner": "nix-community",
         "repo": "nix-eval-jobs",
-        "rev": "4b392b284877d203ae262e16af269f702df036bc",
+        "rev": "f7418fc1fa45b96d37baa95ff3c016dd5be3876b",
         "type": "github"
       },
       "original": {

View File

@@ -15,7 +15,6 @@
     systemd.services.hydra-send-stats.enable = false;

     services.postgresql.enable = true;
-    services.postgresql.package = pkgs.postgresql_12;

     # The following is to work around the following error from hydra-server:
     #   [error] Caught exception in engine "Cannot determine local time zone"

View File

@@ -468,7 +468,7 @@ in
             elif [[ $compression == zstd ]]; then
               compression="zstd --rm"
             fi
-            find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
+            find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
           '';
           startAt = "Sun 01:45";
         };

View File

@@ -145,10 +145,24 @@ in
       git -C /tmp/repo add .
       git config --global user.email test@localhost
       git config --global user.name test
+
+      # Create initial commit
       git -C /tmp/repo commit -m 'Initial import'
       git -C /tmp/repo remote add origin gitea@machine:root/repo
-      GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
+      export GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no'
       git -C /tmp/repo push origin master
+      git -C /tmp/repo log >&2
+
+      # Create PR branch
+      git -C /tmp/repo checkout -b pr
+      git -C /tmp/repo commit --allow-empty -m 'Additional change'
+      git -C /tmp/repo push origin pr
+      git -C /tmp/repo log >&2
+
+      # Create release branch
+      git -C /tmp/repo checkout -b release/release-1.0
+      git -C /tmp/repo commit --allow-empty -m 'Additional change'
+      git -C /tmp/repo push origin release/release-1.0
       git -C /tmp/repo log >&2
     '';
@@ -185,7 +199,7 @@ in
       cat >data.json <<EOF
       {
         "description": "Trivial",
-        "checkinterval": "60",
+        "checkinterval": "20",
         "enabled": "1",
         "visible": "1",
         "keepnr": "1",
@@ -199,7 +213,17 @@ in
           "gitea_repo_name": {"value": "repo", "type": "string"},
           "gitea_repo_owner": {"value": "root", "type": "string"},
           "gitea_status_repo": {"value": "git", "type": "string"},
-          "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
+          "gitea_http_url": {"value": "http://localhost:3001", "type": "string"},
+          "pulls": {
+            "type": "giteapulls",
+            "value": "localhost:3001 root repo http",
+            "emailresponsible": false
+          },
+          "releases": {
+            "type": "gitea_refs",
+            "value": "localhost:3001 root repo heads http - release",
+            "emailresponsible": false
+          }
         }
       }
       EOF
@@ -227,15 +251,41 @@ in
     };

     smallDrv = pkgs.writeText "jobset.nix" ''
-      { trivial = builtins.derivation {
-          name = "trivial";
-          system = "${system}";
-          builder = "/bin/sh";
-          allowSubstitutes = false;
-          preferLocalBuild = true;
-          args = ["-c" "echo success > $out; exit 0"];
+      { pulls, releases, ... }:
+
+      let
+        genDrv = name: builtins.derivation {
+          inherit name;
+          system = "${system}";
+          builder = "/bin/sh";
+          allowSubstitutes = false;
+          preferLocalBuild = true;
+          args = ["-c" "echo success > $out; exit 0"];
         };
-      }
+
+        prs = builtins.fromJSON (builtins.readFile pulls);
+        prJobNames = map (n: "pr-''${n}") (builtins.attrNames prs);
+        prJobset = builtins.listToAttrs (
+          map (
+            name: {
+              inherit name;
+              value = genDrv name;
+            }
+          ) prJobNames
+        );
+        rels = builtins.fromJSON (builtins.readFile releases);
+        relJobNames = builtins.attrNames rels;
+        relJobset = builtins.listToAttrs (
+          map (
+            name: {
+              inherit name;
+              value = genDrv name;
+            }
+          ) relJobNames
+        );
+      in {
+        trivial = genDrv "trivial";
+      } // prJobset // relJobset
     '';
   in
   ''
@@ -279,18 +329,34 @@ in
         + '| jq .buildstatus | xargs test 0 -eq'
     )

+    machine.sleep(3)
+
     data = machine.succeed(
-        'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+        'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show master | head -n1 | awk "{print \\$2}")?sort=leastindex" '
         + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
        + f"-H 'Authorization: token ${api_token}'"
     )

     response = json.loads(data)

-    assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!"
+    assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
     assert response[0]['status'] == "success", "Expected finished status to be success!"
     assert response[1]['status'] == "pending", "Expected queued status to be pending!"

+    # giteapulls test
+
+    machine.succeed(
+        "curl --fail -X POST http://localhost:3001/api/v1/repos/root/repo/pulls "
+        + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+        + f"-H 'Authorization: token ${api_token}'"
+        + ' -d \'{"title":"Test PR", "base":"master", "head": "pr"}\'''
+    )
+
+    machine.wait_until_succeeds(
+        'curl -Lf -s http://localhost:3000/build/2 -H "Accept: application/json" '
+        + '| jq .buildstatus | xargs test 0 -eq'
+    )
+
     machine.shutdown()
   '';
 })

View File

@@ -241,7 +241,7 @@ stdenv.mkDerivation (finalAttrs: {
     shellHook = ''
       pushd $(git rev-parse --show-toplevel) >/dev/null

-      PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
+      PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/build/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
       PERL5LIB=$(pwd)/src/lib:$PERL5LIB
       export HYDRA_HOME="$(pwd)/src/"
       mkdir -p .hydra-data

View File

@@ -9,10 +9,13 @@
 #include "path.hh"
 #include "legacy-ssh-store.hh"
 #include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
 #include "state.hh"
 #include "current-process.hh"
 #include "processes.hh"
 #include "util.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
 #include "ssh.hh"
 #include "finally.hh"
 #include "url.hh"
@@ -36,6 +39,38 @@ bool ::Machine::isLocalhost() const

 namespace nix::build_remote {

+static std::unique_ptr<SSHMaster::Connection> openConnection(
+    ::Machine::ptr machine, SSHMaster & master)
+{
+    Strings command = {"nix-store", "--serve", "--write"};
+    if (machine->isLocalhost()) {
+        command.push_back("--builders");
+        command.push_back("");
+    } else {
+        auto remoteStore = machine->storeUri.params.find("remote-store");
+        if (remoteStore != machine->storeUri.params.end()) {
+            command.push_back("--store");
+            command.push_back(shellEscape(remoteStore->second));
+        }
+    }
+
+    auto ret = master.startCommand(std::move(command), {
+        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
+    });
+
+    // XXX: determine the actual max value we can use from /proc.
+    // FIXME: Should this be upstreamed into `startCommand` in Nix?
+    int pipesize = 1024 * 1024;
+    fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+    fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+    return ret;
+}
+
 static void copyClosureTo(
     ::Machine::Connection & conn,
     Store & destStore,
@@ -52,8 +87,8 @@ static void copyClosureTo(
     // FIXME: substitute output pollutes our build log
     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = conn.store->queryValidPaths(
-        closure, true, useSubstitutes);
+    auto present = conn.queryValidPaths(
+        destStore, true, closure, useSubstitutes);

     if (present.size() == closure.size()) return;
@@ -68,7 +103,12 @@ static void copyClosureTo(
     std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));

-    conn.store->addMultipleToStoreLegacy(destStore, missing);
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();
+
+    if (readInt(conn.from) != 1)
+        throw Error("remote machine failed to import closure");
 }
@@ -188,7 +228,7 @@ static BuildResult performBuild(
     counter & nrStepsBuilding
 )
 {
-    auto kont = conn.store->buildDerivationAsync(drvPath, drv, options);
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);

     BuildResult result;
@@ -197,10 +237,7 @@ static BuildResult performBuild(
     startTime = time(0);
     {
         MaintainCount<counter> mc(nrStepsBuilding);
-        result = kont();
-        // Without proper call-once functions, we need to manually
-        // delete after calling.
-        kont = {};
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
     }
     stopTime = time(0);
@@ -216,7 +253,7 @@ static BuildResult performBuild(

     // If the protocol was too old to give us `builtOutputs`, initialize
     // it manually by introspecting the derivation.
-    if (GET_PROTOCOL_MINOR(conn.store->getProtocol()) < 6)
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
     {
         // If the remote is too old to handle CA derivations, we can't get this
         // far anyways
@@ -249,25 +286,26 @@ static void copyPathFromRemote(
     const ValidPathInfo & info
 )
 {
     /* Receive the NAR from the remote and add it to the
       destination store. Meanwhile, extract all the info from the
       NAR that getBuildOutput() needs. */
     auto source2 = sinkToSource([&](Sink & sink)
     {
         /* Note: we should only send the command to dump the store
            path to the remote if the NAR is actually going to get read
            by the destination store, which won't happen if this path
            is already valid on the destination store. Since this
            lambda function only gets executed if someone tries to read
            from source2, we will send the command from here rather
            than outside the lambda. */
-        conn.store->narFromPath(info.path, [&](Source & source) {
-            TeeSource tee{source, sink};
-            extractNarData(tee, conn.store->printStorePath(info.path), narMembers);
-        });
-    });
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();

-    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
 }

 static void copyPathsFromRemote(
@@ -366,39 +404,30 @@ void State::buildRemote(ref<Store> destStore,

         updateStep(ssConnecting);

-        // FIXME: rewrite to use Store.
-        ::Machine::Connection conn {
-            .machine = machine,
-            .store = [&]{
-                auto * pSpecified = std::get_if<StoreReference::Specified>(&machine->storeUri.variant);
-                if (!pSpecified || pSpecified->scheme != "ssh") {
-                    throw Error("Currently, only (legacy-)ssh stores are supported!");
-                }
-
-                auto remoteStore = machine->openStore().dynamic_pointer_cast<LegacySSHStore>();
-                assert(remoteStore);
-
-                remoteStore->connPipeSize = 1024 * 1024;
-
-                if (machine->isLocalhost()) {
-                    auto rp_new = remoteStore->remoteProgram.get();
-                    rp_new.push_back("--builders");
-                    rp_new.push_back("");
-                    const_cast<nix::Setting<Strings> &>(remoteStore->remoteProgram).assign(rp_new);
-                }
-                remoteStore->extraSshArgs = {
-                    "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
-                };
-                const_cast<nix::Setting<int> &>(remoteStore->logFD).assign(logFD.get());
-
-                return nix::ref{remoteStore};
-            }(),
-        };
+        auto storeRef = machine->completeStoreReference();
+
+        auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
+        if (!pSpecified || pSpecified->scheme != "ssh") {
+            throw Error("Currently, only (legacy-)ssh stores are supported!");
+        }
+
+        LegacySSHStoreConfig storeConfig {
+            pSpecified->scheme,
+            pSpecified->authority,
+            storeRef.params
+        };
+
+        auto master = storeConfig.createSSHMaster(
+            false, // no SSH master yet
+            logFD.get());
+
+        // FIXME: rewrite to use Store.
+        auto child = build_remote::openConnection(machine, master);

         {
             auto activeStepState(activeStep->state_.lock());
             if (activeStepState->cancelled) throw Error("step cancelled");
-            activeStepState->pid = conn.store->getConnectionPid();
+            activeStepState->pid = child->sshPid;
         }

         Finally clearPid([&]() {
@@ -413,12 +442,35 @@ void State::buildRemote(ref<Store> destStore,
                process.  Meh. */
         });

+        ::Machine::Connection conn {
+            {
+                .to = child->in.get(),
+                .from = child->out.get(),
+                /* Handshake. */
+                .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+            },
+            /*.machine =*/ machine,
+        };
+
         Finally updateStats([&]() {
-            auto stats = conn.store->getConnectionStats();
-            bytesReceived += stats.bytesReceived;
-            bytesSent += stats.bytesSent;
+            bytesReceived += conn.from.read;
+            bytesSent += conn.to.written;
         });

+        constexpr ServeProto::Version our_version = 0x206;
+
+        try {
+            conn.remoteVersion = decltype(conn)::handshake(
+                conn.to,
+                conn.from,
+                our_version,
+                machine->storeUri.render());
+        } catch (EndOfFile & e) {
+            child->sshPid.wait();
+            std::string s = chomp(readFile(result.logFile));
+            throw Error("cannot connect to %1%: %2%", machine->storeUri.render(), s);
+        }
+
         {
             auto info(machine->state->connectInfo.lock());
             info->consecutiveFailures = 0;
@@ -487,7 +539,7 @@ void State::buildRemote(ref<Store> destStore,

             auto now1 = std::chrono::steady_clock::now();

-            auto infos = conn.store->queryPathInfosUncached(outputs);
+            auto infos = conn.queryPathInfos(*localStore, outputs);

             size_t totalNarSize = 0;
             for (auto & [_, info] : infos) totalNarSize += info.narSize;
@@ -522,11 +574,9 @@ void State::buildRemote(ref<Store> destStore,
             }
         }

-        /* Shut down the connection done by RAII.
-
-           Only difference is kill() instead of wait() (i.e. send signal
-           then wait())
-         */
+        /* Shut down the connection. */
+        child->in = -1;
+        child->sshPid.wait();

     } catch (Error & e) {
         /* Disable this machine until a certain period of time has

View File

@@ -20,7 +20,9 @@
 #include "store-api.hh"
 #include "sync.hh"
 #include "nar-extractor.hh"
-#include "legacy-ssh-store.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
+#include "serve-protocol-connection.hh"
 #include "machines.hh"
@@ -290,11 +292,9 @@ struct Machine : nix::Machine

     bool isLocalhost() const;

     // A connection to a machine
-    struct Connection {
+    struct Connection : nix::ServeProto::BasicClientConnection {
         // Backpointer to the machine
         ptr machine;
-        // Opened store
-        nix::ref<nix::LegacySSHStore> store;
     };
 };

View File

@@ -0,0 +1,84 @@
# Allow building based on Gitea pull requests.
#
# Example input:
#   "pulls": {
#     "type": "giteapulls",
#     "value": "example.com alice repo",
#     "emailresponsible": false
#   }

package Hydra::Plugin::GiteaPulls;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use File::Temp;
use POSIX qw(strftime);

sub supportedInputTypes {
    my ($self, $inputTypes) = @_;
    $inputTypes->{'giteapulls'} = 'Open Gitea Pull Requests';
}

sub _iterate {
    my ($url, $auth, $pulls, $ua) = @_;

    my $req = HTTP::Request->new('GET', $url);
    $req->header('Authorization' => 'token ' . $auth) if defined $auth;

    my $res = $ua->request($req);
    my $content = $res->decoded_content;
    die "Error pulling from the gitea pulls API: $content\n"
        unless $res->is_success;

    my $pulls_list = decode_json $content;
    foreach my $pull (@$pulls_list) {
        $pulls->{$pull->{number}} = $pull;
    }

    # TODO Make Link header parsing more robust!!!
    my @links = split ',', ($res->header("Link") // "");
    my $next = "";
    foreach my $link (@links) {
        my ($url, $rel) = split ";", $link;
        if (trim($rel) eq 'rel="next"') {
            $next = substr trim($url), 1, -1;
            last;
        }
    }
    _iterate($next, $auth, $pulls, $ua) unless $next eq "";
}

sub fetchInput {
    my ($self, $type, $name, $value, $project, $jobset) = @_;
    return undef if $type ne "giteapulls";

    my ($baseUrl, $owner, $repo, $proto) = split ' ', $value;
    if (not defined $proto) { # the protocol handler is exposed as an option in order to do integration testing
        $proto = "https"
    }
    my $auth = $self->{config}->{gitea_authorization}->{$owner};

    my $ua = LWP::UserAgent->new();
    my %pulls;
    _iterate("$proto://$baseUrl/api/v1/repos/$owner/$repo/pulls?limit=100", $auth, \%pulls, $ua);

    my $tempdir = File::Temp->newdir("gitea-pulls" . "XXXXX", TMPDIR => 1);
    my $filename = "$tempdir/gitea-pulls.json";
    open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
    print $fh encode_json \%pulls;
    close $fh;

    my $storePath = trim(`nix-store --add "$filename"`
        or die "cannot copy path $filename to the Nix store.\n");
    chomp $storePath;

    my $timestamp = time;
    return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
}

1;
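A note on the `# TODO Make Link header parsing more robust!!!` above: splitting on `','` and `';'` breaks as soon as a paginated URL contains either character. A more defensive sketch using a single regex over RFC 8288-style `Link` values (the helper name `_next_link` is hypothetical and not part of this PR; it assumes Gitea emits a quoted `rel="next"` parameter):

```perl
use strict;
use warnings;

# Hypothetical replacement for the ad hoc Link-header parsing: match
# <URL>; rel="next" pairs directly instead of splitting on ',' and ';'.
sub _next_link {
    my ($link_header) = @_;
    return "" unless defined $link_header;
    return $1 if $link_header =~ /<([^>]*)>\s*;\s*rel="next"/;
    return "";
}

# Example with an invented header value:
my $header = '<http://localhost:3001/api/v1/repos/root/repo/pulls?page=2&limit=100>; rel="next",'
           . ' <http://localhost:3001/api/v1/repos/root/repo/pulls?page=4&limit=100>; rel="last"';
print _next_link($header), "\n";   # prints the page=2 URL
```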

View File

@@ -0,0 +1,129 @@
package Hydra::Plugin::GiteaRefs;

use strict;
use warnings;
use parent 'Hydra::Plugin';
use HTTP::Request;
use LWP::UserAgent;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use File::Temp;
use POSIX qw(strftime);

=head1 NAME

GiteaRefs - Hydra plugin for retrieving the list of references (branches or
tags) from Gitea following a certain naming scheme

=head1 DESCRIPTION

This plugin reads the list of branches or tags using Gitea's REST API. The name
of the reference must follow a particular prefix. This list is stored in the
nix-store and used as an input to declarative jobsets.

=head1 CONFIGURATION

The plugin doesn't require any dedicated configuration block, but it has to
consult the C<gitea_authorization> entry for obtaining the API token. In
addition, the declarative project C<spec.json> file must contain an input such
as

    "pulls": {
        "type": "gitea_refs",
        "value": "[gitea_hostname] [owner] [repo] heads|tags [scheme] - [prefix]",
        "emailresponsible": false
    }

In the above snippet, C<[gitea_hostname]> must be set to the hostname of the
repository's Gitea instance.

C<[owner]> is the repository owner and C<[repo]> is the repository name. Also
note a literal C<->, which is placed there for future use.

C<heads|tags> denotes that one of these two is allowed, that is, the fourth
position should hold either the C<heads> or the C<tags> keyword. In case of the
former, the plugin will fetch all branches, while in case of the latter, it
will fetch the tags.

C<scheme> should be set to either https or http, depending on what the Gitea
host supports.

C<prefix> denotes the prefix the reference name must start with, in order to be
included.

For example, C<"value": "projects.blender.org blender blender heads https - blender-v/">
refers to the L<https://projects.blender.org/blender/blender> repository, and
will fetch all branches that begin with C<blender-v/>.

=head1 USE

The result is stored in the nix-store as a JSON I<map>, where the key is the
name of the reference, while the value is the complete Gitea response. Thus,
any of the values listed in
L<https://docs.gitea.com/api#tag/repository/operation/repoListAllGitRefs> can be
used to build the git input value in C<jobsets.nix>.

=cut

sub supportedInputTypes {
    my ($self, $inputTypes) = @_;
    $inputTypes->{'gitea_refs'} = 'Open Gitea Refs';
}

sub _iterate {
    my ($url, $auth, $refs, $ua) = @_;

    my $req = HTTP::Request->new('GET', $url);
    $req->header('Accept' => 'application/json');
    $req->header('Authorization' => $auth) if defined $auth;

    my $res = $ua->request($req);
    my $content = $res->decoded_content;
    die "Error pulling from the gitea refs API: $content\n"
        unless $res->is_success;

    my $refs_list = decode_json $content;
    # TODO Stream out the json instead
    foreach my $ref (@$refs_list) {
        my $ref_name = $ref->{ref};
        $ref_name =~ s,^refs/(?:heads|tags)/,,o;
        $refs->{$ref_name} = $ref;
    }

    # TODO Make Link header parsing more robust!!!
    my @links = split ',', ($res->header("Link") // "");
    my $next = "";
    foreach my $link (@links) {
        my ($url, $rel) = split ";", $link;
        if (trim($rel) eq 'rel="next"') {
            $next = substr trim($url), 1, -1;
            last;
        }
    }
    _iterate($next, $auth, $refs, $ua) unless $next eq "";
}

sub fetchInput {
    my ($self, $input_type, $name, $value, $project, $jobset) = @_;
    return undef if $input_type ne "gitea_refs";

    my ($giteaHostname, $owner, $repo, $type, $scheme, $fut, $prefix) = split ' ', $value;
    die "type field is neither 'heads' nor 'tags', but '$type'"
        unless $type eq 'heads' or $type eq 'tags';
    die "scheme field is neither 'https' nor 'http' but '$scheme'"
        unless $scheme eq 'https' or $scheme eq 'http';

    my $auth = $self->{config}->{gitea_authorization}->{$owner};
    my $giteaEndpoint = "$scheme://$giteaHostname";

    my %refs;
    my $ua = LWP::UserAgent->new();
    _iterate("$giteaEndpoint/api/v1/repos/$owner/$repo/git/refs/$type/$prefix?per_page=100", $auth, \%refs, $ua);

    my $tempdir = File::Temp->newdir("gitea-refs" . "XXXXX", TMPDIR => 1);
    my $filename = "$tempdir/gitea-refs.json";
    open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
    print $fh encode_json \%refs;
    close $fh;

    system("jq -S . < $filename > $tempdir/gitea-refs-sorted.json");
    my $storePath = trim(qx{nix-store --add "$tempdir/gitea-refs-sorted.json"}
        or die "cannot copy path $filename to the Nix store.\n");
    chomp $storePath;

    my $timestamp = time;
    return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
}

1;
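For orientation, a sketch of consuming the stored `gitea-refs.json` map. The sample payload here is invented, but follows the ref-object shape documented in the Gitea API linked from the POD above (key = ref name with `refs/heads/` or `refs/tags/` stripped, value = the full ref object):

```perl
use strict;
use warnings;
use JSON::MaybeXS;

# Invented sample of the plugin's output file.
my $json = <<'EOF';
{
  "release/release-1.0": {
    "ref": "refs/heads/release/release-1.0",
    "object": { "type": "commit", "sha": "0000000000000000000000000000000000000000" }
  }
}
EOF

my $refs = decode_json($json);
for my $name (sort keys %$refs) {
    # e.g. combine with a repo URL to form a "git" input: "<url> <sha>"
    printf "%s -> %s\n", $name, $refs->{$name}{object}{sha};
}
```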

View File

@@ -6,6 +6,7 @@ use base 'Catalyst::View::TT';
 use Template::Plugin::HTML;
 use Hydra::Helper::Nix;
 use Time::Seconds;
+use Digest::SHA qw(sha1_hex);

 __PACKAGE__->config(
     TEMPLATE_EXTENSION => '.tt',
@@ -25,8 +26,14 @@ __PACKAGE__->config(
         makeNameTextForJobset
         relativeDuration
         stripSSHUser
+        metricDivId
     /]);

+sub metricDivId {
+    my ($self, $c, $text) = @_;
+    return "metric-" . sha1_hex($text);
+}
+
 sub buildLogExists {
     my ($self, $c, $build) = @_;
     return 1 if defined $c->config->{log_prefix};
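Why hash rather than keep the old dot-to-underscore substitution (removed in the template change below): metric names can contain characters that are invalid in DOM ids, and distinct names such as `build.time` and `build_time` previously collapsed to the same id. `sha1_hex` yields a stable, id-safe, collision-resistant token. A quick illustration (metric names invented):

```perl
use strict;
use warnings;
use Digest::SHA qw(sha1_hex);

# Both of these collapsed to "metric-build_time" under the old scheme;
# hashing keeps them distinct.
for my $name ("build.time", "build_time") {
    print "metric-", sha1_hex($name), "\n";   # two different 40-hex-char ids
}
```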

View File

@@ -18,8 +18,7 @@
 <h3>Metric: <a [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]><tt>[%HTML.escape(metric.name)%]</tt></a></h3>

-[% id = "metric-" _ metric.name;
-   id = id.replace('\.', '_');
+[% id = metricDivId(metric.name);
    INCLUDE createChart dataUrl=c.uri_for('/job' project.name jobset.name job 'metric' metric.name); %]

 [% END %]

View File

@@ -773,6 +773,9 @@ sub checkJobsetWrapped {
     my $jobsetChanged = 0;
     my %buildMap;

+    my @jobs;
+    push @jobs, $_ while defined($_ = $jobsIter->());
+
     $db->txn_do(sub {
         my $prevEval = getPrevJobsetEval($db, $jobset, 1);

@@ -796,7 +799,7 @@ sub checkJobsetWrapped {

         my @jobsWithConstituents;

-        while (defined(my $job = $jobsIter->())) {
+        foreach my $job (@jobs) {
             if ($jobsetsJobset) {
                 die "The .jobsets jobset must only have a single job named 'jobsets'"
                     unless $job->{attr} eq "jobsets";
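These two hunks belong together: the lazy `$jobsIter` closure is drained into `@jobs` once, before `txn_do` opens the database transaction, and the loop inside the transaction then walks the materialized list. A toy illustration of the draining idiom (the iterator contents are invented):

```perl
use strict;
use warnings;

# Stand-in for the one-shot $jobsIter closure: returns each job
# hashref once, then undef when exhausted.
my @queue = ({ attr => "jobA" }, { attr => "jobB" });
my $jobsIter = sub { shift @queue };

# Same idiom as the patch: collect everything up front...
my @jobs;
push @jobs, $_ while defined($_ = $jobsIter->());

# ...so the list can be traversed (and re-traversed) later, e.g. inside
# a database transaction, which a one-shot iterator would not allow.
print scalar(@jobs), " jobs collected\n";   # prints: 2 jobs collected
```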

View File

@@ -6,27 +6,55 @@ use Hydra::Helper::Exec;

 my $ctx = test_context();

-my $jobsetCtx = $ctx->makeJobset(
-    expression => 'constituents-broken.nix',
-);
-my $jobset = $jobsetCtx->{"jobset"};
+subtest "broken constituents expression" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-broken.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};

-my ($res, $stdout, $stderr) = captureStdoutStderr(60,
-    ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
-);
-isnt($res, 0, "hydra-eval-jobset exits non-zero");
-ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
-like(
-    $stderr,
-    qr/aggregate job mixed_aggregate failed with the error: "constituentA": does not exist/,
-    "The stderr record includes a relevant error message"
-);
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    isnt($res, 0, "hydra-eval-jobset exits non-zero");
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+    like(
+        $stderr,
+        qr/aggregate job 'mixed_aggregate' references non-existent job 'constituentA'/,
+        "The stderr record includes a relevant error message"
+    );

-$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
-like(
-    $jobset->errormsg,
-    qr/aggregate job mixed_aggregate failed with the error: "constituentA": does not exist/,
-    "The jobset records a relevant error message"
-);
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/aggregate job mixed_aggregate failed with the error: constituentA: does not exist/,
+        "The jobset records a relevant error message"
+    );
+};
+
+subtest "no matches" => sub {
+    my $jobsetCtx = $ctx->makeJobset(
+        expression => 'constituents-no-matches.nix',
+    );
+    my $jobset = $jobsetCtx->{"jobset"};
+
+    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
+        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
+    );
+    isnt($res, 0, "hydra-eval-jobset exits non-zero");
+    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
+    like(
+        $stderr,
+        qr/aggregate job 'non_match_aggregate' references constituent glob pattern 'tests\.\*' with no matches/,
+        "The stderr record includes a relevant error message"
+    );
+
+    $jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
+    like(
+        $jobset->errormsg,
+        qr/aggregate job non_match_aggregate failed with the error: tests\.\*: constituent glob pattern had no matches/,
+        "The jobset records a relevant error message"
+    );
+};

 done_testing;

View File

@@ -0,0 +1,138 @@
use strict;
use warnings;
use Setup;
use Test2::V0;
use Hydra::Helper::Exec;
use Data::Dumper;

my $ctx = test_context();

subtest "general glob testing" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-glob.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );
    is($res, 0, "hydra-eval-jobset exits zero");

    my $builds = {};
    for my $build ($jobset->builds) {
        $builds->{$build->job} = $build;
    }

    subtest "basic globbing works" => sub {
        ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
        my @constituents = $builds->{"ok_aggregate"}->constituents->all;
        is(2, scalar @constituents, "'ok_aggregate' has two constituents");

        my @sortedConstituentNames = sort (map { $_->nixname } @constituents);
        is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
        is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
    };

    subtest "transitivity is OK" => sub {
        ok(defined $builds->{"indirect_aggregate"}, "'indirect_aggregate' is part of the jobset evaluation");
        my @constituents = $builds->{"indirect_aggregate"}->constituents->all;
        is(1, scalar @constituents, "'indirect_aggregate' has one constituent");
        is($constituents[0]->nixname, "direct_aggregate", "'indirect_aggregate' has 'direct_aggregate' as single constituent");
    };
};

subtest "* selects all except current aggregate" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-glob-all.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );

    subtest "no eval errors" => sub {
        ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
        ok(
            $stderr !~ "aggregate job ok_aggregate has a constituent .* that doesn't correspond to a Hydra build",
            "Catchall wildcard must not select itself as constituent"
        );

        $jobset->discard_changes; # refresh from DB
        is(
            $jobset->errormsg,
            "",
            "eval-errors empty"
        );
    };

    my $builds = {};
    for my $build ($jobset->builds) {
        $builds->{$build->job} = $build;
    }

    subtest "two constituents" => sub {
        ok(defined $builds->{"ok_aggregate"}, "'ok_aggregate' is part of the jobset evaluation");
        my @constituents = $builds->{"ok_aggregate"}->constituents->all;
        is(2, scalar @constituents, "'ok_aggregate' has two constituents");

        my @sortedConstituentNames = sort (map { $_->nixname } @constituents);
        is($sortedConstituentNames[0], "empty-dir-A", "first constituent of 'ok_aggregate' is 'empty-dir-A'");
        is($sortedConstituentNames[1], "empty-dir-B", "second constituent of 'ok_aggregate' is 'empty-dir-B'");
    };
};

subtest "trivial cycle check" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-cycle.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );

    ok(
        $stderr =~ "Found dependency cycle between jobs 'indirect_aggregate' and 'ok_aggregate'",
        "Dependency cycle error is on stderr"
    );
    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");

    $jobset->discard_changes; # refresh from DB
    like(
        $jobset->errormsg,
        qr/Dependency cycle: indirect_aggregate <-> ok_aggregate/,
        "eval-errors non-empty"
    );

    is(0, $jobset->builds->count, "No builds should be scheduled");
};

subtest "cycle check with globbing" => sub {
    my $jobsetCtx = $ctx->makeJobset(
        expression => 'constituents-cycle-glob.nix',
    );
    my $jobset = $jobsetCtx->{"jobset"};

    my ($res, $stdout, $stderr) = captureStdoutStderr(60,
        ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name)
    );
    ok(utf8::decode($stderr), "Stderr output is UTF8-clean");

    $jobset->discard_changes; # refresh from DB
    like(
        $jobset->errormsg,
        qr/aggregate job indirect_aggregate failed with the error: Dependency cycle: indirect_aggregate <-> packages.constituentA/,
        "packages.constituentA error present"
    );

    # on this branch of Hydra, hydra-eval-jobset fails hard if an aggregate
    # job is broken.
    is(0, $jobset->builds->count, "Zero jobs are scheduled");
};

done_testing;

t/jobs/config.nix Normal file
View File

@@ -0,0 +1,14 @@
rec {
  path = "/nix/store/l9mg93sgx50y88p5rr6x1vib6j1rjsds-coreutils-9.1/bin";

  mkDerivation = args:
    derivation ({
      system = builtins.currentSystem;
      PATH = path;
    } // args);

  mkContentAddressedDerivation = args: mkDerivation ({
    __contentAddressed = true;
    outputHashMode = "recursive";
    outputHashAlgo = "sha256";
  } // args);
}

View File

@@ -0,0 +1,34 @@
with import ./config.nix;
{
  packages.constituentA = mkDerivation {
    name = "empty-dir-A";
    builder = ./empty-dir-builder.sh;
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [ "*_aggregate" ];
  };

  packages.constituentB = mkDerivation {
    name = "empty-dir-B";
    builder = ./empty-dir-builder.sh;
  };

  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "packages.*"
    ];
    builder = ./empty-dir-builder.sh;
  };

  indirect_aggregate = mkDerivation {
    name = "indirect_aggregate";
    _hydraAggregate = true;
    constituents = [
      "ok_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };
}

View File

@@ -0,0 +1,21 @@
with import ./config.nix;
{
  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "indirect_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };

  indirect_aggregate = mkDerivation {
    name = "indirect_aggregate";
    _hydraAggregate = true;
    constituents = [
      "ok_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };
}

View File

@@ -0,0 +1,22 @@
with import ./config.nix;
{
  packages.constituentA = mkDerivation {
    name = "empty-dir-A";
    builder = ./empty-dir-builder.sh;
  };

  packages.constituentB = mkDerivation {
    name = "empty-dir-B";
    builder = ./empty-dir-builder.sh;
  };

  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "*"
    ];
    builder = ./empty-dir-builder.sh;
  };
}

View File

@@ -0,0 +1,31 @@
with import ./config.nix;
{
  packages.constituentA = mkDerivation {
    name = "empty-dir-A";
    builder = ./empty-dir-builder.sh;
  };

  packages.constituentB = mkDerivation {
    name = "empty-dir-B";
    builder = ./empty-dir-builder.sh;
  };

  ok_aggregate = mkDerivation {
    name = "direct_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "packages.*"
    ];
    builder = ./empty-dir-builder.sh;
  };

  indirect_aggregate = mkDerivation {
    name = "indirect_aggregate";
    _hydraAggregate = true;
    constituents = [
      "ok_aggregate"
    ];
    builder = ./empty-dir-builder.sh;
  };
}

View File

@@ -0,0 +1,20 @@
with import ./config.nix;
{
  non_match_aggregate = mkDerivation {
    name = "mixed_aggregate";
    _hydraAggregate = true;
    _hydraGlobConstituents = true;
    constituents = [
      "tests.*"
    ];
    builder = ./empty-dir-builder.sh;
  };

  # Without a second job, no evaluation would be attempted at all
  # (the only job would be broken), and thus the constituent
  # validation would never be reached.
  dummy = mkDerivation {
    name = "dummy";
    builder = ./empty-dir-builder.sh;
  };
}

View File

@@ -0,0 +1,24 @@
{
    "enabled": 1,
    "hidden": false,
    "description": "declarative-jobset-example",
    "nixexprinput": "src",
    "nixexprpath": "declarative/generator.nix",
    "checkinterval": 300,
    "schedulingshares": 100,
    "enableemail": false,
    "emailoverride": "",
    "keepnr": 3,
    "inputs": {
        "src": {
            "type": "path",
            "value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
            "emailresponsible": false
        },
        "jobspath": {
            "type": "string",
            "value": "/home/ma27/Projects/hydra-cppnix/t/jobs",
            "emailresponsible": false
        }
    }
}

View File

@@ -22,11 +22,11 @@ is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/broken-constituent.nix

 like(
     $jobset->errormsg,
-    qr/^"does-not-exist": does not exist$/m,
+    qr/^does-not-exist: does not exist$/m,
     "Evaluating jobs/broken-constituent.nix should log an error for does-not-exist");
 like(
     $jobset->errormsg,
-    qr/^"does-not-evaluate": "error: assertion 'false' failed/m,
+    qr/^does-not-evaluate: error: assertion 'false' failed/m,
     "Evaluating jobs/broken-constituent.nix should log an error for does-not-evaluate");

 done_testing;