Merge remote-tracking branch 'origin/master' into haskell-updates
Commit 7f31ab271c

.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 lines changed)
|
@ -23,7 +23,7 @@ Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-
|
|||
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
|
||||
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nixpkgs-review --run "nixpkgs-review wip"`
|
||||
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
|
||||
- [21.11 Release Notes](./CONTRIBUTING.md#generating-2111-release-notes)
|
||||
- [21.11 Release Notes](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md#generating-2111-release-notes)
|
||||
- [ ] (Package updates) Added a release notes entry if the change is major or breaking
|
||||
- [ ] (Module updates) Added a release notes entry if the change is significant
|
||||
- [ ] (Module addition) Added a release notes entry if adding a new NixOS module
|
||||
|
|
|
@ -183,9 +183,6 @@
|
|||
|
||||
- Arguments should be listed in the order they are used, with the exception of `lib`, which always goes first.
|
||||
|
||||
- The top-level `lib` must be used on the master and 21.05 branches instead of its alias `stdenv.lib`, as the alias now causes evaluation errors when aliases are disabled, which is the case for ofborg.
|
||||
`lib` is unrelated to `stdenv`, and so `stdenv.lib` should only be used as a convenience alias when developing locally to avoid having to modify the function inputs just to test something out.
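To make the two conventions above concrete, here is a minimal sketch of a package expression (the package name and URL are hypothetical): `lib` is listed first, the remaining arguments follow in the order they are used, and `lib` is referenced at the top level rather than through `stdenv.lib`.

```nix
{ lib, stdenv, fetchurl }:

stdenv.mkDerivation rec {
  pname = "example";
  version = "1.0";

  src = fetchurl {
    url = "https://example.org/example-${version}.tar.gz"; # placeholder URL
    sha256 = lib.fakeSha256;
  };

  meta = with lib; {
    description = "Hypothetical package illustrating the argument-ordering and top-level lib conventions";
    license = licenses.mit;
  };
}
```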
|
||||
|
||||
## Package naming {#sec-package-naming}
|
||||
|
||||
The key words _must_, _must not_, _required_, _shall_, _shall not_, _should_, _should not_, _recommended_, _may_, and _optional_ in this section are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). Only _emphasized_ words are to be interpreted in this way.
|
||||
|
|
|
@ -7,7 +7,7 @@ You can quickly check your edits with `make`:
|
|||
```ShellSession
|
||||
$ cd /path/to/nixpkgs/doc
|
||||
$ nix-shell
|
||||
[nix-shell]$ make $makeFlags
|
||||
[nix-shell]$ make
|
||||
```
|
||||
|
||||
If you experience problems, run `make debug` to help understand the docbook errors.
|
||||
|
|
|
@ -17,10 +17,6 @@ in pkgs.stdenv.mkDerivation {
|
|||
|
||||
src = lib.cleanSource ./.;
|
||||
|
||||
makeFlags = [
|
||||
"PANDOC_LUA_FILTERS_DIR=${pkgs.pandoc-lua-filters}/share/pandoc/filters"
|
||||
];
|
||||
|
||||
postPatch = ''
|
||||
ln -s ${doc-support} ./doc-support/result
|
||||
'';
|
||||
|
@ -37,4 +33,7 @@ in pkgs.stdenv.mkDerivation {
|
|||
echo "doc manual $dest manual.html" >> $out/nix-support/hydra-build-products
|
||||
echo "doc manual $dest nixpkgs-manual.epub" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
|
||||
# Environment variables
|
||||
PANDOC_LUA_FILTERS_DIR = "${pkgs.pandoc-lua-filters}/share/pandoc/filters";
|
||||
}
|
||||
|
|
|
@ -734,6 +734,12 @@ lib.mapAttrs (n: v: v // { shortName = n; }) ({
|
|||
free = false;
|
||||
};
|
||||
|
||||
stk = {
|
||||
shortName = "stk";
|
||||
fullName = "Synthesis Tool Kit 4.3";
|
||||
url = https://github.com/thestk/stk/blob/master/LICENSE;
|
||||
};
|
||||
|
||||
tcltk = spdx {
|
||||
spdxId = "TCL";
|
||||
fullName = "TCL/TK License";
|
||||
|
|
|
@ -300,6 +300,12 @@
|
|||
githubId = 335271;
|
||||
name = "James Alexander Feldman-Crough";
|
||||
};
|
||||
afontain = {
|
||||
email = "antoine.fontaine@epfl.ch";
|
||||
github = "necessarily-equal";
|
||||
githubId = 59283660;
|
||||
name = "Antoine Fontaine";
|
||||
};
|
||||
aforemny = {
|
||||
email = "aforemny@posteo.de";
|
||||
github = "aforemny";
|
||||
|
@ -7255,6 +7261,16 @@
|
|||
githubId = 10180857;
|
||||
name = "Anmol Sethi";
|
||||
};
|
||||
nicbk = {
|
||||
email = "nicolas@nicbk.com";
|
||||
github = "nicbk";
|
||||
githubId = 77309427;
|
||||
name = "Nicolás Kennedy";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0xC061089EFEBF7A35";
|
||||
fingerprint = "7BC1 77D9 C222 B1DC FB2F 0484 C061 089E FEBF 7A35";
|
||||
}];
|
||||
};
|
||||
nichtsfrei = {
|
||||
email = "philipp.eder@posteo.net";
|
||||
github = "nichtsfrei";
|
||||
|
@ -8425,6 +8441,12 @@
|
|||
githubId = 1891350;
|
||||
name = "Michael Raskin";
|
||||
};
|
||||
ratsclub = {
|
||||
email = "victor@freire.dev.br";
|
||||
github = "ratsclub";
|
||||
githubId = 25647735;
|
||||
name = "Victor Freire";
|
||||
};
|
||||
ravloony = {
|
||||
email = "ravloony@gmail.com";
|
||||
name = "Tom Macdonald";
|
||||
|
@ -8637,6 +8659,12 @@
|
|||
githubId = 449990;
|
||||
name = "Cedric Cellier";
|
||||
};
|
||||
rkitover = {
|
||||
email = "rkitover@gmail.com";
|
||||
github = "rkitover";
|
||||
githubId = 77611;
|
||||
name = "Rafael Kitover";
|
||||
};
|
||||
rkoe = {
|
||||
email = "rk@simple-is-better.org";
|
||||
github = "rkoe";
|
||||
|
|
|
@ -274,8 +274,29 @@ start_all()
|
|||
</term>
|
||||
<listitem>
|
||||
<para>
|
||||
Execute a shell command, raising an exception if the exit status is not
|
||||
zero, otherwise returning the standard output.
|
||||
Execute a shell command, raising an exception if the exit status
|
||||
is not zero, otherwise returning the standard output. Commands
|
||||
are run with <literal>set -euo pipefail</literal> set:
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
If several commands are separated by <literal>;</literal>
|
||||
and one fails, the command as a whole will fail.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
For pipelines, the last non-zero exit status will be
|
||||
returned (if there is one, zero will be returned
|
||||
otherwise).
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Dereferencing unset variables fails the command.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
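A minimal sketch of a NixOS test that relies on the `set -euo pipefail` behaviour described above (test and machine names are illustrative; the `make-test-python.nix` entry point is the one used by the tests under nixos/tests):

```nix
import ./make-test-python.nix ({ pkgs, ... }: {
  name = "pipefail-example";

  machine = { ... }: { };

  testScript = ''
    machine.wait_for_unit("multi-user.target")

    # With pipefail, the failing left-hand side is no longer masked by the
    # succeeding right-hand side, so the whole command fails:
    machine.fail("false | true")

    # Dereferencing an unset variable now also fails the command:
    machine.fail("echo $this_variable_is_not_set")

    machine.succeed("true | true")
  '';
})
```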
|
||||
|
|
|
@ -181,6 +181,15 @@
|
|||
<para>GNOME desktop environment was upgraded to 40, see the release notes for <link xlink:href="https://help.gnome.org/misc/release-notes/40.0/">40.0</link> and <link xlink:href="https://help.gnome.org/misc/release-notes/3.38/">3.38</link>. The <code>gnome3</code> attribute set has been renamed to <code>gnome</code> and so have been the NixOS options.</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Enabling wireless networking now requires specifying at least one network
|
||||
interface using <xref linkend="opt-networking.wireless.interfaces"/>.
|
||||
This is to avoid a race condition with the card initialisation (see
|
||||
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/101963">issue
|
||||
#101963</link> for more information).
|
||||
</para>
|
||||
</listitem>
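A minimal sketch of the configuration this note asks for (the interface name is illustrative):

```nix
{
  networking.wireless = {
    enable = true;
    # At least one interface must now be listed explicitly:
    interfaces = [ "wlp3s0" ];
  };
}
```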
|
||||
<listitem>
|
||||
<para>
|
||||
If you are using <option>services.udev.extraRules</option> to assign
|
||||
|
|
|
@ -441,7 +441,7 @@ class Machine:
|
|||
def execute(self, command: str) -> Tuple[int, str]:
|
||||
self.connect()
|
||||
|
||||
out_command = "( {} ); echo '|!=EOF' $?\n".format(command)
|
||||
out_command = "( set -euo pipefail; {} ); echo '|!=EOF' $?\n".format(command)
|
||||
self.shell.send(out_command.encode())
|
||||
|
||||
output = ""
|
||||
|
|
|
@ -6,28 +6,33 @@ let
|
|||
im = config.i18n.inputMethod;
|
||||
cfg = im.fcitx5;
|
||||
fcitx5Package = pkgs.fcitx5-with-addons.override { inherit (cfg) addons; };
|
||||
in
|
||||
{
|
||||
options = {
|
||||
i18n.inputMethod.fcitx5 = {
|
||||
addons = mkOption {
|
||||
type = with types; listOf package;
|
||||
default = [];
|
||||
example = with pkgs; [ fcitx5-rime ];
|
||||
description = ''
|
||||
Enabled Fcitx5 addons.
|
||||
'';
|
||||
};
|
||||
in {
|
||||
options = {
|
||||
i18n.inputMethod.fcitx5 = {
|
||||
addons = mkOption {
|
||||
type = with types; listOf package;
|
||||
default = [];
|
||||
example = with pkgs; [ fcitx5-rime ];
|
||||
description = ''
|
||||
Enabled Fcitx5 addons.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (im.enabled == "fcitx5") {
|
||||
i18n.inputMethod.package = fcitx5Package;
|
||||
config = mkIf (im.enabled == "fcitx5") {
|
||||
i18n.inputMethod.package = fcitx5Package;
|
||||
|
||||
environment.variables = {
|
||||
GTK_IM_MODULE = "fcitx";
|
||||
QT_IM_MODULE = "fcitx";
|
||||
XMODIFIERS = "@im=fcitx";
|
||||
};
|
||||
environment.variables = {
|
||||
GTK_IM_MODULE = "fcitx";
|
||||
QT_IM_MODULE = "fcitx";
|
||||
XMODIFIERS = "@im=fcitx";
|
||||
};
|
||||
}
|
||||
|
||||
systemd.user.services.fcitx5-daemon = {
|
||||
enable = true;
|
||||
script = "${fcitx5Package}/bin/fcitx5";
|
||||
wantedBy = [ "graphical-session.target" ];
|
||||
};
|
||||
};
|
||||
}
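The reindented module above keeps the same option surface; a minimal sketch of enabling it from a NixOS configuration (the addon choice is illustrative and mirrors the option's own example):

```nix
{ pkgs, ... }:
{
  i18n.inputMethod = {
    enabled = "fcitx5";
    fcitx5.addons = with pkgs; [ fcitx5-rime ];
  };
}
```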
|
||||
|
|
|
@ -139,6 +139,7 @@
|
|||
./programs/flexoptix-app.nix
|
||||
./programs/freetds.nix
|
||||
./programs/fuse.nix
|
||||
./programs/gamemode.nix
|
||||
./programs/geary.nix
|
||||
./programs/gnome-disks.nix
|
||||
./programs/gnome-documents.nix
|
||||
|
@ -553,6 +554,7 @@
|
|||
./services/misc/siproxd.nix
|
||||
./services/misc/snapper.nix
|
||||
./services/misc/sonarr.nix
|
||||
./services/misc/sourcehut
|
||||
./services/misc/spice-vdagentd.nix
|
||||
./services/misc/ssm-agent.nix
|
||||
./services/misc/sssd.nix
|
||||
|
|
nixos/modules/programs/gamemode.nix (new file, 96 lines)
|
@ -0,0 +1,96 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.programs.gamemode;
|
||||
settingsFormat = pkgs.formats.ini { };
|
||||
configFile = settingsFormat.generate "gamemode.ini" cfg.settings;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
programs.gamemode = {
|
||||
enable = mkEnableOption "GameMode to optimise system performance on demand";
|
||||
|
||||
enableRenice = mkEnableOption "CAP_SYS_NICE on gamemoded to support lowering process niceness" // {
|
||||
default = true;
|
||||
};
|
||||
|
||||
settings = mkOption {
|
||||
type = settingsFormat.type;
|
||||
default = {};
|
||||
description = ''
|
||||
System-wide configuration for GameMode (/etc/gamemode.ini).
|
||||
See gamemoded(8) man page for available settings.
|
||||
'';
|
||||
example = literalExample ''
|
||||
{
|
||||
general = {
|
||||
renice = 10;
|
||||
};
|
||||
|
||||
# Warning: GPU optimisations have the potential to damage hardware
|
||||
gpu = {
|
||||
apply_gpu_optimisations = "accept-responsibility";
|
||||
gpu_device = 0;
|
||||
amd_performance_level = "high";
|
||||
};
|
||||
|
||||
custom = {
|
||||
start = "''${pkgs.libnotify}/bin/notify-send 'GameMode started'";
|
||||
end = "''${pkgs.libnotify}/bin/notify-send 'GameMode ended'";
|
||||
};
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment = {
|
||||
systemPackages = [ pkgs.gamemode ];
|
||||
etc."gamemode.ini".source = configFile;
|
||||
};
|
||||
|
||||
security = {
|
||||
polkit.enable = true;
|
||||
wrappers = mkIf cfg.enableRenice {
|
||||
gamemoded = {
|
||||
source = "${pkgs.gamemode}/bin/gamemoded";
|
||||
capabilities = "cap_sys_nice+ep";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd = {
|
||||
packages = [ pkgs.gamemode ];
|
||||
user.services.gamemoded = {
|
||||
# The upstream service already defines this, but it doesn't get applied.
|
||||
# See https://github.com/NixOS/nixpkgs/issues/81138
|
||||
wantedBy = [ "default.target" ];
|
||||
|
||||
# Use pkexec from the security wrappers to allow users to
|
||||
# run libexec/cpugovctl & libexec/gpuclockctl as root with
|
||||
# the actions defined in share/polkit-1/actions.
|
||||
#
|
||||
# This uses a link farm to make sure other wrapped executables
|
||||
# aren't included in PATH.
|
||||
environment.PATH = mkForce (pkgs.linkFarm "pkexec" [
|
||||
{
|
||||
name = "pkexec";
|
||||
path = "${config.security.wrapperDir}/pkexec";
|
||||
}
|
||||
]);
|
||||
|
||||
serviceConfig.ExecStart = mkIf cfg.enableRenice [
|
||||
"" # Tell systemd to clear the existing ExecStart list, to prevent appending to it.
|
||||
"${config.security.wrapperDir}/gamemoded"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
meta = {
|
||||
maintainers = with maintainers; [ kira-bruneau ];
|
||||
};
|
||||
}
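A minimal sketch of using the new module from a NixOS configuration, based only on the options defined above (the values are illustrative and follow the module's own example):

```nix
{ pkgs, ... }:
{
  programs.gamemode = {
    enable = true;
    enableRenice = true; # default, shown for clarity
    settings = {
      general.renice = 10;
      custom = {
        start = "${pkgs.libnotify}/bin/notify-send 'GameMode started'";
        end = "${pkgs.libnotify}/bin/notify-send 'GameMode ended'";
      };
    };
  };
}
```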
|
|
@ -4,7 +4,7 @@ with lib;
|
|||
|
||||
let
|
||||
|
||||
inherit (pkgs) mysql gzip;
|
||||
inherit (pkgs) mariadb gzip;
|
||||
|
||||
cfg = config.services.mysqlBackup;
|
||||
defaultUser = "mysqlbackup";
|
||||
|
@ -20,7 +20,7 @@ let
|
|||
'';
|
||||
backupDatabaseScript = db: ''
|
||||
dest="${cfg.location}/${db}.gz"
|
||||
if ${mysql}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
|
||||
if ${mariadb}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
|
||||
mv $dest.tmp $dest
|
||||
echo "Backed up to $dest"
|
||||
else
|
||||
|
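For context on the mysqlBackup hunks above, a minimal sketch of the backup module in use (the database name and location are illustrative; `enable` and `databases` are assumed from the module's existing interface, while `singleTransaction` and `location` appear in the hunk itself):

```nix
{
  services.mysqlBackup = {
    enable = true;
    databases = [ "nextcloud" ];    # assumed option: databases to dump
    singleTransaction = true;       # passes --single-transaction to mysqldump, as in the hunk above
    location = "/var/backup/mysql"; # cfg.location referenced in the backup script
  };
}
```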
|
|
@ -81,11 +81,14 @@ in
|
|||
# supporting it, or their bundled containerd
|
||||
systemd.enableUnifiedCgroupHierarchy = false;
|
||||
|
||||
environment.systemPackages = [ config.services.k3s.package ];
|
||||
|
||||
systemd.services.k3s = {
|
||||
description = "k3s service";
|
||||
after = [ "network.service" "firewall.service" ] ++ (optional cfg.docker "docker.service");
|
||||
wants = [ "network.service" "firewall.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = optional config.boot.zfs.enabled config.boot.zfs.package;
|
||||
serviceConfig = {
|
||||
# See: https://github.com/rancher/k3s/blob/dddbd16305284ae4bd14c0aade892412310d7edc/install.sh#L197
|
||||
Type = if cfg.role == "agent" then "exec" else "notify";
|
||||
|
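The k3s hunk above only touches the systemd unit; for orientation, a minimal sketch of enabling the service (the `enable` and `role` options are assumed from the `services.k3s` namespace and the `cfg.role`/`cfg.docker` references in the hunk):

```nix
{
  services.k3s = {
    enable = true;
    role = "server"; # anything other than "agent" makes the unit Type = "notify" per the hunk above
  };
}
```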
|
|
@ -34,7 +34,7 @@ in
|
|||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
example = literalExample "pkgs.mysql";
|
||||
example = literalExample "pkgs.mariadb";
|
||||
description = "
|
||||
Which MySQL derivation to use. MariaDB packages are supported too.
|
||||
";
|
||||
|
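A one-line sketch of what the updated example suggests, selecting MariaDB explicitly:

```nix
{ pkgs, ... }:
{
  services.mysql = {
    enable = true;
    package = pkgs.mariadb;
  };
}
```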
|
|
@ -7,7 +7,7 @@ let
|
|||
fpm = config.services.phpfpm.pools.roundcube;
|
||||
localDB = cfg.database.host == "localhost";
|
||||
user = cfg.database.username;
|
||||
phpWithPspell = pkgs.php.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
|
||||
phpWithPspell = pkgs.php74.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
|
||||
in
|
||||
{
|
||||
options.services.roundcube = {
|
||||
|
|
nixos/modules/services/misc/sourcehut/builds.nix (new file, 220 lines)
|
@ -0,0 +1,220 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
scfg = cfg.builds;
|
||||
rcfg = config.services.redis;
|
||||
iniKey = "builds.sr.ht";
|
||||
|
||||
drv = pkgs.sourcehut.buildsrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.builds = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "buildsrht";
|
||||
description = ''
|
||||
User for builds.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5002;
|
||||
description = ''
|
||||
Port on which the "builds" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "builds.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for builds.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/buildsrht";
|
||||
description = ''
|
||||
State path for builds.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
enableWorker = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Run workers for builds.sr.ht.
|
||||
The qemu Docker image must be built manually on the machine first: `cd ${scfg.statePath}/images; docker build -t qemu -f qemu/Dockerfile .`
|
||||
'';
|
||||
};
|
||||
|
||||
images = mkOption {
|
||||
type = types.attrsOf (types.attrsOf (types.attrsOf types.package));
|
||||
default = { };
|
||||
example = lib.literalExample ''(let
|
||||
# Pinning unstable to allow usage with flakes and limit rebuilds.
|
||||
pkgs_unstable = builtins.fetchGit {
|
||||
url = "https://github.com/NixOS/nixpkgs";
|
||||
rev = "ff96a0fa5635770390b184ae74debea75c3fd534";
|
||||
ref = "nixos-unstable";
|
||||
};
|
||||
image_from_nixpkgs = pkgs_unstable: (import ("${pkgs.sourcehut.buildsrht}/lib/images/nixos/image.nix") {
|
||||
pkgs = (import pkgs_unstable {});
|
||||
});
|
||||
in
|
||||
{
|
||||
nixos.unstable.x86_64 = image_from_nixpkgs pkgs_unstable;
|
||||
}
|
||||
)'';
|
||||
description = ''
|
||||
Images for builds.sr.ht. Each package should be distro.release.arch and point to a /nix/store/package/root.img.qcow2.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = with scfg; let
|
||||
image_dirs = lib.lists.flatten (
|
||||
lib.attrsets.mapAttrsToList
|
||||
(distro: revs:
|
||||
lib.attrsets.mapAttrsToList
|
||||
(rev: archs:
|
||||
lib.attrsets.mapAttrsToList
|
||||
(arch: image:
|
||||
pkgs.runCommandNoCC "buildsrht-images" { } ''
|
||||
mkdir -p $out/${distro}/${rev}/${arch}
|
||||
ln -s ${image}/*.qcow2 $out/${distro}/${rev}/${arch}/root.img.qcow2
|
||||
'')
|
||||
archs)
|
||||
revs)
|
||||
scfg.images);
|
||||
image_dir_pre = pkgs.symlinkJoin {
|
||||
name = "builds.sr.ht-worker-images-pre";
|
||||
paths = image_dirs ++ [
|
||||
"${pkgs.sourcehut.buildsrht}/lib/images"
|
||||
];
|
||||
};
|
||||
image_dir = pkgs.runCommandNoCC "builds.sr.ht-worker-images" { } ''
|
||||
mkdir -p $out/images
|
||||
cp -Lr ${image_dir_pre}/* $out/images
|
||||
'';
|
||||
in
|
||||
lib.mkIf (cfg.enable && elem "builds" cfg.services) {
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
extraGroups = lib.optionals cfg.builds.enableWorker [ "docker" ];
|
||||
description = "builds.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0755 ${user} ${user} -"
|
||||
] ++ (lib.optionals cfg.builds.enableWorker
|
||||
[ "d ${statePath}/logs 0775 ${user} ${user} - -" ]
|
||||
);
|
||||
|
||||
services = {
|
||||
buildsrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey
|
||||
{
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "builds.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
|
||||
# Hack to bypass this hack: https://git.sr.ht/~sircmpwn/core.sr.ht/tree/master/item/srht-update-profiles#L6
|
||||
} // { preStart = " "; };
|
||||
|
||||
buildsrht-worker = {
|
||||
enable = scfg.enableWorker;
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
partOf = [ "buildsrht.service" ];
|
||||
description = "builds.sr.ht worker service";
|
||||
path = [ pkgs.openssh pkgs.docker ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Group = "nginx";
|
||||
Restart = "always";
|
||||
};
|
||||
serviceConfig.ExecStart = "${pkgs.sourcehut.buildsrht}/bin/builds.sr.ht-worker";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL builds.sr.ht is being served at (protocol://domain)
|
||||
"builds.sr.ht".origin = mkDefault "http://builds.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"builds.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"builds.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"builds.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"builds.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# builds.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"builds.sr.ht".oauth-client-id = mkDefault null;
|
||||
"builds.sr.ht".oauth-client-secret = mkDefault null;
|
||||
# The redis connection used for the celery worker
|
||||
"builds.sr.ht".redis = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/3";
|
||||
# The shell used for ssh
|
||||
"builds.sr.ht".shell = mkDefault "runner-shell";
|
||||
# Register the builds.sr.ht dispatcher
|
||||
"git.sr.ht::dispatch".${builtins.unsafeDiscardStringContext "${pkgs.sourcehut.buildsrht}/bin/buildsrht-keys"} = mkDefault "${user}:${user}";
|
||||
|
||||
# Location for build logs, images, and control command
|
||||
} // lib.attrsets.optionalAttrs scfg.enableWorker {
|
||||
# Default worker stores logs that are accessible via this address:port
|
||||
"builds.sr.ht::worker".name = mkDefault "127.0.0.1:5020";
|
||||
"builds.sr.ht::worker".buildlogs = mkDefault "${scfg.statePath}/logs";
|
||||
"builds.sr.ht::worker".images = mkDefault "${image_dir}/images";
|
||||
"builds.sr.ht::worker".controlcmd = mkDefault "${image_dir}/images/control";
|
||||
"builds.sr.ht::worker".timeout = mkDefault "3m";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."logs.${cfg.originBase}" =
|
||||
if scfg.enableWorker then {
|
||||
listen = with builtins; let address = split ":" cfg.settings."builds.sr.ht::worker".name;
|
||||
in [{ addr = elemAt address 0; port = lib.toInt (elemAt address 2); }];
|
||||
locations."/logs".root = "${scfg.statePath}";
|
||||
} else { };
|
||||
|
||||
services.nginx.virtualHosts."builds.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.buildsrht}/${pkgs.sourcehut.python.sitePackages}/buildsrht";
|
||||
};
|
||||
};
|
||||
}
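A minimal sketch of enabling builds.sr.ht together with a local worker, using only options defined in this file and in default.nix below (remember the manual qemu image build step noted in the `enableWorker` description):

```nix
{
  services.sourcehut = {
    enable = true;
    services = [ "meta" "builds" ];
    builds.enableWorker = true;
    # builds.images = ...; # optional: distro.release.arch -> image derivation, see the literalExample above
  };
}
```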
|
nixos/modules/services/misc/sourcehut/default.nix (new file, 198 lines)
|
@ -0,0 +1,198 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
settingsFormat = pkgs.formats.ini { };
|
||||
|
||||
# Specialized python containing all the modules
|
||||
python = pkgs.sourcehut.python.withPackages (ps: with ps; [
|
||||
gunicorn
|
||||
# Sourcehut services
|
||||
srht
|
||||
buildsrht
|
||||
dispatchsrht
|
||||
gitsrht
|
||||
hgsrht
|
||||
hubsrht
|
||||
listssrht
|
||||
mansrht
|
||||
metasrht
|
||||
pastesrht
|
||||
todosrht
|
||||
]);
|
||||
in
|
||||
{
|
||||
imports =
|
||||
[
|
||||
./git.nix
|
||||
./hg.nix
|
||||
./hub.nix
|
||||
./todo.nix
|
||||
./man.nix
|
||||
./meta.nix
|
||||
./paste.nix
|
||||
./builds.nix
|
||||
./lists.nix
|
||||
./dispatch.nix
|
||||
(mkRemovedOptionModule [ "services" "sourcehut" "nginx" "enable" ] ''
|
||||
The sourcehut module supports `nginx` as a local reverse-proxy by default and doesn't
|
||||
support other reverse-proxies officially.
|
||||
|
||||
However, it is possible to use an alternative reverse-proxy by
|
||||
|
||||
* disabling nginx
|
||||
* adjusting the relevant settings for server addresses and ports directly
|
||||
|
||||
Further details about this can be found in the `Sourcehut`-section of the NixOS-manual.
|
||||
'')
|
||||
];
|
||||
|
||||
options.services.sourcehut = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable sourcehut - git hosting, continuous integration, mailing list, ticket tracking,
|
||||
task dispatching, wiki and account management services
|
||||
'';
|
||||
};
|
||||
|
||||
services = mkOption {
|
||||
type = types.nonEmptyListOf (types.enum [ "builds" "dispatch" "git" "hub" "hg" "lists" "man" "meta" "paste" "todo" ]);
|
||||
default = [ "man" "meta" "paste" ];
|
||||
example = [ "builds" "dispatch" "git" "hub" "hg" "lists" "man" "meta" "paste" "todo" ];
|
||||
description = ''
|
||||
Services to enable on the sourcehut network.
|
||||
'';
|
||||
};
|
||||
|
||||
originBase = mkOption {
|
||||
type = types.str;
|
||||
default = with config.networking; hostName + lib.optionalString (domain != null) ".${domain}";
|
||||
description = ''
|
||||
Host name used by reverse-proxy and for default settings. Will host services at git."''${originBase}". For example: git.sr.ht
|
||||
'';
|
||||
};
|
||||
|
||||
address = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1";
|
||||
description = ''
|
||||
Address to bind to.
|
||||
'';
|
||||
};
|
||||
|
||||
python = mkOption {
|
||||
internal = true;
|
||||
type = types.package;
|
||||
default = python;
|
||||
description = ''
|
||||
The python package to use. It should contain references to the *srht modules and also
|
||||
gunicorn.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/sourcehut";
|
||||
description = ''
|
||||
Root state path for the sourcehut network. If left as the default value
|
||||
this directory will automatically be created before the sourcehut server
|
||||
starts, otherwise the sysadmin is responsible for ensuring the
|
||||
directory exists with appropriate ownership and permissions.
|
||||
'';
|
||||
};
|
||||
|
||||
settings = mkOption {
|
||||
type = lib.types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
};
|
||||
default = { };
|
||||
description = ''
|
||||
The configuration for the sourcehut network.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions =
|
||||
[
|
||||
{
|
||||
assertion = with cfgIni.webhooks; private-key != null && stringLength private-key == 44;
|
||||
message = "The webhook's private key must be defined and of a 44 byte length.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = hasAttrByPath [ "meta.sr.ht" "origin" ] cfgIni && cfgIni."meta.sr.ht".origin != null;
|
||||
message = "meta.sr.ht's origin must be defined.";
|
||||
}
|
||||
];
|
||||
|
||||
virtualisation.docker.enable = true;
|
||||
environment.etc."sr.ht/config.ini".source =
|
||||
settingsFormat.generate "sourcehut-config.ini" (mapAttrsRecursive
|
||||
(
|
||||
path: v: if v == null then "" else v
|
||||
)
|
||||
cfg.settings);
|
||||
|
||||
environment.systemPackages = [ pkgs.sourcehut.coresrht ];
|
||||
|
||||
# PostgreSQL server
|
||||
services.postgresql.enable = mkOverride 999 true;
|
||||
# Mail server
|
||||
services.postfix.enable = mkOverride 999 true;
|
||||
# Cron daemon
|
||||
services.cron.enable = mkOverride 999 true;
|
||||
# Redis server
|
||||
services.redis.enable = mkOverride 999 true;
|
||||
services.redis.bind = mkOverride 999 "127.0.0.1";
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# The name of your network of sr.ht-based sites
|
||||
"sr.ht".site-name = mkDefault "sourcehut";
|
||||
# The top-level info page for your site
|
||||
"sr.ht".site-info = mkDefault "https://sourcehut.org";
|
||||
# {{ site-name }}, {{ site-blurb }}
|
||||
"sr.ht".site-blurb = mkDefault "the hacker's forge";
|
||||
# If this != production, we add a banner to each page
|
||||
"sr.ht".environment = mkDefault "development";
|
||||
# Contact information for the site owners
|
||||
"sr.ht".owner-name = mkDefault "Drew DeVault";
|
||||
"sr.ht".owner-email = mkDefault "sir@cmpwn.com";
|
||||
# The source code for your fork of sr.ht
|
||||
"sr.ht".source-url = mkDefault "https://git.sr.ht/~sircmpwn/srht";
|
||||
# A secret key to encrypt session cookies with
|
||||
"sr.ht".secret-key = mkDefault null;
|
||||
"sr.ht".global-domain = mkDefault null;
|
||||
|
||||
# Outgoing SMTP settings
|
||||
mail.smtp-host = mkDefault null;
|
||||
mail.smtp-port = mkDefault null;
|
||||
mail.smtp-user = mkDefault null;
|
||||
mail.smtp-password = mkDefault null;
|
||||
mail.smtp-from = mkDefault null;
|
||||
# Application exceptions are emailed to this address
|
||||
mail.error-to = mkDefault null;
|
||||
mail.error-from = mkDefault null;
|
||||
# Your PGP key information (DO NOT mix up pub and priv here)
|
||||
# You must remove the password from your secret key, if present.
|
||||
# You can do this with gpg --edit-key [key-id], then use the passwd
|
||||
# command and do not enter a new password.
|
||||
mail.pgp-privkey = mkDefault null;
|
||||
mail.pgp-pubkey = mkDefault null;
|
||||
mail.pgp-key-id = mkDefault null;
|
||||
|
||||
# base64-encoded Ed25519 key for signing webhook payloads. This should be
|
||||
# consistent for all *.sr.ht sites, as we'll use this key to verify signatures
|
||||
# from other sites in your network.
|
||||
#
|
||||
# Use the srht-webhook-keygen command to generate a key.
|
||||
webhooks.private-key = mkDefault null;
|
||||
};
|
||||
};
|
||||
meta.doc = ./sourcehut.xml;
|
||||
meta.maintainers = with maintainers; [ tomberek ];
|
||||
}
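Pulling the options above together, a minimal sketch of a sourcehut deployment (domain and site details are illustrative; the secret values are placeholders that must be generated as described in the settings comments, e.g. the webhook key with srht-webhook-keygen, and the assertions above require both the webhook key and meta.sr.ht's origin to be set):

```nix
{
  services.sourcehut = {
    enable = true;
    originBase = "sr.example.org";
    services = [ "meta" "git" "paste" ];
    settings = {
      "sr.ht".site-name = "example forge";
      "sr.ht".environment = "production";
      # Placeholder secrets:
      "sr.ht".secret-key = "<session-cookie secret>";
      webhooks.private-key = "<base64 Ed25519 private key, 44 characters>";
      "meta.sr.ht".origin = "https://meta.sr.example.org";
    };
  };
}
```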
|
nixos/modules/services/misc/sourcehut/dispatch.nix (new file, 125 lines)
|
@ -0,0 +1,125 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.dispatch;
|
||||
iniKey = "dispatch.sr.ht";
|
||||
|
||||
drv = pkgs.sourcehut.dispatchsrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.dispatch = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "dispatchsrht";
|
||||
description = ''
|
||||
User for dispatch.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5005;
|
||||
description = ''
|
||||
Port on which the "dispatch" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "dispatch.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for dispatch.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/dispatchsrht";
|
||||
description = ''
|
||||
State path for dispatch.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "dispatch" cfg.services) {
|
||||
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
description = "dispatch.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services.dispatchsrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "dispatch.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL dispatch.sr.ht is being served at (protocol://domain)
|
||||
"dispatch.sr.ht".origin = mkDefault "http://dispatch.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"dispatch.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"dispatch.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"dispatch.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"dispatch.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# dispatch.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"dispatch.sr.ht".oauth-client-id = mkDefault null;
|
||||
"dispatch.sr.ht".oauth-client-secret = mkDefault null;
|
||||
|
||||
# Github Integration
|
||||
"dispatch.sr.ht::github".oauth-client-id = mkDefault null;
|
||||
"dispatch.sr.ht::github".oauth-client-secret = mkDefault null;
|
||||
|
||||
# Gitlab Integration
|
||||
"dispatch.sr.ht::gitlab".enabled = mkDefault null;
|
||||
"dispatch.sr.ht::gitlab".canonical-upstream = mkDefault "gitlab.com";
|
||||
"dispatch.sr.ht::gitlab".repo-cache = mkDefault "./repo-cache";
|
||||
# "dispatch.sr.ht::gitlab"."gitlab.com" = mkDefault "GitLab:application id:secret";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."dispatch.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.dispatchsrht}/${pkgs.sourcehut.python.sitePackages}/dispatchsrht";
|
||||
};
|
||||
};
|
||||
}
|
nixos/modules/services/misc/sourcehut/git.nix (new file, 214 lines)
|
@ -0,0 +1,214 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
scfg = cfg.git;
|
||||
iniKey = "git.sr.ht";
|
||||
|
||||
rcfg = config.services.redis;
|
||||
drv = pkgs.sourcehut.gitsrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.git = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
visible = false;
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
default = "git";
|
||||
description = ''
|
||||
User for git.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5001;
|
||||
description = ''
|
||||
Port on which the "git" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "git.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for git.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/gitsrht";
|
||||
description = ''
|
||||
State path for git.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.git;
|
||||
example = literalExample "pkgs.gitFull";
|
||||
description = ''
|
||||
Git package for git.sr.ht. This can help silence collision warnings.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "git" cfg.services) {
|
||||
# sshd refuses to run with `Unsafe AuthorizedKeysCommand ... bad ownership or modes for directory /nix/store`
|
||||
environment.etc."ssh/gitsrht-dispatch" = {
|
||||
mode = "0755";
|
||||
text = ''
|
||||
#! ${pkgs.stdenv.shell}
|
||||
${cfg.python}/bin/gitsrht-dispatch "$@"
|
||||
'';
|
||||
};
|
||||
|
||||
# Needs this in the $PATH when sshing into the server
|
||||
environment.systemPackages = [ cfg.git.package ];
|
||||
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
# https://stackoverflow.com/questions/22314298/git-push-results-in-fatal-protocol-error-bad-line-length-character-this
|
||||
# Probably could use gitsrht-shell if output is restricted to just parameters...
|
||||
shell = pkgs.bash;
|
||||
description = "git.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
cron.systemCronJobs = [ "*/20 * * * * ${cfg.python}/bin/gitsrht-periodic" ];
|
||||
fcgiwrap.enable = true;
|
||||
|
||||
openssh.authorizedKeysCommand = ''/etc/ssh/gitsrht-dispatch "%u" "%h" "%t" "%k"'';
|
||||
openssh.authorizedKeysCommandUser = "root";
|
||||
openssh.extraConfig = ''
|
||||
PermitUserEnvironment SRHT_*
|
||||
'';
|
||||
|
||||
postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
# /var/log is owned by root
|
||||
"f /var/log/git-srht-shell 0644 ${user} ${user} -"
|
||||
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
"d ${cfg.settings."${iniKey}".repos} 2755 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services = {
|
||||
gitsrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "redis.service" "postgresql.service" "network.target" ];
|
||||
requires = [ "redis.service" "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
# Needed internally to create repos, at the very least
|
||||
path = [ pkgs.git ];
|
||||
description = "git.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
|
||||
gitsrht-webhooks = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "git.sr.ht webhooks service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
};
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/celery -A ${drv.pname}.webhooks worker --loglevel=info";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL git.sr.ht is being served at (protocol://domain)
|
||||
"git.sr.ht".origin = mkDefault "http://git.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"git.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"git.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"git.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"git.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# The redis connection used for the webhooks worker
|
||||
"git.sr.ht".webhooks = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/1";
|
||||
|
||||
# A post-update script which is installed in every git repo.
|
||||
"git.sr.ht".post-update-script = mkDefault "${pkgs.sourcehut.gitsrht}/bin/gitsrht-update-hook";
|
||||
|
||||
# git.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"git.sr.ht".oauth-client-id = mkDefault null;
|
||||
"git.sr.ht".oauth-client-secret = mkDefault null;
|
||||
# Path to git repositories on disk
|
||||
"git.sr.ht".repos = mkDefault "/var/lib/git";
|
||||
|
||||
"git.sr.ht".outgoing-domain = mkDefault "http://git.${cfg.originBase}";
|
||||
|
||||
# The authorized keys hook uses this to dispatch to various handlers
|
||||
# The format is a program to exec into as the key, and the user to match as the
|
||||
# value. When someone tries to log in as this user, this program is executed
|
||||
# and is expected to emit an AuthorizedKeys file.
|
||||
#
|
||||
# Discard of the string context is in order to allow derivation-derived strings.
|
||||
# This is safe if the relevant package is installed which will be the case if the setting is utilized.
|
||||
"git.sr.ht::dispatch".${builtins.unsafeDiscardStringContext "${pkgs.sourcehut.gitsrht}/bin/gitsrht-keys"} = mkDefault "${user}:${user}";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."git.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.gitsrht}/${pkgs.sourcehut.python.sitePackages}/gitsrht";
|
||||
extraConfig = ''
|
||||
location = /authorize {
|
||||
proxy_pass http://${cfg.address}:${toString port};
|
||||
proxy_pass_request_body off;
|
||||
proxy_set_header Content-Length "";
|
||||
proxy_set_header X-Original-URI $request_uri;
|
||||
}
|
||||
location ~ ^/([^/]+)/([^/]+)/(HEAD|info/refs|objects/info/.*|git-upload-pack).*$ {
|
||||
auth_request /authorize;
|
||||
root /var/lib/git;
|
||||
fastcgi_pass unix:/run/fcgiwrap.sock;
|
||||
fastcgi_param SCRIPT_FILENAME ${pkgs.git}/bin/git-http-backend;
|
||||
fastcgi_param PATH_INFO $uri;
|
||||
fastcgi_param GIT_PROJECT_ROOT $document_root;
|
||||
fastcgi_read_timeout 500s;
|
||||
include ${pkgs.nginx}/conf/fastcgi_params;
|
||||
gzip off;
|
||||
}
|
||||
'';
|
||||
|
||||
};
|
||||
};
|
||||
}
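A short sketch of adjusting the git service using the options and settings defined above (the repository path is illustrative):

```nix
{ pkgs, ... }:
{
  services.sourcehut = {
    enable = true;
    services = [ "meta" "git" ];
    git.package = pkgs.gitFull; # as the option description suggests, helps with collisions
  };
  # Store repositories somewhere other than the default /var/lib/git:
  services.sourcehut.settings."git.sr.ht".repos = "/srv/git";
}
```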
|
nixos/modules/services/misc/sourcehut/hg.nix (new file, 173 lines)
|
@ -0,0 +1,173 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
scfg = cfg.hg;
|
||||
iniKey = "hg.sr.ht";
|
||||
|
||||
rcfg = config.services.redis;
|
||||
drv = pkgs.sourcehut.hgsrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.hg = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
default = "hg";
|
||||
description = ''
|
||||
User for hg.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5010;
|
||||
description = ''
|
||||
Port on which the "hg" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "hg.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for hg.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/hgsrht";
|
||||
description = ''
|
||||
State path for hg.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
cloneBundles = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Generate clonebundles (which require more disk space but dramatically speed up cloning large repositories).
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "hg" cfg.services) {
|
||||
# In case it ever comes into being
|
||||
environment.etc."ssh/hgsrht-dispatch" = {
|
||||
mode = "0755";
|
||||
text = ''
|
||||
#! ${pkgs.stdenv.shell}
|
||||
${cfg.python}/bin/gitsrht-dispatch $@
|
||||
'';
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.mercurial ];
|
||||
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
# Assuming hg.sr.ht needs this too
|
||||
shell = pkgs.bash;
|
||||
description = "hg.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
cron.systemCronJobs = [ "*/20 * * * * ${cfg.python}/bin/hgsrht-periodic" ]
|
||||
++ optional cloneBundles "0 * * * * ${cfg.python}/bin/hgsrht-clonebundles";
|
||||
|
||||
openssh.authorizedKeysCommand = ''/etc/ssh/hgsrht-dispatch "%u" "%h" "%t" "%k"'';
|
||||
openssh.authorizedKeysCommandUser = "root";
|
||||
openssh.extraConfig = ''
|
||||
PermitUserEnvironment SRHT_*
|
||||
'';
|
||||
|
||||
postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
# /var/log is owned by root
|
||||
"f /var/log/hg-srht-shell 0644 ${user} ${user} -"
|
||||
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
"d ${cfg.settings."${iniKey}".repos} 2755 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services.hgsrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "redis.service" "postgresql.service" "network.target" ];
|
||||
requires = [ "redis.service" "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
path = [ pkgs.mercurial ];
|
||||
description = "hg.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL hg.sr.ht is being served at (protocol://domain)
|
||||
"hg.sr.ht".origin = mkDefault "http://hg.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"hg.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"hg.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"hg.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# The redis connection used for the webhooks worker
|
||||
"hg.sr.ht".webhooks = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/1";
|
||||
# A post-update script which is installed in every mercurial repo.
|
||||
"hg.sr.ht".changegroup-script = mkDefault "${cfg.python}/bin/hgsrht-hook-changegroup";
|
||||
# hg.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"hg.sr.ht".oauth-client-id = mkDefault null;
|
||||
"hg.sr.ht".oauth-client-secret = mkDefault null;
|
||||
# Path to mercurial repositories on disk
|
||||
"hg.sr.ht".repos = mkDefault "/var/lib/hg";
|
||||
# Path to the srht mercurial extension
|
||||
# (defaults to where the hgsrht code is)
|
||||
# "hg.sr.ht".srhtext = mkDefault null;
|
||||
# .hg/store size (in MB) past which the nightly job generates clone bundles.
|
||||
# "hg.sr.ht".clone_bundle_threshold = mkDefault 50;
|
||||
# Path to hg-ssh (if not in $PATH)
|
||||
# "hg.sr.ht".hg_ssh = mkDefault /path/to/hg-ssh;
|
||||
|
||||
# The authorized keys hook uses this to dispatch to various handlers
|
||||
# The format is a program to exec into as the key, and the user to match as the
|
||||
# value. When someone tries to log in as this user, this program is executed
|
||||
# and is expected to emit an AuthorizedKeys file.
|
||||
#
|
||||
# Uncomment the relevant lines to enable the various sr.ht dispatchers.
|
||||
"hg.sr.ht::dispatch"."/run/current-system/sw/bin/hgsrht-keys" = mkDefault "${user}:${user}";
|
||||
};
|
||||
|
||||
# TODO: requires testing and addition of hg-specific requirements
|
||||
services.nginx.virtualHosts."hg.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.hgsrht}/${pkgs.sourcehut.python.sitePackages}/hgsrht";
|
||||
};
|
||||
};
|
||||
}
|
nixos/modules/services/misc/sourcehut/hub.nix (new file, 118 lines)
|
@ -0,0 +1,118 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.hub;
|
||||
iniKey = "hub.sr.ht";
|
||||
|
||||
drv = pkgs.sourcehut.hubsrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.hub = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "hubsrht";
|
||||
description = ''
|
||||
User for hub.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5014;
|
||||
description = ''
|
||||
Port on which the "hub" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "hub.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for hub.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/hubsrht";
|
||||
description = ''
|
||||
State path for hub.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "hub" cfg.services) {
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
description = "hub.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services.hubsrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "hub.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL hub.sr.ht is being served at (protocol://domain)
|
||||
"hub.sr.ht".origin = mkDefault "http://hub.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"hub.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"hub.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"hub.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"hub.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# hub.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"hub.sr.ht".oauth-client-id = mkDefault null;
|
||||
"hub.sr.ht".oauth-client-secret = mkDefault null;
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.hubsrht}/${pkgs.sourcehut.python.sitePackages}/hubsrht";
|
||||
};
|
||||
services.nginx.virtualHosts."hub.${cfg.originBase}" = {
|
||||
globalRedirect = "${cfg.originBase}";
|
||||
forceSSL = true;
|
||||
};
|
||||
};
|
||||
}
|
nixos/modules/services/misc/sourcehut/lists.nix (new file, 185 lines)
|
@ -0,0 +1,185 @@
|
|||
# Email setup is fairly involved, useful references:
|
||||
# https://drewdevault.com/2018/08/05/Local-mail-server.html
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.lists;
|
||||
iniKey = "lists.sr.ht";
|
||||
|
||||
rcfg = config.services.redis;
|
||||
drv = pkgs.sourcehut.listssrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.lists = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "listssrht";
|
||||
description = ''
|
||||
User for lists.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5006;
|
||||
description = ''
|
||||
Port on which the "lists" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "lists.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for lists.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/listssrht";
|
||||
description = ''
|
||||
State path for lists.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "lists" cfg.services) {
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
extraGroups = [ "postfix" ];
|
||||
description = "lists.sr.ht user";
|
||||
};
|
||||
};
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services = {
|
||||
listssrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "lists.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
|
||||
listssrht-process = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "lists.sr.ht process service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/celery -A ${drv.pname}.process worker --loglevel=info";
|
||||
};
|
||||
};
|
||||
|
||||
listssrht-lmtp = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "lists.sr.ht process service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/listssrht-lmtp";
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
listssrht-webhooks = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "lists.sr.ht webhooks service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/celery -A ${drv.pname}.webhooks worker --loglevel=info";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL lists.sr.ht is being served at (protocol://domain)
|
||||
"lists.sr.ht".origin = mkDefault "http://lists.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"lists.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"lists.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"lists.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"lists.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# lists.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"lists.sr.ht".oauth-client-id = mkDefault null;
|
||||
"lists.sr.ht".oauth-client-secret = mkDefault null;
|
||||
# Outgoing email for notifications generated by users
|
||||
"lists.sr.ht".notify-from = mkDefault "CHANGEME@example.org";
|
||||
# The redis connection used for the webhooks worker
|
||||
"lists.sr.ht".webhooks = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/2";
|
||||
# The redis connection used for the celery worker
|
||||
"lists.sr.ht".redis = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/4";
|
||||
# Network-key
|
||||
"lists.sr.ht".network-key = mkDefault null;
|
||||
# Allow creation
|
||||
"lists.sr.ht".allow-new-lists = mkDefault "no";
|
||||
# Posting Domain
|
||||
"lists.sr.ht".posting-domain = mkDefault "lists.${cfg.originBase}";
|
||||
|
||||
# Path for the lmtp daemon's unix socket. Direct incoming mail to this socket.
|
||||
# Alternatively, specify IP:PORT and an SMTP server will be run instead.
|
||||
"lists.sr.ht::worker".sock = mkDefault "/tmp/lists.sr.ht-lmtp.sock";
|
||||
# The lmtp daemon will make the unix socket group-read/write for users in this
|
||||
# group.
|
||||
"lists.sr.ht::worker".sock-group = mkDefault "postfix";
|
||||
"lists.sr.ht::worker".reject-url = mkDefault "https://man.sr.ht/lists.sr.ht/etiquette.md";
|
||||
"lists.sr.ht::worker".reject-mimetypes = mkDefault "text/html";
|
||||
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."lists.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.listssrht}/${pkgs.sourcehut.python.sitePackages}/listssrht";
|
||||
};
|
||||
};
|
||||
}
|
122
nixos/modules/services/misc/sourcehut/man.nix
Normal file
122
nixos/modules/services/misc/sourcehut/man.nix
Normal file
|
@ -0,0 +1,122 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.man;
|
||||
iniKey = "man.sr.ht";
|
||||
|
||||
drv = pkgs.sourcehut.mansrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.man = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "mansrht";
|
||||
description = ''
|
||||
User for man.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5004;
|
||||
description = ''
|
||||
Port on which the "man" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "man.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for man.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/mansrht";
|
||||
description = ''
|
||||
State path for man.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "man" cfg.services) {
|
||||
assertions =
|
||||
[
|
||||
{
|
||||
assertion = hasAttrByPath [ "git.sr.ht" "oauth-client-id" ] cfgIni;
|
||||
message = "man.sr.ht needs access to git.sr.ht.";
|
||||
}
|
||||
];
|
||||
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
description = "man.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services.mansrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "man.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL man.sr.ht is being served at (protocol://domain)
|
||||
"man.sr.ht".origin = mkDefault "http://man.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"man.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"man.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"man.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"man.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# man.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"man.sr.ht".oauth-client-id = mkDefault null;
|
||||
"man.sr.ht".oauth-client-secret = mkDefault null;
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."man.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.mansrht}/${pkgs.sourcehut.python.sitePackages}/mansrht";
|
||||
};
|
||||
};
|
||||
}
|
211
nixos/modules/services/misc/sourcehut/meta.nix
Normal file
211
nixos/modules/services/misc/sourcehut/meta.nix
Normal file
|
@ -0,0 +1,211 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.meta;
|
||||
iniKey = "meta.sr.ht";
|
||||
|
||||
rcfg = config.services.redis;
|
||||
drv = pkgs.sourcehut.metasrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.meta = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "metasrht";
|
||||
description = ''
|
||||
User for meta.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5000;
|
||||
description = ''
|
||||
Port on which the "meta" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "meta.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for meta.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/metasrht";
|
||||
description = ''
|
||||
State path for meta.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "meta" cfg.services) {
|
||||
assertions =
|
||||
[
|
||||
{
|
||||
assertion = with cfgIni."meta.sr.ht::billing"; enabled == "yes" -> (stripe-public-key != null && stripe-secret-key != null);
|
||||
message = "If meta.sr.ht::billing is enabled, the keys should be defined.";
|
||||
}
|
||||
];
|
||||
|
||||
users = {
|
||||
users = {
|
||||
${user} = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
description = "meta.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.cron.systemCronJobs = [ "0 0 * * * ${cfg.python}/bin/metasrht-daily" ];
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services = {
|
||||
metasrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "meta.sr.ht website service";
|
||||
|
||||
preStart = ''
|
||||
# Configure client(s) as "preauthorized"
|
||||
${concatMapStringsSep "\n\n"
|
||||
(attr: ''
|
||||
if ! test -e "${statePath}/${attr}.oauth" || [ "$(cat ${statePath}/${attr}.oauth)" != "${cfgIni."${attr}".oauth-client-id}" ]; then
|
||||
# Configure ${attr}'s OAuth client as "preauthorized"
|
||||
psql ${database} \
|
||||
-c "UPDATE oauthclient SET preauthorized = true WHERE client_id = '${cfgIni."${attr}".oauth-client-id}'"
|
||||
|
||||
printf "%s" "${cfgIni."${attr}".oauth-client-id}" > "${statePath}/${attr}.oauth"
|
||||
fi
|
||||
'')
|
||||
(builtins.attrNames (filterAttrs
|
||||
(k: v: !(hasInfix "::" k) && builtins.hasAttr "oauth-client-id" v && v.oauth-client-id != null)
|
||||
cfg.settings))}
|
||||
'';
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
|
||||
metasrht-api = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "meta.sr.ht api service";
|
||||
|
||||
preStart = ''
|
||||
# Configure client(s) as "preauthorized"
|
||||
${concatMapStringsSep "\n\n"
|
||||
(attr: ''
|
||||
if ! test -e "${statePath}/${attr}.oauth" || [ "$(cat ${statePath}/${attr}.oauth)" != "${cfgIni."${attr}".oauth-client-id}" ]; then
|
||||
# Configure ${attr}'s OAuth client as "preauthorized"
|
||||
psql ${database} \
|
||||
-c "UPDATE oauthclient SET preauthorized = true WHERE client_id = '${cfgIni."${attr}".oauth-client-id}'"
|
||||
|
||||
printf "%s" "${cfgIni."${attr}".oauth-client-id}" > "${statePath}/${attr}.oauth"
|
||||
fi
|
||||
'')
|
||||
(builtins.attrNames (filterAttrs
|
||||
(k: v: !(hasInfix "::" k) && builtins.hasAttr "oauth-client-id" v && v.oauth-client-id != null)
|
||||
cfg.settings))}
|
||||
'';
|
||||
|
||||
serviceConfig.ExecStart = "${pkgs.sourcehut.metasrht}/bin/metasrht-api -b :${toString (port + 100)}";
|
||||
};
|
||||
|
||||
metasrht-webhooks = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "meta.sr.ht webhooks service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/celery -A ${drv.pname}.webhooks worker --loglevel=info";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL meta.sr.ht is being served at (protocol://domain)
|
||||
"meta.sr.ht".origin = mkDefault "https://meta.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"meta.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"meta.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"meta.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"meta.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# If "yes", the user will be sent the stock sourcehut welcome emails after
|
||||
# signup (requires cron to be configured properly). These are specific to the
|
||||
# sr.ht instance so you probably want to patch these before enabling this.
|
||||
"meta.sr.ht".welcome-emails = mkDefault "no";
|
||||
|
||||
# The redis connection used for the webhooks worker
|
||||
"meta.sr.ht".webhooks = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/6";
|
||||
|
||||
# If "no", public registration will not be permitted.
|
||||
"meta.sr.ht::settings".registration = mkDefault "no";
|
||||
# Where to redirect new users upon registration
|
||||
"meta.sr.ht::settings".onboarding-redirect = mkDefault "https://meta.${cfg.originBase}";
|
||||
# How many invites each user is issued upon registration (only applicable if
|
||||
# open registration is disabled)
|
||||
"meta.sr.ht::settings".user-invites = mkDefault 5;
|
||||
|
||||
# Origin URL for API, 100 more than web
|
||||
"meta.sr.ht".api-origin = mkDefault "http://localhost:5100";
|
||||
|
||||
# You can add aliases for the client IDs of commonly used OAuth clients here.
|
||||
#
|
||||
# Example:
|
||||
"meta.sr.ht::aliases" = mkDefault { };
|
||||
# "meta.sr.ht::aliases"."git.sr.ht" = 12345;
|
||||
|
||||
# "yes" to enable the billing system
|
||||
"meta.sr.ht::billing".enabled = mkDefault "no";
|
||||
# Get your keys at https://dashboard.stripe.com/account/apikeys
|
||||
"meta.sr.ht::billing".stripe-public-key = mkDefault null;
|
||||
"meta.sr.ht::billing".stripe-secret-key = mkDefault null;
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."meta.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.metasrht}/${pkgs.sourcehut.python.sitePackages}/metasrht";
|
||||
};
|
||||
};
|
||||
}
|
133
nixos/modules/services/misc/sourcehut/paste.nix
Normal file
133
nixos/modules/services/misc/sourcehut/paste.nix
Normal file
|
@ -0,0 +1,133 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.paste;
|
||||
iniKey = "paste.sr.ht";
|
||||
|
||||
rcfg = config.services.redis;
|
||||
drv = pkgs.sourcehut.pastesrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.paste = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "pastesrht";
|
||||
description = ''
|
||||
User for paste.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5011;
|
||||
description = ''
|
||||
Port on which the "paste" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "paste.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for paste.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/pastesrht";
|
||||
description = ''
|
||||
State path for pastesrht.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "paste" cfg.services) {
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
description = "paste.sr.ht user";
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services = {
|
||||
pastesrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "paste.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
|
||||
pastesrht-webhooks = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "paste.sr.ht webhooks service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/celery -A ${drv.pname}.webhooks worker --loglevel=info";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL paste.sr.ht is being served at (protocol://domain)
|
||||
"paste.sr.ht".origin = mkDefault "http://paste.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"paste.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"paste.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"paste.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"paste.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# paste.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"paste.sr.ht".oauth-client-id = mkDefault null;
|
||||
"paste.sr.ht".oauth-client-secret = mkDefault null;
|
||||
"paste.sr.ht".webhooks = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/5";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."paste.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.pastesrht}/${pkgs.sourcehut.python.sitePackages}/pastesrht";
|
||||
};
|
||||
};
|
||||
}
|
66
nixos/modules/services/misc/sourcehut/service.nix
Normal file
66
nixos/modules/services/misc/sourcehut/service.nix
Normal file
|
@ -0,0 +1,66 @@
|
|||
{ config, pkgs, lib }:
|
||||
serviceCfg: serviceDrv: iniKey: attrs:
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings."${iniKey}";
|
||||
pgSuperUser = config.services.postgresql.superUser;
|
||||
|
||||
setupDB = pkgs.writeScript "${serviceDrv.pname}-gen-db" ''
|
||||
#! ${cfg.python}/bin/python
|
||||
from ${serviceDrv.pname}.app import db
|
||||
db.create()
|
||||
'';
|
||||
in
|
||||
with serviceCfg; with lib; recursiveUpdate
|
||||
{
|
||||
environment.HOME = statePath;
|
||||
path = [ config.services.postgresql.package ] ++ (attrs.path or [ ]);
|
||||
restartTriggers = [ config.environment.etc."sr.ht/config.ini".source ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Group = user;
|
||||
Restart = "always";
|
||||
WorkingDirectory = statePath;
|
||||
} // (if (cfg.statePath == "/var/lib/sourcehut/${serviceDrv.pname}") then {
|
||||
StateDirectory = [ "sourcehut/${serviceDrv.pname}" ];
|
||||
} else {})
|
||||
;
|
||||
|
||||
preStart = ''
|
||||
if ! test -e ${statePath}/db; then
|
||||
# Setup the initial database
|
||||
${setupDB}
|
||||
|
||||
# Set the initial state of the database for future database upgrades
|
||||
if test -e ${cfg.python}/bin/${serviceDrv.pname}-migrate; then
|
||||
# Run alembic stamp head once to tell alembic the schema is up-to-date
|
||||
${cfg.python}/bin/${serviceDrv.pname}-migrate stamp head
|
||||
fi
|
||||
|
||||
printf "%s" "${serviceDrv.version}" > ${statePath}/db
|
||||
fi
|
||||
|
||||
# Update copy of each users' profile to the latest
|
||||
# See https://lists.sr.ht/~sircmpwn/sr.ht-admins/<20190302181207.GA13778%40cirno.my.domain>
|
||||
if ! test -e ${statePath}/webhook; then
|
||||
# Update ${iniKey}'s users' profile copy to the latest
|
||||
${cfg.python}/bin/srht-update-profiles ${iniKey}
|
||||
|
||||
touch ${statePath}/webhook
|
||||
fi
|
||||
|
||||
${optionalString (builtins.hasAttr "migrate-on-upgrade" cfgIni && cfgIni.migrate-on-upgrade == "yes") ''
|
||||
if [ "$(cat ${statePath}/db)" != "${serviceDrv.version}" ]; then
|
||||
# Manage schema migrations using alembic
|
||||
${cfg.python}/bin/${serviceDrv.pname}-migrate -a upgrade head
|
||||
|
||||
# Mark down current package version
|
||||
printf "%s" "${serviceDrv.version}" > ${statePath}/db
|
||||
fi
|
||||
''}
|
||||
|
||||
${attrs.preStart or ""}
|
||||
'';
|
||||
}
|
||||
(builtins.removeAttrs attrs [ "path" "preStart" ])
|
115
nixos/modules/services/misc/sourcehut/sourcehut.xml
Normal file
115
nixos/modules/services/misc/sourcehut/sourcehut.xml
Normal file
|
@ -0,0 +1,115 @@
|
|||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="module-services-sourcehut">
|
||||
<title>Sourcehut</title>
|
||||
<para>
|
||||
<link xlink:href="https://sr.ht.com/">Sourcehut</link> is an open-source,
|
||||
self-hostable software development platform. The server setup can be automated using
|
||||
<link linkend="opt-services.sourcehut.enable">services.sourcehut</link>.
|
||||
</para>
|
||||
|
||||
<section xml:id="module-services-sourcehut-basic-usage">
|
||||
<title>Basic usage</title>
|
||||
<para>
|
||||
Sourcehut is a Python and Go based set of applications.
|
||||
<literal><link linkend="opt-services.sourcehut.enable">services.sourcehut</link></literal>
|
||||
by default will use
|
||||
<literal><link linkend="opt-services.nginx.enable">services.nginx</link></literal>,
|
||||
<literal><link linkend="opt-services.nginx.enable">services.redis</link></literal>,
|
||||
<literal><link linkend="opt-services.nginx.enable">services.cron</link></literal>,
|
||||
and
|
||||
<literal><link linkend="opt-services.postgresql.enable">services.postgresql</link></literal>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A very basic configuration may look like this:
|
||||
<programlisting>
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
fqdn =
|
||||
let
|
||||
join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
|
||||
in join config.networking.hostName config.networking.domain;
|
||||
in {
|
||||
|
||||
networking = {
|
||||
<link linkend="opt-networking.hostName">hostName</link> = "srht";
|
||||
<link linkend="opt-networking.domain">domain</link> = "tld";
|
||||
<link linkend="opt-networking.firewall.allowedTCPPorts">firewall.allowedTCPPorts</link> = [ 22 80 443 ];
|
||||
};
|
||||
|
||||
services.sourcehut = {
|
||||
<link linkend="opt-services.sourcehut.enable">enable</link> = true;
|
||||
<link linkend="opt-services.sourcehut.originBase">originBase</link> = fqdn;
|
||||
<link linkend="opt-services.sourcehut.services">services</link> = [ "meta" "man" "git" ];
|
||||
<link linkend="opt-services.sourcehut.settings">settings</link> = {
|
||||
"sr.ht" = {
|
||||
environment = "production";
|
||||
global-domain = fqdn;
|
||||
origin = "https://${fqdn}";
|
||||
# Produce keys with srht-keygen from <package>sourcehut.coresrht</package>.
|
||||
network-key = "SECRET";
|
||||
service-key = "SECRET";
|
||||
};
|
||||
webhooks.private-key= "SECRET";
|
||||
};
|
||||
};
|
||||
|
||||
<link linkend="opt-security.acme.certs._name_.extraDomainNames">security.acme.certs."${fqdn}".extraDomainNames</link> = [
|
||||
"meta.${fqdn}"
|
||||
"man.${fqdn}"
|
||||
"git.${fqdn}"
|
||||
];
|
||||
|
||||
services.nginx = {
|
||||
<link linkend="opt-services.nginx.enable">enable</link> = true;
|
||||
# only recommendedProxySettings are strictly required, but the rest make sense as well.
|
||||
<link linkend="opt-services.nginx.recommendedTlsSettings">recommendedTlsSettings</link> = true;
|
||||
<link linkend="opt-services.nginx.recommendedOptimisation">recommendedOptimisation</link> = true;
|
||||
<link linkend="opt-services.nginx.recommendedGzipSettings">recommendedGzipSettings</link> = true;
|
||||
<link linkend="opt-services.nginx.recommendedProxySettings">recommendedProxySettings</link> = true;
|
||||
|
||||
# Settings to setup what certificates are used for which endpoint.
|
||||
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
|
||||
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">"${fqdn}".enableACME</link> = true;
|
||||
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">"meta.${fqdn}".useACMEHost</link> = fqdn:
|
||||
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">"man.${fqdn}".useACMEHost</link> = fqdn:
|
||||
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">"git.${fqdn}".useACMEHost</link> = fqdn:
|
||||
};
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <literal>hostName</literal> option is used internally to configure the nginx
|
||||
reverse-proxy. The <literal>settings</literal> attribute set is
|
||||
used by the configuration generator and the result is placed in <literal>/etc/sr.ht/config.ini</literal>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="module-services-sourcehut-configuration">
|
||||
<title>Configuration</title>
|
||||
|
||||
<para>
|
||||
All configuration parameters are also stored in
|
||||
<literal>/etc/sr.ht/config.ini</literal> which is generated by
|
||||
the module and linked from the store to ensure that all values from <literal>config.ini</literal>
|
||||
can be modified by the module.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section xml:id="module-services-sourcehut-httpd">
|
||||
<title>Using an alternative webserver as reverse-proxy (e.g. <literal>httpd</literal>)</title>
|
||||
<para>
|
||||
By default, <package>nginx</package> is used as reverse-proxy for <package>sourcehut</package>.
|
||||
However, it's possible to use e.g. <package>httpd</package> by explicitly disabling
|
||||
<package>nginx</package> using <xref linkend="opt-services.nginx.enable" /> and fixing the
|
||||
<literal>settings</literal>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
</chapter>
|
161
nixos/modules/services/misc/sourcehut/todo.nix
Normal file
161
nixos/modules/services/misc/sourcehut/todo.nix
Normal file
|
@ -0,0 +1,161 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sourcehut;
|
||||
cfgIni = cfg.settings;
|
||||
scfg = cfg.todo;
|
||||
iniKey = "todo.sr.ht";
|
||||
|
||||
rcfg = config.services.redis;
|
||||
drv = pkgs.sourcehut.todosrht;
|
||||
in
|
||||
{
|
||||
options.services.sourcehut.todo = {
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "todosrht";
|
||||
description = ''
|
||||
User for todo.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 5003;
|
||||
description = ''
|
||||
Port on which the "todo" module should listen.
|
||||
'';
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = types.str;
|
||||
default = "todo.sr.ht";
|
||||
description = ''
|
||||
PostgreSQL database name for todo.sr.ht.
|
||||
'';
|
||||
};
|
||||
|
||||
statePath = mkOption {
|
||||
type = types.path;
|
||||
default = "${cfg.statePath}/todosrht";
|
||||
description = ''
|
||||
State path for todo.sr.ht.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = with scfg; lib.mkIf (cfg.enable && elem "todo" cfg.services) {
|
||||
users = {
|
||||
users = {
|
||||
"${user}" = {
|
||||
isSystemUser = true;
|
||||
group = user;
|
||||
extraGroups = [ "postfix" ];
|
||||
description = "todo.sr.ht user";
|
||||
};
|
||||
};
|
||||
groups = {
|
||||
"${user}" = { };
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
authentication = ''
|
||||
local ${database} ${user} trust
|
||||
'';
|
||||
ensureDatabases = [ database ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = user;
|
||||
ensurePermissions = { "DATABASE \"${database}\"" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${statePath} 0750 ${user} ${user} -"
|
||||
];
|
||||
|
||||
services = {
|
||||
todosrht = import ./service.nix { inherit config pkgs lib; } scfg drv iniKey {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "todo.sr.ht website service";
|
||||
|
||||
serviceConfig.ExecStart = "${cfg.python}/bin/gunicorn ${drv.pname}.app:app -b ${cfg.address}:${toString port}";
|
||||
};
|
||||
|
||||
todosrht-lmtp = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
bindsTo = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "todo.sr.ht process service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/todosrht-lmtp";
|
||||
};
|
||||
};
|
||||
|
||||
todosrht-webhooks = {
|
||||
after = [ "postgresql.service" "network.target" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
description = "todo.sr.ht webhooks service";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = user;
|
||||
Restart = "always";
|
||||
ExecStart = "${cfg.python}/bin/celery -A ${drv.pname}.webhooks worker --loglevel=info";
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.sourcehut.settings = {
|
||||
# URL todo.sr.ht is being served at (protocol://domain)
|
||||
"todo.sr.ht".origin = mkDefault "http://todo.${cfg.originBase}";
|
||||
# Address and port to bind the debug server to
|
||||
"todo.sr.ht".debug-host = mkDefault "0.0.0.0";
|
||||
"todo.sr.ht".debug-port = mkDefault port;
|
||||
# Configures the SQLAlchemy connection string for the database.
|
||||
"todo.sr.ht".connection-string = mkDefault "postgresql:///${database}?user=${user}&host=/var/run/postgresql";
|
||||
# Set to "yes" to automatically run migrations on package upgrade.
|
||||
"todo.sr.ht".migrate-on-upgrade = mkDefault "yes";
|
||||
# todo.sr.ht's OAuth client ID and secret for meta.sr.ht
|
||||
# Register your client at meta.example.org/oauth
|
||||
"todo.sr.ht".oauth-client-id = mkDefault null;
|
||||
"todo.sr.ht".oauth-client-secret = mkDefault null;
|
||||
# Outgoing email for notifications generated by users
|
||||
"todo.sr.ht".notify-from = mkDefault "CHANGEME@example.org";
|
||||
# The redis connection used for the webhooks worker
|
||||
"todo.sr.ht".webhooks = mkDefault "redis://${rcfg.bind}:${toString rcfg.port}/1";
|
||||
# Network-key
|
||||
"todo.sr.ht".network-key = mkDefault null;
|
||||
|
||||
# Path for the lmtp daemon's unix socket. Direct incoming mail to this socket.
|
||||
# Alternatively, specify IP:PORT and an SMTP server will be run instead.
|
||||
"todo.sr.ht::mail".sock = mkDefault "/tmp/todo.sr.ht-lmtp.sock";
|
||||
# The lmtp daemon will make the unix socket group-read/write for users in this
|
||||
# group.
|
||||
"todo.sr.ht::mail".sock-group = mkDefault "postfix";
|
||||
|
||||
"todo.sr.ht::mail".posting-domain = mkDefault "todo.${cfg.originBase}";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."todo.${cfg.originBase}" = {
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://${cfg.address}:${toString port}";
|
||||
locations."/query".proxyPass = "http://${cfg.address}:${toString (port + 100)}";
|
||||
locations."/static".root = "${pkgs.sourcehut.todosrht}/${pkgs.sourcehut.python.sitePackages}/todosrht";
|
||||
};
|
||||
};
|
||||
}
|
|
@ -51,6 +51,7 @@ let
|
|||
"pihole"
|
||||
"postfix"
|
||||
"postgres"
|
||||
"process"
|
||||
"py-air-control"
|
||||
"redis"
|
||||
"rspamd"
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.process;
|
||||
configFile = pkgs.writeText "process-exporter.yaml" (builtins.toJSON cfg.settings);
|
||||
in
|
||||
{
|
||||
port = 9256;
|
||||
extraOpts = {
|
||||
settings.process_names = mkOption {
|
||||
type = types.listOf types.anything;
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{
|
||||
process_names = [
|
||||
# Remove nix store path from process name
|
||||
{ name = "{{.Matches.Wrapped}} {{ .Matches.Args }}"; cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ]; }
|
||||
];
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
All settings expressed as an Nix attrset.
|
||||
|
||||
Check the official documentation for the corresponding YAML
|
||||
settings that can all be used here: <link xlink:href="https://github.com/ncabatoff/process-exporter" />
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-process-exporter}/bin/process-exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--config.path ${configFile} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
NoNewPrivileges = true;
|
||||
ProtectHome = true;
|
||||
ProtectSystem = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectControlGroups = true;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -40,8 +40,7 @@ in {
|
|||
default = [];
|
||||
example = [ "wlan0" "wlan1" ];
|
||||
description = ''
|
||||
The interfaces <command>wpa_supplicant</command> will use. If empty, it will
|
||||
automatically use all wireless interfaces.
|
||||
The interfaces <command>wpa_supplicant</command> will use.
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -220,7 +219,14 @@ in {
|
|||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = flip mapAttrsToList cfg.networks (name: cfg: {
|
||||
assertions = [
|
||||
{ assertion = cfg.interfaces != [];
|
||||
message = ''
|
||||
No network interfaces for wpa_supplicant have been configured.
|
||||
Please, specify at least one using networking.wireless.interfaces.
|
||||
'';
|
||||
}
|
||||
] ++ flip mapAttrsToList cfg.networks (name: cfg: {
|
||||
assertion = with cfg; count (x: x != null) [ psk pskRaw auth ] <= 1;
|
||||
message = ''options networking.wireless."${name}".{psk,pskRaw,auth} are mutually exclusive'';
|
||||
});
|
||||
|
@ -255,20 +261,7 @@ in {
|
|||
then echo >&2 "<3>/etc/wpa_supplicant.conf present but ignored. Generated ${configFile} is used instead."
|
||||
fi
|
||||
iface_args="-s -u -D${cfg.driver} ${configStr}"
|
||||
${if ifaces == [] then ''
|
||||
for i in $(cd /sys/class/net && echo *); do
|
||||
DEVTYPE=
|
||||
UEVENT_PATH=/sys/class/net/$i/uevent
|
||||
if [ -e "$UEVENT_PATH" ]; then
|
||||
source "$UEVENT_PATH"
|
||||
if [ "$DEVTYPE" = "wlan" -o -e /sys/class/net/$i/wireless ]; then
|
||||
args+="''${args:+ -N} -i$i $iface_args"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
'' else ''
|
||||
args="${concatMapStringsSep " -N " (i: "-i${i} $iface_args") ifaces}"
|
||||
''}
|
||||
args="${concatMapStringsSep " -N " (i: "-i${i} $iface_args") ifaces}"
|
||||
exec wpa_supplicant $args
|
||||
'';
|
||||
};
|
||||
|
|
|
@ -27,7 +27,7 @@ let
|
|||
|
||||
# NOTE: Use password authentication, since mysqljs does not yet support auth_socket
|
||||
if [ ! -e /var/lib/epgstation/db-created ]; then
|
||||
${pkgs.mysql}/bin/mysql -e \
|
||||
${pkgs.mariadb}/bin/mysql -e \
|
||||
"GRANT ALL ON \`${cfg.database.name}\`.* TO '${username}'@'localhost' IDENTIFIED by '$DB_PASSWORD';"
|
||||
touch /var/lib/epgstation/db-created
|
||||
fi
|
||||
|
@ -224,7 +224,7 @@ in
|
|||
|
||||
services.mysql = {
|
||||
enable = mkDefault true;
|
||||
package = mkDefault pkgs.mysql;
|
||||
package = mkDefault pkgs.mariadb;
|
||||
ensureDatabases = [ cfg.database.name ];
|
||||
# FIXME: enable once mysqljs supports auth_socket
|
||||
# ensureUsers = [ {
|
||||
|
|
|
@ -728,7 +728,7 @@ in
|
|||
|
||||
services.postgresql.enable = lib.mkDefault createLocalPostgreSQL;
|
||||
services.mysql.enable = lib.mkDefault createLocalMySQL;
|
||||
services.mysql.package = lib.mkIf createLocalMySQL pkgs.mysql;
|
||||
services.mysql.package = lib.mkIf createLocalMySQL pkgs.mariadb;
|
||||
};
|
||||
|
||||
meta.doc = ./keycloak.xml;
|
||||
|
|
|
@ -644,7 +644,7 @@ let
|
|||
|
||||
services.mysql = mkIf mysqlLocal {
|
||||
enable = true;
|
||||
package = mkDefault pkgs.mysql;
|
||||
package = mkDefault pkgs.mariadb;
|
||||
ensureDatabases = [ cfg.database.name ];
|
||||
ensureUsers = [
|
||||
{
|
||||
|
|
|
@ -66,9 +66,7 @@ in {
|
|||
};
|
||||
|
||||
in (mkMerge [{
|
||||
|
||||
environment.systemPackages = [ cfg.package pkgs.ipsecTools ];
|
||||
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
boot.kernelModules = [ "tun" "openvswitch" ];
|
||||
|
||||
boot.extraModulePackages = [ cfg.package ];
|
||||
|
@ -146,6 +144,8 @@ in {
|
|||
|
||||
}
|
||||
(mkIf (cfg.ipsec && (versionOlder cfg.package.version "2.6.0")) {
|
||||
environment.systemPackages = [ pkgs.ipsecTools ];
|
||||
|
||||
services.racoon.enable = true;
|
||||
services.racoon.configPath = "${runDir}/ipsec/etc/racoon/racoon.conf";
|
||||
|
||||
|
|
|
@ -42,7 +42,7 @@ let
|
|||
GRANT ALL ON `bitwarden`.* TO 'bitwardenuser'@'localhost';
|
||||
FLUSH PRIVILEGES;
|
||||
'';
|
||||
package = pkgs.mysql;
|
||||
package = pkgs.mariadb;
|
||||
};
|
||||
|
||||
services.bitwarden_rs.config.databaseUrl = "mysql://bitwardenuser:${dbPassword}@localhost/bitwarden";
|
||||
|
|
|
@ -36,7 +36,7 @@ import ./make-test-python.nix (
|
|||
default.wait_for_unit("calibre-web.service")
|
||||
default.wait_for_open_port(${toString defaultPort})
|
||||
default.succeed(
|
||||
"curl --fail 'http://localhost:${toString defaultPort}/basicconfig' | grep -q 'Basic Configuration'"
|
||||
"curl --fail 'http://localhost:${toString defaultPort}/basicconfig' | grep 'Basic Configuration'"
|
||||
)
|
||||
|
||||
customized.succeed(
|
||||
|
@ -46,7 +46,7 @@ import ./make-test-python.nix (
|
|||
customized.wait_for_unit("calibre-web.service")
|
||||
customized.wait_for_open_port(${toString port})
|
||||
customized.succeed(
|
||||
"curl --fail -H X-User:admin 'http://localhost:${toString port}' | grep -q test-book"
|
||||
"curl --fail -H X-User:admin 'http://localhost:${toString port}' | grep test-book"
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@ -23,15 +23,15 @@ import ./make-test-python.nix ({ pkgs, ... }: {
|
|||
with subtest("includeStorePath"):
|
||||
with subtest("assumption"):
|
||||
docker.succeed("${examples.helloOnRoot} | docker load")
|
||||
docker.succeed("set -euo pipefail; docker run --rm hello | grep -i hello")
|
||||
docker.succeed("docker run --rm hello | grep -i hello")
|
||||
docker.succeed("docker image rm hello:latest")
|
||||
with subtest("includeStorePath = false; breaks example"):
|
||||
docker.succeed("${examples.helloOnRootNoStore} | docker load")
|
||||
docker.fail("set -euo pipefail; docker run --rm hello | grep -i hello")
|
||||
docker.fail("docker run --rm hello | grep -i hello")
|
||||
docker.succeed("docker image rm hello:latest")
|
||||
with subtest("includeStorePath = false; works with mounted store"):
|
||||
docker.succeed("${examples.helloOnRootNoStore} | docker load")
|
||||
docker.succeed("set -euo pipefail; docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
|
||||
docker.succeed("docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
|
||||
docker.succeed("docker image rm hello:latest")
|
||||
|
||||
with subtest("Ensure Docker images use a stable date by default"):
|
||||
|
|
|
@ -38,6 +38,6 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
|
|||
machine.wait_for_unit("doh-proxy-rust.service")
|
||||
machine.wait_for_open_port(53)
|
||||
machine.wait_for_open_port(3000)
|
||||
machine.succeed(f"curl --fail '{url}?dns={query}' | grep -qF {bin_ip}")
|
||||
machine.succeed(f"curl --fail '{url}?dns={query}' | grep -F {bin_ip}")
|
||||
'';
|
||||
})
|
||||
|
|
|
@ -178,7 +178,7 @@ let
|
|||
one.systemctl("stop logstash")
|
||||
one.systemctl("start elasticsearch-curator")
|
||||
one.wait_until_succeeds(
|
||||
'! curl --silent --show-error "${esUrl}/_cat/indices" | grep logstash | grep -q ^'
|
||||
'! curl --silent --show-error "${esUrl}/_cat/indices" | grep logstash | grep ^'
|
||||
)
|
||||
'';
|
||||
}) {};
|
||||
|
|
|
@ -102,7 +102,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : with lib; {
|
|||
# `doSetup` is is true.
|
||||
test = doSetup: ''
|
||||
gitlab.succeed(
|
||||
"curl -isSf http://gitlab | grep -i location | grep -q http://gitlab/users/sign_in"
|
||||
"curl -isSf http://gitlab | grep -i location | grep http://gitlab/users/sign_in"
|
||||
)
|
||||
gitlab.succeed(
|
||||
"${pkgs.sudo}/bin/sudo -u gitlab -H gitlab-rake gitlab:check 1>&2"
|
||||
|
|
|
@ -42,7 +42,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
|
|||
"curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].uuid"
|
||||
)
|
||||
agent.succeed(
|
||||
"curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].agent_state | grep -q Idle"
|
||||
"curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].agent_state | grep Idle"
|
||||
)
|
||||
'';
|
||||
})
|
||||
|
|
|
@ -74,7 +74,7 @@ in {
|
|||
declarativePlugins.wait_for_unit("grafana.service")
|
||||
declarativePlugins.wait_for_open_port(3000)
|
||||
declarativePlugins.succeed(
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/plugins | grep -q grafana-clock-panel"
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/plugins | grep grafana-clock-panel"
|
||||
)
|
||||
declarativePlugins.shutdown()
|
||||
|
||||
|
@ -82,7 +82,7 @@ in {
|
|||
sqlite.wait_for_unit("grafana.service")
|
||||
sqlite.wait_for_open_port(3000)
|
||||
sqlite.succeed(
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep -q testadmin\@localhost"
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep testadmin\@localhost"
|
||||
)
|
||||
sqlite.shutdown()
|
||||
|
||||
|
@ -92,7 +92,7 @@ in {
|
|||
postgresql.wait_for_open_port(3000)
|
||||
postgresql.wait_for_open_port(5432)
|
||||
postgresql.succeed(
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep -q testadmin\@localhost"
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep testadmin\@localhost"
|
||||
)
|
||||
postgresql.shutdown()
|
||||
|
||||
|
@ -102,7 +102,7 @@ in {
|
|||
mysql.wait_for_open_port(3000)
|
||||
mysql.wait_for_open_port(3306)
|
||||
mysql.succeed(
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep -q testadmin\@localhost"
|
||||
"curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep testadmin\@localhost"
|
||||
)
|
||||
mysql.shutdown()
|
||||
'';
|
||||
|
|
|
@ -18,7 +18,7 @@ let
|
|||
};
|
||||
services.mysql = {
|
||||
enable = true;
|
||||
package = pkgs.mysql;
|
||||
package = pkgs.mariadb;
|
||||
};
|
||||
services.nginx.enable = true;
|
||||
};
|
||||
|
|
|
@ -48,23 +48,23 @@ with lib;
|
|||
|
||||
default.wait_for_unit("miniflux.service")
|
||||
default.wait_for_open_port(${toString defaultPort})
|
||||
default.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep -q OK")
|
||||
default.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep OK")
|
||||
default.succeed(
|
||||
"curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep -q '\"is_admin\":true'"
|
||||
"curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep '\"is_admin\":true'"
|
||||
)
|
||||
|
||||
withoutSudo.wait_for_unit("miniflux.service")
|
||||
withoutSudo.wait_for_open_port(${toString defaultPort})
|
||||
withoutSudo.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep -q OK")
|
||||
withoutSudo.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep OK")
|
||||
withoutSudo.succeed(
|
||||
"curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep -q '\"is_admin\":true'"
|
||||
"curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep '\"is_admin\":true'"
|
||||
)
|
||||
|
||||
customized.wait_for_unit("miniflux.service")
|
||||
customized.wait_for_open_port(${toString port})
|
||||
customized.succeed("curl --fail 'http://localhost:${toString port}/healthcheck' | grep -q OK")
|
||||
customized.succeed("curl --fail 'http://localhost:${toString port}/healthcheck' | grep OK")
|
||||
customized.succeed(
|
||||
"curl 'http://localhost:${toString port}/v1/me' -u '${username}:${password}' -H Content-Type:application/json | grep -q '\"is_admin\":true'"
|
||||
"curl 'http://localhost:${toString port}/v1/me' -u '${username}:${password}' -H Content-Type:application/json | grep '\"is_admin\":true'"
|
||||
)
|
||||
'';
|
||||
})
|
||||
|
|
|
@ -8,7 +8,7 @@ import ./../make-test-python.nix ({ pkgs, lib, ... }:
|
|||
{ pkgs, ... }:
|
||||
{
|
||||
services.mysql.enable = true;
|
||||
services.mysql.package = pkgs.mysql;
|
||||
services.mysql.package = pkgs.mariadb;
|
||||
services.mysql.initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
|
||||
|
||||
services.automysqlbackup.enable = true;
|
||||
|
|
|
@ -10,7 +10,7 @@ import ./../make-test-python.nix ({ pkgs, ... } : {
|
|||
services.mysql = {
|
||||
enable = true;
|
||||
initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
|
||||
package = pkgs.mysql;
|
||||
package = pkgs.mariadb;
|
||||
};
|
||||
|
||||
services.mysqlBackup = {
|
||||
|
|
|
@ -17,7 +17,7 @@ in
|
|||
|
||||
{
|
||||
services.mysql.enable = true;
|
||||
services.mysql.package = pkgs.mysql;
|
||||
services.mysql.package = pkgs.mariadb;
|
||||
services.mysql.replication.role = "master";
|
||||
services.mysql.replication.slaveHost = "%";
|
||||
services.mysql.replication.masterUser = replicateUser;
|
||||
|
@ -31,7 +31,7 @@ in
|
|||
|
||||
{
|
||||
services.mysql.enable = true;
|
||||
services.mysql.package = pkgs.mysql;
|
||||
services.mysql.package = pkgs.mariadb;
|
||||
services.mysql.replication.role = "slave";
|
||||
services.mysql.replication.serverId = 2;
|
||||
services.mysql.replication.masterHost = nodes.master.config.networking.hostName;
|
||||
|
@ -44,7 +44,7 @@ in
|
|||
|
||||
{
|
||||
services.mysql.enable = true;
|
||||
services.mysql.package = pkgs.mysql;
|
||||
services.mysql.package = pkgs.mariadb;
|
||||
services.mysql.replication.role = "slave";
|
||||
services.mysql.replication.serverId = 3;
|
||||
services.mysql.replication.masterHost = nodes.master.config.networking.hostName;
|
||||
|
|
|
@ -29,5 +29,5 @@ builtins.listToAttrs (
|
|||
};
|
||||
}
|
||||
)
|
||||
[ "nginxStable" "nginxUnstable" "nginxShibboleth" "openresty" "tengine" ]
|
||||
[ "nginxStable" "nginxMainline" "nginxShibboleth" "openresty" "tengine" ]
|
||||
)
|
||||
|
|
|
@ -56,11 +56,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
|
|||
};
|
||||
|
||||
specialisation.reloadRestartSystem.configuration = {
|
||||
services.nginx.package = pkgs.nginxUnstable;
|
||||
services.nginx.package = pkgs.nginxMainline;
|
||||
};
|
||||
|
||||
specialisation.reloadWithErrorsSystem.configuration = {
|
||||
services.nginx.package = pkgs.nginxUnstable;
|
||||
services.nginx.package = pkgs.nginxMainline;
|
||||
services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
|
||||
};
|
||||
};
|
||||
|
|
|
@ -88,15 +88,15 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
|
|||
|
||||
with subtest("no authentication required"):
|
||||
pomerium.succeed(
|
||||
"curl --resolve my.website:80:127.0.0.1 http://my.website | grep -q 'hello world'"
|
||||
"curl --resolve my.website:80:127.0.0.1 http://my.website | grep 'hello world'"
|
||||
)
|
||||
|
||||
with subtest("login required"):
|
||||
pomerium.succeed(
|
||||
"curl -I --resolve login.required:80:127.0.0.1 http://login.required | grep -q pom-auth"
|
||||
"curl -I --resolve login.required:80:127.0.0.1 http://login.required | grep pom-auth"
|
||||
)
|
||||
pomerium.succeed(
|
||||
"curl -L --resolve login.required:80:127.0.0.1 http://login.required | grep -q 'hello I am login page'"
|
||||
"curl -L --resolve login.required:80:127.0.0.1 http://login.required | grep 'hello I am login page'"
|
||||
)
|
||||
'';
|
||||
})
|
||||
|
|
|
@ -71,7 +71,7 @@ let
|
|||
wait_for_open_port(3551)
|
||||
wait_for_unit("prometheus-apcupsd-exporter.service")
|
||||
wait_for_open_port(9162)
|
||||
succeed("curl -sSf http://localhost:9162/metrics | grep -q 'apcupsd_info'")
|
||||
succeed("curl -sSf http://localhost:9162/metrics | grep 'apcupsd_info'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -85,7 +85,7 @@ let
|
|||
wait_for_unit("prometheus-artifactory-exporter.service")
|
||||
wait_for_open_port(9531)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9531/metrics | grep -q 'artifactory_up'"
|
||||
"curl -sSf http://localhost:9531/metrics | grep 'artifactory_up'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -106,7 +106,7 @@ let
|
|||
wait_for_unit("prometheus-bind-exporter.service")
|
||||
wait_for_open_port(9119)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9119/metrics | grep -q 'bind_query_recursions_total 0'"
|
||||
"curl -sSf http://localhost:9119/metrics | grep 'bind_query_recursions_total 0'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -135,7 +135,7 @@ let
|
|||
wait_for_unit("prometheus-bird-exporter.service")
|
||||
wait_for_open_port(9324)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf http://localhost:9324/metrics | grep -q 'MyObviousTestString'"
|
||||
"curl -sSf http://localhost:9324/metrics | grep 'MyObviousTestString'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -154,7 +154,7 @@ let
|
|||
wait_for_unit("prometheus-bitcoin-exporter.service")
|
||||
wait_for_unit("bitcoind-default.service")
|
||||
wait_for_open_port(9332)
|
||||
succeed("curl -sSf http://localhost:9332/metrics | grep -q '^bitcoin_blocks '")
|
||||
succeed("curl -sSf http://localhost:9332/metrics | grep '^bitcoin_blocks '")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -172,7 +172,7 @@ let
|
|||
wait_for_unit("prometheus-blackbox-exporter.service")
|
||||
wait_for_open_port(9115)
|
||||
succeed(
|
||||
"curl -sSf 'http://localhost:9115/probe?target=localhost&module=icmp_v6' | grep -q 'probe_success 1'"
|
||||
"curl -sSf 'http://localhost:9115/probe?target=localhost&module=icmp_v6' | grep 'probe_success 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -204,7 +204,7 @@ let
|
|||
"curl -sSfH 'Content-Type: application/json' -X POST --data @/tmp/data.json localhost:9103/collectd"
|
||||
)
|
||||
succeed(
|
||||
"curl -sSf localhost:9103/metrics | grep -q 'collectd_testplugin_gauge{instance=\"testhost\"} 23'"
|
||||
"curl -sSf localhost:9103/metrics | grep 'collectd_testplugin_gauge{instance=\"testhost\"} 23'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -220,7 +220,7 @@ let
|
|||
exporterTest = ''
|
||||
wait_for_unit("prometheus-dnsmasq-exporter.service")
|
||||
wait_for_open_port(9153)
|
||||
succeed("curl -sSf http://localhost:9153/metrics | grep -q 'dnsmasq_leases 0'")
|
||||
succeed("curl -sSf http://localhost:9153/metrics | grep 'dnsmasq_leases 0'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -235,7 +235,7 @@ let
|
|||
wait_for_unit("prometheus-domain-exporter.service")
|
||||
wait_for_open_port(9222)
|
||||
succeed(
|
||||
"curl -sSf 'http://localhost:9222/probe?target=nixos.org' | grep -q 'domain_probe_success 0'"
|
||||
"curl -sSf 'http://localhost:9222/probe?target=nixos.org' | grep 'domain_probe_success 0'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -254,7 +254,7 @@ let
|
|||
wait_for_unit("prometheus-dovecot-exporter.service")
|
||||
wait_for_open_port(9166)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9166/metrics | grep -q 'dovecot_up{scope=\"global\"} 1'"
|
||||
"curl -sSf http://localhost:9166/metrics | grep 'dovecot_up{scope=\"global\"} 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -268,7 +268,7 @@ let
|
|||
wait_for_unit("prometheus-fritzbox-exporter.service")
|
||||
wait_for_open_port(9133)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9133/metrics | grep -q 'fritzbox_exporter_collect_errors 0'"
|
||||
"curl -sSf http://localhost:9133/metrics | grep 'fritzbox_exporter_collect_errors 0'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -290,9 +290,9 @@ let
|
|||
wait_for_unit("prometheus-jitsi-exporter.service")
|
||||
wait_for_open_port(9700)
|
||||
wait_until_succeeds(
|
||||
'journalctl -eu prometheus-jitsi-exporter.service -o cat | grep -q "key=participants"'
|
||||
'journalctl -eu prometheus-jitsi-exporter.service -o cat | grep "key=participants"'
|
||||
)
|
||||
succeed("curl -sSf 'localhost:9700/metrics' | grep -q 'jitsi_participants 0'")
|
||||
succeed("curl -sSf 'localhost:9700/metrics' | grep 'jitsi_participants 0'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -321,7 +321,7 @@ let
|
|||
wait_for_unit("prometheus-json-exporter.service")
|
||||
wait_for_open_port(7979)
|
||||
succeed(
|
||||
"curl -sSf 'localhost:7979/probe?target=http://localhost' | grep -q 'json_test_metric 1'"
|
||||
"curl -sSf 'localhost:7979/probe?target=http://localhost' | grep 'json_test_metric 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -426,7 +426,7 @@ let
|
|||
wait_for_unit("knot.service")
|
||||
wait_for_unit("prometheus-knot-exporter.service")
|
||||
wait_for_open_port(9433)
|
||||
succeed("curl -sSf 'localhost:9433' | grep -q 'knot_server_zone_count 1.0'")
|
||||
succeed("curl -sSf 'localhost:9433' | grep 'knot_server_zone_count 1.0'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -441,10 +441,10 @@ let
|
|||
wait_for_unit("prometheus-keylight-exporter.service")
|
||||
wait_for_open_port(9288)
|
||||
succeed(
|
||||
"curl -sS --write-out '%{http_code}' -o /dev/null http://localhost:9288/metrics | grep -q '400'"
|
||||
"curl -sS --write-out '%{http_code}' -o /dev/null http://localhost:9288/metrics | grep '400'"
|
||||
)
|
||||
succeed(
|
||||
"curl -sS --write-out '%{http_code}' -o /dev/null http://localhost:9288/metrics?target=nosuchdevice | grep -q '500'"
|
||||
"curl -sS --write-out '%{http_code}' -o /dev/null http://localhost:9288/metrics?target=nosuchdevice | grep '500'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -489,7 +489,7 @@ let
|
|||
wait_for_open_port(10009)
|
||||
wait_for_unit("prometheus-lnd-exporter.service")
|
||||
wait_for_open_port(9092)
|
||||
succeed("curl -sSf localhost:9092/metrics | grep -q '^promhttp_metric_handler'")
|
||||
succeed("curl -sSf localhost:9092/metrics | grep '^promhttp_metric_handler'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -531,7 +531,7 @@ let
|
|||
wait_for_unit("prometheus-mail-exporter.service")
|
||||
wait_for_open_port(9225)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf http://localhost:9225/metrics | grep -q 'mail_deliver_success{configname=\"testserver\"} 1'"
|
||||
"curl -sSf http://localhost:9225/metrics | grep 'mail_deliver_success{configname=\"testserver\"} 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -571,7 +571,7 @@ let
|
|||
wait_for_unit("prometheus-mikrotik-exporter.service")
|
||||
wait_for_open_port(9436)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9436/metrics | grep -q 'mikrotik_scrape_collector_success{device=\"router\"} 0'"
|
||||
"curl -sSf http://localhost:9436/metrics | grep 'mikrotik_scrape_collector_success{device=\"router\"} 0'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -596,7 +596,7 @@ let
|
|||
wait_for_unit("prometheus-modemmanager-exporter.service")
|
||||
wait_for_open_port(9539)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9539/metrics | grep -q 'modemmanager_info'"
|
||||
"curl -sSf http://localhost:9539/metrics | grep 'modemmanager_info'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -634,7 +634,7 @@ let
|
|||
wait_for_unit("nginx.service")
|
||||
wait_for_unit("prometheus-nextcloud-exporter.service")
|
||||
wait_for_open_port(9205)
|
||||
succeed("curl -sSf http://localhost:9205/metrics | grep -q 'nextcloud_up 1'")
|
||||
succeed("curl -sSf http://localhost:9205/metrics | grep 'nextcloud_up 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -653,7 +653,7 @@ let
|
|||
wait_for_unit("nginx.service")
|
||||
wait_for_unit("prometheus-nginx-exporter.service")
|
||||
wait_for_open_port(9113)
|
||||
succeed("curl -sSf http://localhost:9113/metrics | grep -q 'nginx_up 1'")
|
||||
succeed("curl -sSf http://localhost:9113/metrics | grep 'nginx_up 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -708,12 +708,12 @@ let
|
|||
succeed("curl http://localhost")
|
||||
execute("sleep 1")
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9117/metrics | grep 'filelogger_http_response_count_total' | grep -q 1"
|
||||
"curl -sSf http://localhost:9117/metrics | grep 'filelogger_http_response_count_total' | grep 1"
|
||||
)
|
||||
succeed("curl http://localhost:81")
|
||||
execute("sleep 1")
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9117/metrics | grep 'syslogger_http_response_count_total' | grep -q 1"
|
||||
"curl -sSf http://localhost:9117/metrics | grep 'syslogger_http_response_count_total' | grep 1"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -726,7 +726,7 @@ let
|
|||
wait_for_unit("prometheus-node-exporter.service")
|
||||
wait_for_open_port(9100)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9100/metrics | grep -q 'node_exporter_build_info{.\\+} 1'"
|
||||
"curl -sSf http://localhost:9100/metrics | grep 'node_exporter_build_info{.\\+} 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -786,7 +786,7 @@ let
|
|||
wait_for_open_port(389)
|
||||
wait_for_open_port(9330)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf http://localhost:9330/metrics | grep -q 'openldap_scrape{result=\"ok\"} 1'"
|
||||
"curl -sSf http://localhost:9330/metrics | grep 'openldap_scrape{result=\"ok\"} 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -812,7 +812,7 @@ let
|
|||
exporterTest = ''
|
||||
wait_for_unit("openvpn-test.service")
|
||||
wait_for_unit("prometheus-openvpn-exporter.service")
|
||||
succeed("curl -sSf http://localhost:9176/metrics | grep -q 'openvpn_up{.*} 1'")
|
||||
succeed("curl -sSf http://localhost:9176/metrics | grep 'openvpn_up{.*} 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -828,9 +828,9 @@ let
|
|||
wait_for_file("/var/lib/postfix/queue/public/showq")
|
||||
wait_for_open_port(9154)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9154/metrics | grep -q 'postfix_smtpd_connects_total 0'"
|
||||
"curl -sSf http://localhost:9154/metrics | grep 'postfix_smtpd_connects_total 0'"
|
||||
)
|
||||
succeed("curl -sSf http://localhost:9154/metrics | grep -q 'postfix_up{.*} 1'")
|
||||
succeed("curl -sSf http://localhost:9154/metrics | grep 'postfix_up{.*} 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@@ -847,20 +847,39 @@ let
wait_for_open_port(9187)
wait_for_unit("postgresql.service")
succeed(
"curl -sSf http://localhost:9187/metrics | grep -q 'pg_exporter_last_scrape_error 0'"
"curl -sSf http://localhost:9187/metrics | grep 'pg_exporter_last_scrape_error 0'"
)
succeed("curl -sSf http://localhost:9187/metrics | grep -q 'pg_up 1'")
succeed("curl -sSf http://localhost:9187/metrics | grep 'pg_up 1'")
systemctl("stop postgresql.service")
succeed(
"curl -sSf http://localhost:9187/metrics | grep -qv 'pg_exporter_last_scrape_error 0'"
"curl -sSf http://localhost:9187/metrics | grep -v 'pg_exporter_last_scrape_error 0'"
)
succeed("curl -sSf http://localhost:9187/metrics | grep -q 'pg_up 0'")
succeed("curl -sSf http://localhost:9187/metrics | grep 'pg_up 0'")
systemctl("start postgresql.service")
wait_for_unit("postgresql.service")
succeed(
"curl -sSf http://localhost:9187/metrics | grep -q 'pg_exporter_last_scrape_error 0'"
"curl -sSf http://localhost:9187/metrics | grep 'pg_exporter_last_scrape_error 0'"
)
succeed("curl -sSf http://localhost:9187/metrics | grep 'pg_up 1'")
'';
};

process = {
exporterConfig = {
enable = true;
settings.process_names = [
# Remove nix store path from process name
{ name = "{{.Matches.Wrapped}} {{ .Matches.Args }}"; cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ]; }
];
};
exporterTest = ''
wait_for_unit("prometheus-process-exporter.service")
wait_for_open_port(9256)
wait_until_succeeds(
"curl -sSf localhost:9256/metrics | grep -q '{}'".format(
'namedprocess_namegroup_cpu_seconds_total{groupname="process-exporter '
)
)
succeed("curl -sSf http://localhost:9187/metrics | grep -q 'pg_up 1'")
'';
};
@ -874,7 +893,7 @@ let
|
|||
wait_for_unit("prometheus-py-air-control-exporter.service")
|
||||
wait_for_open_port(9896)
|
||||
succeed(
|
||||
"curl -sSf http://localhost:9896/metrics | grep -q 'py_air_control_sampling_error_total'"
|
||||
"curl -sSf http://localhost:9896/metrics | grep 'py_air_control_sampling_error_total'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -889,7 +908,7 @@ let
|
|||
wait_for_unit("prometheus-redis-exporter.service")
|
||||
wait_for_open_port(6379)
|
||||
wait_for_open_port(9121)
|
||||
wait_until_succeeds("curl -sSf localhost:9121/metrics | grep -q 'redis_up 1'")
|
||||
wait_until_succeeds("curl -sSf localhost:9121/metrics | grep 'redis_up 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -907,7 +926,7 @@ let
|
|||
wait_for_open_port(11334)
|
||||
wait_for_open_port(7980)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf 'localhost:7980/probe?target=http://localhost:11334/stat' | grep -q 'rspamd_scanned{host=\"rspamd\"} 0'"
|
||||
"curl -sSf 'localhost:7980/probe?target=http://localhost:11334/stat' | grep 'rspamd_scanned{host=\"rspamd\"} 0'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -938,7 +957,7 @@ let
|
|||
wait_for_unit("prometheus-rtl_433-exporter.service")
|
||||
wait_for_open_port(9550)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf localhost:9550/metrics | grep -q '{}'".format(
|
||||
"curl -sSf localhost:9550/metrics | grep '{}'".format(
|
||||
'rtl_433_temperature_celsius{channel="3",id="55",location="",model="zopieux"} 18'
|
||||
)
|
||||
)
|
||||
|
@ -954,12 +973,12 @@ let
|
|||
wait_for_unit("prometheus-smokeping-exporter.service")
|
||||
wait_for_open_port(9374)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf localhost:9374/metrics | grep '{}' | grep -qv ' 0$'".format(
|
||||
"curl -sSf localhost:9374/metrics | grep '{}' | grep -v ' 0$'".format(
|
||||
'smokeping_requests_total{host="127.0.0.1",ip="127.0.0.1"} '
|
||||
)
|
||||
)
|
||||
wait_until_succeeds(
|
||||
"curl -sSf localhost:9374/metrics | grep -q '{}'".format(
|
||||
"curl -sSf localhost:9374/metrics | grep '{}'".format(
|
||||
'smokeping_response_ttl{host="127.0.0.1",ip="127.0.0.1"}'
|
||||
)
|
||||
)
|
||||
|
@ -977,7 +996,7 @@ let
|
|||
exporterTest = ''
|
||||
wait_for_unit("prometheus-snmp-exporter.service")
|
||||
wait_for_open_port(9116)
|
||||
succeed("curl -sSf localhost:9116/metrics | grep -q 'snmp_request_errors_total 0'")
|
||||
succeed("curl -sSf localhost:9116/metrics | grep 'snmp_request_errors_total 0'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -1021,7 +1040,7 @@ let
|
|||
exporterTest = ''
|
||||
wait_for_unit("prometheus-sql-exporter.service")
|
||||
wait_for_open_port(9237)
|
||||
succeed("curl http://localhost:9237/metrics | grep -c 'sql_points{' | grep -q 2")
|
||||
succeed("curl http://localhost:9237/metrics | grep -c 'sql_points{' | grep 2")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -1044,7 +1063,7 @@ let
|
|||
wait_for_open_port(80)
|
||||
wait_for_unit("prometheus-surfboard-exporter.service")
|
||||
wait_for_open_port(9239)
|
||||
succeed("curl -sSf localhost:9239/metrics | grep -q 'surfboard_up 1'")
|
||||
succeed("curl -sSf localhost:9239/metrics | grep 'surfboard_up 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -1057,7 +1076,7 @@ let
|
|||
wait_for_unit("prometheus-systemd-exporter.service")
|
||||
wait_for_open_port(9558)
|
||||
succeed(
|
||||
"curl -sSf localhost:9558/metrics | grep -q '{}'".format(
|
||||
"curl -sSf localhost:9558/metrics | grep '{}'".format(
|
||||
'systemd_unit_state{name="basic.target",state="active",type="target"} 1'
|
||||
)
|
||||
)
|
||||
|
@ -1079,7 +1098,7 @@ let
|
|||
wait_for_open_port(9051)
|
||||
wait_for_unit("prometheus-tor-exporter.service")
|
||||
wait_for_open_port(9130)
|
||||
succeed("curl -sSf localhost:9130/metrics | grep -q 'tor_version{.\\+} 1'")
|
||||
succeed("curl -sSf localhost:9130/metrics | grep 'tor_version{.\\+} 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -1091,7 +1110,7 @@ let
|
|||
wait_for_unit("prometheus-unifi-poller-exporter.service")
|
||||
wait_for_open_port(9130)
|
||||
succeed(
|
||||
"curl -sSf localhost:9130/metrics | grep -q 'unifipoller_build_info{.\\+} 1'"
|
||||
"curl -sSf localhost:9130/metrics | grep 'unifipoller_build_info{.\\+} 1'"
|
||||
)
|
||||
'';
|
||||
};
|
||||
|
@ -1115,7 +1134,7 @@ let
|
|||
wait_for_unit("unbound.service")
|
||||
wait_for_unit("prometheus-unbound-exporter.service")
|
||||
wait_for_open_port(9167)
|
||||
succeed("curl -sSf localhost:9167/metrics | grep -q 'unbound_up 1'")
|
||||
succeed("curl -sSf localhost:9167/metrics | grep 'unbound_up 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -1144,7 +1163,7 @@ let
|
|||
wait_for_unit("prometheus-varnish-exporter.service")
|
||||
wait_for_open_port(6081)
|
||||
wait_for_open_port(9131)
|
||||
succeed("curl -sSf http://localhost:9131/metrics | grep -q 'varnish_up 1'")
|
||||
succeed("curl -sSf http://localhost:9131/metrics | grep 'varnish_up 1'")
|
||||
'';
|
||||
};
|
||||
|
||||
|
|
|
@@ -1,5 +1,5 @@
import ./make-test-python.nix {
name = "opensmtpd";
name = "rss2email";

nodes = {
server = { pkgs, ... }: {
|
@ -28,7 +28,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...}:
|
|||
machine.wait_for_unit("shiori.service")
|
||||
machine.wait_for_open_port(8080)
|
||||
machine.succeed("curl --fail http://localhost:8080/")
|
||||
machine.succeed("curl --fail --location http://localhost:8080/ | grep -qi shiori")
|
||||
machine.succeed("curl --fail --location http://localhost:8080/ | grep -i shiori")
|
||||
|
||||
with subtest("login"):
|
||||
auth_json = machine.succeed(
|
||||
|
|
|
@@ -10,7 +10,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {

services.mysql = {
enable = true;
package = pkgs.mysql;
package = pkgs.mariadb;
ensureDatabases = [ "sogo" ];
ensureUsers = [{
name = "sogo";
nixos/tests/sourcehut.nix (new file, 29 lines)
@@ -0,0 +1,29 @@
import ./make-test-python.nix ({ pkgs, ... }:

{
  name = "sourcehut";

  meta.maintainers = [ pkgs.lib.maintainers.tomberek ];

  machine = { config, pkgs, ... }: {
    virtualisation.memorySize = 2048;
    networking.firewall.allowedTCPPorts = [ 80 ];

    services.sourcehut = {
      enable = true;
      services = [ "meta" ];
      originBase = "sourcehut";
      settings."sr.ht".service-key = "8888888888888888888888888888888888888888888888888888888888888888";
      settings."sr.ht".network-key = "0000000000000000000000000000000000000000000=";
      settings.webhooks.private-key = "0000000000000000000000000000000000000000000=";
    };
  };

  testScript = ''
    start_all()
    machine.wait_for_unit("multi-user.target")
    machine.wait_for_unit("metasrht.service")
    machine.wait_for_open_port(5000)
    machine.succeed("curl -sL http://localhost:5000 | grep meta.sourcehut")
  '';
})
|
@@ -119,7 +119,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
|
|||
|
||||
with subtest("Setup"):
|
||||
result = machine.succeed(
|
||||
"set -o pipefail; curl -sSf localhost:3000/finalize -X POST -d "
|
||||
"curl -sSf localhost:3000/finalize -X POST -d "
|
||||
+ "@${payloads.finalize} -H 'Content-Type: application/json' "
|
||||
+ "| jq .ok | xargs echo"
|
||||
)
|
||||
|
@ -132,7 +132,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
|
|||
|
||||
with subtest("Base functionality"):
|
||||
auth = machine.succeed(
|
||||
"set -o pipefail; curl -sSf localhost:3000/graphql -X POST "
|
||||
"curl -sSf localhost:3000/graphql -X POST "
|
||||
+ "-d @${payloads.login} -H 'Content-Type: application/json' "
|
||||
+ "| jq '.[0].data.authentication.login.jwt' | xargs echo"
|
||||
).strip()
|
||||
|
@ -140,7 +140,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
|
|||
assert auth
|
||||
|
||||
create = machine.succeed(
|
||||
"set -o pipefail; curl -sSf localhost:3000/graphql -X POST "
|
||||
"curl -sSf localhost:3000/graphql -X POST "
|
||||
+ "-d @${payloads.content} -H 'Content-Type: application/json' "
|
||||
+ f"-H 'Authorization: Bearer {auth}' "
|
||||
+ "| jq '.[0].data.pages.create.responseResult.succeeded'|xargs echo"
|
||||
|
|
|
@ -44,7 +44,7 @@ import ./make-test-python.nix (
|
|||
xandikos_default.wait_for_open_port(8080)
|
||||
xandikos_default.succeed("curl --fail http://localhost:8080/")
|
||||
xandikos_default.succeed(
|
||||
"curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
|
||||
"curl -s --fail --location http://localhost:8080/ | grep -i Xandikos"
|
||||
)
|
||||
xandikos_client.wait_for_unit("network.target")
|
||||
xandikos_client.fail("curl --fail http://xandikos_default:8080/")
|
||||
|
@ -55,15 +55,15 @@ import ./make-test-python.nix (
|
|||
xandikos_proxy.wait_for_open_port(8080)
|
||||
xandikos_proxy.succeed("curl --fail http://localhost:8080/")
|
||||
xandikos_proxy.succeed(
|
||||
"curl -s --fail --location http://localhost:8080/ | grep -qi Xandikos"
|
||||
"curl -s --fail --location http://localhost:8080/ | grep -i Xandikos"
|
||||
)
|
||||
xandikos_client.wait_for_unit("network.target")
|
||||
xandikos_client.fail("curl --fail http://xandikos_proxy:8080/")
|
||||
xandikos_client.succeed(
|
||||
"curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/ | grep -qi Xandikos"
|
||||
"curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/ | grep -i Xandikos"
|
||||
)
|
||||
xandikos_client.succeed(
|
||||
"curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/user/ | grep -qi Xandikos"
|
||||
"curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/user/ | grep -i Xandikos"
|
||||
)
|
||||
'';
|
||||
}
|
||||
|
|
|
@@ -1,12 +1,12 @@
{ lib, stdenv, fetchFromGitHub, libX11, cairo, lv2, pkg-config, libsndfile }:

stdenv.mkDerivation rec {
pname = "BJumblr";
pname = "bjumblr";
version = "1.6.6";

src = fetchFromGitHub {
owner = "sjaehn";
repo = pname;
repo = "BJumblr";
rev = version;
sha256 = "1nbxi54023vck3qgmr385cjzinmdnvz62ywb6bcksmc3shl080mg";
};
pkgs/applications/audio/boops/default.nix (new file, 28 lines)
@@ -0,0 +1,28 @@
{ stdenv, lib, fetchFromGitHub, xorg, cairo, lv2, libsndfile, pkg-config }:

stdenv.mkDerivation rec {
  pname = "boops";
  version = "1.6.0";

  src = fetchFromGitHub {
    owner = "sjaehn";
    repo = "BOops";
    rev = version;
    sha256 = "sha256-7eNvt8PxIZCp83Y5XX5fBolBon4j+HPtu8wrgG8Miok=";
  };

  nativeBuildInputs = [ pkg-config ];
  buildInputs = [
    xorg.libX11 cairo lv2 libsndfile
  ];

  installFlags = [ "PREFIX=$(out)" ];

  meta = with lib; {
    homepage = "https://github.com/sjaehn/BOops";
    description = "Sound glitch effect sequencer LV2 plugin";
    maintainers = [ maintainers.magnetophon ];
    platforms = platforms.linux;
    license = licenses.gpl3Plus;
  };
}
|
|
@ -1,12 +1,12 @@
|
|||
{ lib, stdenv, fetchFromGitHub, xorg, cairo, lv2, pkg-config }:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "BSEQuencer";
|
||||
pname = "bsequencer";
|
||||
version = "1.8.8";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "sjaehn";
|
||||
repo = pname;
|
||||
repo = "BSEQuencer";
|
||||
rev = version;
|
||||
sha256 = "sha256-OArIMf0XP9CKDdb3H4s8jMzVRjoLFQDPmTS9rS2KW3w=";
|
||||
};
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
{ lib, stdenv, fetchFromGitHub, xorg, cairo, lv2, pkg-config }:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "BShapr";
|
||||
pname = "bshapr";
|
||||
version = "0.12";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "sjaehn";
|
||||
repo = pname;
|
||||
repo = "BShapr";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-2DySlD5ZTxeQ2U++Dr67bek5oVbAiOHCxM6S5rTTZN0=";
|
||||
};
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
{ lib, stdenv, fetchFromGitHub, xorg, cairo, lv2, pkg-config }:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "BSlizr";
|
||||
pname = "bslizr";
|
||||
version = "1.2.14";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "sjaehn";
|
||||
repo = pname;
|
||||
repo = "BSlizr";
|
||||
rev = version;
|
||||
sha256 = "sha256-dut3I68tJWQH+X6acKROqb5HywufeBQ4/HkXFKsA3hY=";
|
||||
};
|
||||
|
|
pkgs/applications/audio/diopser/default.nix (new file, 76 lines)
@@ -0,0 +1,76 @@
|
|||
{ lib, stdenv, fetchFromGitHub, cmake, pkg-config
|
||||
, libjack2, alsaLib, freetype, libX11, libXrandr, libXinerama, libXext, libXcursor
|
||||
}:
|
||||
|
||||
let
|
||||
|
||||
# Derived from subprojects/function2.wrap
|
||||
function2 = rec {
|
||||
version = "4.1.0";
|
||||
src = fetchFromGitHub {
|
||||
owner = "Naios";
|
||||
repo = "function2";
|
||||
rev = version;
|
||||
hash = "sha256-JceZU8ZvtYhFheh8BjMvjjZty4hcYxHEK+IIo5X4eSk=";
|
||||
};
|
||||
};
|
||||
|
||||
juce = rec {
|
||||
version = "unstable-2021-04-07";
|
||||
src = fetchFromGitHub {
|
||||
owner = "juce-framework";
|
||||
repo = "JUCE";
|
||||
rev = "1a5fb5992a1a4e28e998708ed8dce2cc864a30d7";
|
||||
sha256= "1ri7w4sz3sy5xilibg53ls9526fx7jwbv8rc54ccrqfhxqyin308";
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
in stdenv.mkDerivation rec {
|
||||
pname = "diopser";
|
||||
version = "unstable-2021-5-13";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "robbert-vdh";
|
||||
repo = pname;
|
||||
fetchSubmodules = true;
|
||||
rev = "d5fdc92f1caf5a828e071dac99e106e58f06d84d";
|
||||
sha256 = "06y1h895yxh44gp4vxzrna59lf7nlfw7aacd3kk4l1g56jhy9pdx";
|
||||
};
|
||||
|
||||
postUnpack = ''
|
||||
(
|
||||
cd "$sourceRoot"
|
||||
cp -R --no-preserve=mode,ownership ${function2.src} function2
|
||||
cp -R --no-preserve=mode,ownership ${juce.src} JUCE
|
||||
sed -i 's@CPMAddPackage("gh:juce-framework/JUCE.*@add_subdirectory(JUCE)@g' CMakeLists.txt
|
||||
sed -i 's@CPMAddPackage("gh:Naios/function2.*@add_subdirectory(function2)@g' CMakeLists.txt
|
||||
patchShebangs .
|
||||
)
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/lib/vst3
|
||||
cp -r Diopser_artefacts/Release/VST3/Diopser.vst3 $out/lib/vst3
|
||||
'';
|
||||
|
||||
nativeBuildInputs = [ cmake pkg-config ];
|
||||
|
||||
buildInputs = [
|
||||
libjack2 alsaLib freetype libX11 libXrandr libXinerama libXext
|
||||
libXcursor
|
||||
];
|
||||
|
||||
cmakeFlags = [
|
||||
"-DCMAKE_AR=${stdenv.cc.cc}/bin/gcc-ar"
|
||||
"-DCMAKE_RANLIB=${stdenv.cc.cc}/bin/gcc-ranlib"
|
||||
];
|
||||
|
||||
meta = with lib; {
|
||||
description = "A totally original phase rotation plugin";
|
||||
homepage = "https://github.com/robbert-vdh/diopser";
|
||||
license = licenses.gpl3Plus;
|
||||
maintainers = with maintainers; [ magnetophon ];
|
||||
platforms = platforms.all;
|
||||
};
|
||||
}
|
|
@@ -20,13 +20,13 @@ with lib.strings;

let

version = "unstable-2020-08-27";
version = "2.30.5";

src = fetchFromGitHub {
owner = "grame-cncm";
repo = "faust";
rev = "c10f316fa90f338e248787ebf55e3795c3a0d70e";
sha256 = "068pm04ddafbsj2r8akdpqyzb0m8mp9ql0rgi83hcqs4ndr8v7sb";
rev = version;
sha256 = "0cs52w4rwaj5d8pjak4cxsg02sxvx4y07592nc3ck81clqjmszmm";
fetchSubmodules = true;
};
|
|
@@ -1,22 +1,24 @@
|
|||
{ lib, stdenv, fetchFromGitHub
|
||||
, llvm, qt48Full, qrencode, libmicrohttpd_0_9_70, libjack2, alsaLib, faust, curl
|
||||
, bc, coreutils, which, libsndfile, pkg-config
|
||||
, llvm_10, qt5, qrencode, libmicrohttpd, libjack2, alsaLib, faust, curl
|
||||
, bc, coreutils, which, libsndfile, pkg-config, libxcb
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "faustlive";
|
||||
version = "unstable-dev-2020-08-03";
|
||||
version = "2.5.5";
|
||||
src = fetchFromGitHub {
|
||||
owner = "grame-cncm";
|
||||
repo = "faustlive";
|
||||
rev = "c16565dc1b616ac0aad7c303c1997fa9e57177ab";
|
||||
sha256 = "1ys661lp1xwz21vy12kwkg248jvjq1z9w433knkh0ldyy2igvmd5";
|
||||
rev = version;
|
||||
sha256 = "0qbn05nq170ckycwalkk5fppklc4g457mapr7p7ryrhc1hwzffm9";
|
||||
fetchSubmodules = true;
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ pkg-config qt5.wrapQtAppsHook ];
|
||||
|
||||
buildInputs = [
|
||||
llvm qt48Full qrencode libmicrohttpd_0_9_70 libjack2 alsaLib faust curl
|
||||
bc coreutils which libsndfile pkg-config
|
||||
llvm_10 qt5.qtbase qrencode libmicrohttpd libjack2 alsaLib faust curl
|
||||
bc coreutils which libsndfile libxcb
|
||||
];
|
||||
|
||||
makeFlags = [ "PREFIX=$(out)" ];
|
||||
|
@ -39,5 +41,6 @@ stdenv.mkDerivation rec {
|
|||
'';
|
||||
homepage = "https://faust.grame.fr/";
|
||||
license = licenses.gpl3;
|
||||
maintainers = with maintainers; [ magnetophon ];
|
||||
};
|
||||
}
|
||||
|
|
pkgs/applications/audio/faustPhysicalModeling/default.nix (new file, 39 lines)
@@ -0,0 +1,39 @@
|
|||
{ stdenv, lib, fetchFromGitHub, faust2jaqt, faust2lv2 }:
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "faustPhysicalModeling";
|
||||
version = "2.20.2";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "grame-cncm";
|
||||
repo = "faust";
|
||||
rev = version;
|
||||
sha256 = "1mm93ba26b7q69hvabzalg30dh8pl858nj4m2bb57pznnp09lq9a";
|
||||
};
|
||||
|
||||
buildInputs = [ faust2jaqt faust2lv2 ];
|
||||
|
||||
buildPhase = ''
|
||||
cd examples/physicalModeling
|
||||
|
||||
for f in *MIDI.dsp; do
|
||||
faust2jaqt -time -vec -double -midi -nvoices 16 -t 99999 $f
|
||||
faust2lv2 -time -vec -double -gui -nvoices 16 -t 99999 $f
|
||||
done
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/lib/lv2 $out/bin
|
||||
mv *.lv2/ $out/lib/lv2
|
||||
for f in $(find . -executable -type f); do
|
||||
cp $f $out/bin/
|
||||
done
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
description = "The physical models included with faust compiled as jack standalone and lv2 instruments";
|
||||
homepage = "https://github.com/grame-cncm/faust/tree/master-dev/examples/physicalModeling";
|
||||
license = licenses.mit;
|
||||
platforms = platforms.linux;
|
||||
maintainers = with maintainers; [ magnetophon ];
|
||||
};
|
||||
}
|
pkgs/applications/audio/faustStk/default.nix (new file, 39 lines)
@@ -0,0 +1,39 @@
|
|||
{ stdenv, lib, fetchFromGitHub, faust2jaqt, faust2lv2 }:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "faustPhhysicalModeling";
|
||||
version = "2.20.2";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "grame-cncm";
|
||||
repo = "faust";
|
||||
rev = version;
|
||||
sha256 = "1mm93ba26b7q69hvabzalg30dh8pl858nj4m2bb57pznnp09lq9a";
|
||||
};
|
||||
|
||||
buildInputs = [ faust2jaqt faust2lv2 ];
|
||||
|
||||
buildPhase = ''
|
||||
cd examples/physicalModeling/faust-stk
|
||||
|
||||
for f in *.dsp; do
|
||||
faust2jaqt -time -vec -midi -nvoices 8 -t 99999 $f
|
||||
faust2lv2 -time -vec -double -gui -nvoices 32 -t 99999 $f
|
||||
done
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/lib/lv2 $out/bin
|
||||
mv *.lv2/ $out/lib/lv2
|
||||
for f in $(find . -executable -type f); do
|
||||
cp $f $out/bin/
|
||||
done
|
||||
'';
|
||||
meta = with lib; {
|
||||
description = "The physical modeling instruments included with faust, compiled as jack standalone and lv2 instruments";
|
||||
homepage = "https://ccrma.stanford.edu/~rmichon/faustSTK/";
|
||||
license = licenses.stk;
|
||||
platforms = platforms.linux;
|
||||
maintainers = with maintainers; [ magnetophon ];
|
||||
};
|
||||
}
|
|
@ -12,13 +12,13 @@
|
|||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "gwc";
|
||||
version = "0.22-04";
|
||||
version = "0.22-05";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "AlisterH";
|
||||
repo = pname;
|
||||
rev = version;
|
||||
sha256 = "0xvfra32dchnnyf9kj5s5xmqhln8jdrc9f0040hjr2dsb58y206p";
|
||||
sha256 = "sha256-FHKu5qAyRyMxXdWYTCeAc6Q4J+NOaU1SGgoTbe0PiFE=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
{ lib
|
||||
, fetchFromGitLab
|
||||
, makeDesktopItem
|
||||
, copyDesktopItems
|
||||
, rustPlatform
|
||||
, pkg-config
|
||||
, clang
|
||||
|
@ -23,11 +25,19 @@ rustPlatform.buildRustPackage rec {
|
|||
|
||||
cargoSha256 = "sha256-uNTSU06Fz/ud04K40e98rb7o/uAht0DsiJOXeHX72vw=";
|
||||
|
||||
nativeBuildInputs = [ clang pkg-config ];
|
||||
nativeBuildInputs = [ clang copyDesktopItems pkg-config ];
|
||||
buildInputs = [ glib gtk4 pipewire ];
|
||||
|
||||
LIBCLANG_PATH = "${libclang.lib}/lib";
|
||||
|
||||
desktopItems = makeDesktopItem {
|
||||
name = "Helvum";
|
||||
exec = pname;
|
||||
desktopName = "Helvum";
|
||||
genericName = "Helvum";
|
||||
categories = "AudioVideo;";
|
||||
};
|
||||
|
||||
meta = with lib; {
|
||||
description = "A GTK patchbay for pipewire";
|
||||
homepage = "https://gitlab.freedesktop.org/ryuukyu/helvum";
|
||||
|
|
pkgs/applications/audio/songrec/default.nix (new file, 35 lines)
@@ -0,0 +1,35 @@
|
|||
{ lib
|
||||
, rustPlatform
|
||||
, fetchFromGitHub
|
||||
, gtk3
|
||||
, openssl
|
||||
, alsaLib
|
||||
, pkg-config
|
||||
, ffmpeg
|
||||
}:
|
||||
|
||||
rustPlatform.buildRustPackage rec {
|
||||
pname = "songrec";
|
||||
version = "0.1.8";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "marin-m";
|
||||
repo = pname;
|
||||
rev = version;
|
||||
sha256 = "sha256-6siGLegNgvLdP7engwpKmhzWYqBXcMsfaXhJJ1tIqJg=";
|
||||
};
|
||||
|
||||
cargoSha256 = "sha256-H4qJYcFjip71EVTGw50goj0HjKN9fmjQZqQDhaSKlaQ=";
|
||||
|
||||
nativeBuildInputs = [ pkg-config ];
|
||||
|
||||
buildInputs = [ alsaLib gtk3 openssl ffmpeg ];
|
||||
|
||||
meta = with lib; {
|
||||
description = "An open-source Shazam client for Linux, written in Rust";
|
||||
homepage = "https://github.com/marin-m/SongRec";
|
||||
license = licenses.gpl3Only;
|
||||
platforms = platforms.linux;
|
||||
maintainers = with maintainers; [ tcbravo ];
|
||||
};
|
||||
}
|
|
@ -2,13 +2,13 @@
|
|||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "stochas";
|
||||
version = "1.3.4";
|
||||
version = "1.3.5";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "surge-synthesizer";
|
||||
repo = pname;
|
||||
rev = "v${version}";
|
||||
sha256 = "0b26mbj727dnygavz4kihnhmnnvwsr9l145w6kydq7bd7nwiw7lq";
|
||||
sha256 = "1z8q53qfigw6wwbvpca92b9pf9d0mv3nyb0fmszz5ikj3pcybi7m";
|
||||
fetchSubmodules = true;
|
||||
};
|
||||
|
||||
|
|
|
@ -14,14 +14,14 @@ stdenv.mkDerivation rec {
|
|||
# this is what upstream calls the package, see:
|
||||
# https://github.com/ryukau/LV2Plugins#uhhyou-plugins-lv2
|
||||
pname = "uhhyou.lv2";
|
||||
version = "unstable-2020-07-31";
|
||||
version = "unstable-2021-02-08";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "ryukau";
|
||||
repo = "LV2Plugins";
|
||||
rev = "6189be67acaeb95452f8adab73a731d94a7b6f47";
|
||||
rev = "df67460fc344f94db4306d4ee21e4207e657bbee";
|
||||
fetchSubmodules = true;
|
||||
sha256 = "049gigx2s89z8vf17gscs00c150lmcdwya311nbrwa18fz4bx242";
|
||||
sha256 = "1a23av35cw26zgq93yzmmw35084hsj29cb7sb04j2silv5qisila";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ pkg-config python3 ];
|
||||
|
@ -31,8 +31,7 @@ stdenv.mkDerivation rec {
|
|||
makeFlags = [ "PREFIX=$(out)" ];
|
||||
|
||||
prePatch = ''
|
||||
patchShebangs generate-ttl.sh
|
||||
cp patch/NanoVG.cpp lib/DPF/dgl/src/NanoVG.cpp
|
||||
patchShebangs generate-ttl.sh patch.sh patch/apply.sh
|
||||
'';
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
@ -41,6 +40,7 @@ stdenv.mkDerivation rec {
|
|||
description = "Audio plugins for Linux";
|
||||
longDescription = ''
|
||||
Plugin List:
|
||||
- CollidingCombSynth
|
||||
- CubicPadSynth
|
||||
- EnvelopedSine
|
||||
- EsPhaser
|
||||
|
|
pkgs/applications/blockchains/charge-lnd/default.nix (new file, 39 lines)
@@ -0,0 +1,39 @@
{ lib, fetchFromGitHub, python3Packages }:

python3Packages.buildPythonApplication rec {
  pname = "charge-lnd";
  version = "0.1.2";

  src = fetchFromGitHub {
    owner = "accumulator";
    repo = pname;
    rev = "v${version}";
    sha256 = "1m1ic69aj2vlnjlp4ckan8n67r01nfysvq4w6nny32wjkr0zvphr";
  };

  propagatedBuildInputs = with python3Packages; [
    aiorpcx
    colorama
    googleapis-common-protos
    grpcio
    protobuf
    six
    termcolor
  ];

  postInstall = ''
    install README.md charge.config.example -Dt $out/share/doc/charge-lnd
  '';

  doInstallCheck = true;
  installCheckPhase = ''
    $out/bin/charge-lnd --help > /dev/null
  '';

  meta = with lib; {
    description = "Simple policy-based fee manager for lightning network daemon";
    homepage = "https://github.com/accumulator/charge-lnd";
    license = licenses.gpl2Plus;
    maintainers = with maintainers; [ mmilata ];
  };
}
|
|
@@ -0,0 +1,36 @@
{ stdenv, fetchFromGitHub, emacs, lib }:

stdenv.mkDerivation {
  pname = "apheleia";
  version = "2021-05-23";

  src = fetchFromGitHub {
    owner = "raxod502";
    repo = "apheleia";
    rev = "f865c165dac606187a66b2b25a57d5099b452120";
    sha256 = "sha256-n37jJsNOGhSjUtQysG3NVIjjayhbOa52iTXBc8SyKXE=";
  };

  buildInputs = [ emacs ];

  buildPhase = ''
    runHook preBuild
    emacs -L . --batch -f batch-byte-compile *.el
    runHook postBuild
  '';

  installPhase = ''
    runHook preInstall
    install -d $out/share/emacs/site-lisp
    install *.el *.elc $out/share/emacs/site-lisp
    runHook postInstall
  '';

  meta = {
    description = "Reformat buffer stably";
    homepage = "https://github.com/raxod502/apheleia";
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [ leungbk ];
    platforms = emacs.meta.platforms;
  };
}
|
|
@ -0,0 +1,46 @@
|
|||
{ stdenv, fetchFromGitHub, emacs, emacsPackages, lib }:
|
||||
|
||||
let
|
||||
runtimeDeps = with emacsPackages; [
|
||||
evil
|
||||
markdown-mode
|
||||
];
|
||||
in
|
||||
stdenv.mkDerivation {
|
||||
pname = "evil-markdown";
|
||||
version = "2020-06-01";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "Somelauw";
|
||||
repo = "evil-markdown";
|
||||
rev = "064fe9b4767470472356d20bdd08e2f30ebbc9ac";
|
||||
sha256 = "sha256-Kt2wxG1XCFowavVWtj0urM/yURKegonpZcxTy/+CrJY=";
|
||||
};
|
||||
|
||||
buildInputs = [
|
||||
emacs
|
||||
] ++ runtimeDeps;
|
||||
|
||||
propagatedUserEnvPkgs = runtimeDeps;
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
install -d $out/share/emacs/site-lisp
|
||||
install *.el *.elc $out/share/emacs/site-lisp
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Vim-like keybindings for markdown-mode";
|
||||
homepage = "https://github.com/Somelauw/evil-markdown";
|
||||
license = lib.licenses.gpl3Plus;
|
||||
maintainers = with lib.maintainers; [ leungbk ];
|
||||
platforms = emacs.meta.platforms;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{ stdenv, fetchFromGitHub, emacs, lib }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "git-undo";
|
||||
version = "2019-10-13";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "jwiegley";
|
||||
repo = "git-undo-el";
|
||||
rev = "cf31e38e7889e6ade7d2d2b9f8719fd44f52feb5";
|
||||
sha256 = "sha256-cVkK9EF6qQyVV3uVqnBEjF8e9nEx/8ixnM8PvxqCyYE=";
|
||||
};
|
||||
|
||||
buildInputs = [ emacs ];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
install -d $out/share/emacs/site-lisp
|
||||
install *.el *.elc $out/share/emacs/site-lisp
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Revert region to most recent Git-historical version";
|
||||
homepage = "https://github.com/jwiegley/git-undo-el";
|
||||
license = lib.licenses.gpl2Plus;
|
||||
maintainers = with lib.maintainers; [ leungbk ];
|
||||
platforms = emacs.meta.platforms;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{ stdenv, fetchFromGitHub, emacs, lib }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "isearch-plus";
|
||||
version = "2021-01-01";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "emacsmirror";
|
||||
repo = "isearch-plus";
|
||||
rev = "376a8f9f8a9666d7e61d125abcdb645847cb8619";
|
||||
sha256 = "sha256-Kd5vpu+mI1tJPcsu7EpnnBcPVdVAijkAeTz+bLB3WlQ=";
|
||||
};
|
||||
|
||||
buildInputs = [ emacs ];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
install -d $out/share/emacs/site-lisp
|
||||
install *.el *.elc $out/share/emacs/site-lisp
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Extensions to isearch";
|
||||
homepage = "https://www.emacswiki.org/emacs/download/isearch%2b.el";
|
||||
license = lib.licenses.gpl2Plus;
|
||||
maintainers = with lib.maintainers; [ leungbk ];
|
||||
platforms = emacs.meta.platforms;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{ stdenv, fetchFromGitHub, emacs, lib }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "isearch-prop";
|
||||
version = "2019-05-01";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "emacsmirror";
|
||||
repo = "isearch-prop";
|
||||
rev = "4a2765f835dd115d472142da05215c4c748809f4";
|
||||
sha256 = "sha256-A1Kt4nm7iRV9J5yaLupwiNL5g7ddZvQs79dggmqZ7Rk=";
|
||||
};
|
||||
|
||||
buildInputs = [ emacs ];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
install -d $out/share/emacs/site-lisp
|
||||
install *.el *.elc $out/share/emacs/site-lisp
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Search text- or overlay-property contexts";
|
||||
homepage = "https://www.emacswiki.org/emacs/download/isearch-prop.el";
|
||||
license = lib.licenses.gpl3Plus;
|
||||
maintainers = with lib.maintainers; [ leungbk ];
|
||||
platforms = emacs.meta.platforms;
|
||||
};
|
||||
}
|
|
@ -65,11 +65,15 @@
|
|||
};
|
||||
};
|
||||
|
||||
apheleia = callPackage ./apheleia {};
|
||||
|
||||
emacspeak = callPackage ./emacspeak {};
|
||||
|
||||
ess-R-object-popup =
|
||||
callPackage ./ess-R-object-popup { };
|
||||
|
||||
evil-markdown = callPackage ./evil-markdown { };
|
||||
|
||||
font-lock-plus = callPackage ./font-lock-plus { };
|
||||
|
||||
ghc-mod = melpaBuild {
|
||||
|
@ -88,6 +92,8 @@
|
|||
};
|
||||
};
|
||||
|
||||
git-undo = callPackage ./git-undo { };
|
||||
|
||||
haskell-unicode-input-method = melpaBuild {
|
||||
pname = "emacs-haskell-unicode-input-method";
|
||||
version = "20110905.2307";
|
||||
|
@ -111,6 +117,10 @@
|
|||
|
||||
helm-words = callPackage ./helm-words { };
|
||||
|
||||
isearch-plus = callPackage ./isearch-plus { };
|
||||
|
||||
isearch-prop = callPackage ./isearch-prop { };
|
||||
|
||||
jam-mode = callPackage ./jam-mode { };
|
||||
|
||||
llvm-mode = trivialBuild {
|
||||
|
@ -177,6 +187,8 @@
|
|||
|
||||
};
|
||||
|
||||
mu4e-patch = callPackage ./mu4e-patch { };
|
||||
|
||||
org-mac-link =
|
||||
callPackage ./org-mac-link { };
|
||||
|
||||
|
@ -206,6 +218,8 @@
|
|||
|
||||
tramp = callPackage ./tramp { };
|
||||
|
||||
youtube-dl = callPackage ./youtube-dl { };
|
||||
|
||||
zeitgeist = callPackage ./zeitgeist { };
|
||||
|
||||
# From old emacsPackages (pre emacsPackagesNg)
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
{ stdenv, fetchFromGitHub, emacs, lib }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "mu4e-patch";
|
||||
version = "2019-05-09";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "seanfarley";
|
||||
repo = "mu4e-patch";
|
||||
rev = "522da46c1653b1cacc79cde91d6534da7ae9517d";
|
||||
sha256 = "sha256-1lV4dDuCdyCUXi/In2DzYJPEHuAc9Jfbz2ZecNZwn4I=";
|
||||
};
|
||||
|
||||
buildInputs = [
|
||||
emacs
|
||||
];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
install -d $out/share/emacs/site-lisp
|
||||
install *.el *.elc $out/share/emacs/site-lisp
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Colorize patch emails in mu4e";
|
||||
homepage = "https://github.com/seanfarley/mu4e-patch";
|
||||
license = lib.licenses.gpl3Plus;
|
||||
maintainers = with lib.maintainers; [ leungbk ];
|
||||
platforms = emacs.meta.platforms;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
{ stdenv, fetchFromGitHub, emacs, lib }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "youtube-dl";
|
||||
version = "2018-10-12";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "skeeto";
|
||||
repo = "youtube-dl-emacs";
|
||||
rev = "af877b5bc4f01c04fccfa7d47a2c328926f20ef4";
|
||||
sha256 = "sha256-Etl95rcoRACDPjcTPQqYK2L+w8OZbOrTrRT0JadMdH4=";
|
||||
};
|
||||
|
||||
buildInputs = [ emacs ];
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
install -d $out/share/emacs/site-lisp
|
||||
install *.el *.elc $out/share/emacs/site-lisp
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "Emacs frontend to the youtube-dl utility";
|
||||
homepage = "https://github.com/skeeto/youtube-dl-emacs";
|
||||
license = lib.licenses.unlicense;
|
||||
maintainers = with lib.maintainers; [ leungbk ];
|
||||
platforms = emacs.meta.platforms;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
{ lib, fetchFromGitHub, python3Packages }:
|
||||
|
||||
python3Packages.buildPythonApplication rec {
|
||||
pname = "avell-unofficial-control-center";
|
||||
version = "1.0.4";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "rodgomesc";
|
||||
repo = "avell-unofficial-control-center";
|
||||
# https://github.com/rodgomesc/avell-unofficial-control-center/issues/58
|
||||
rev = "e32e243e31223682a95a719bc58141990eef35e6";
|
||||
sha256 = "1qz1kv7p09nxffndzz9jlkzpfx26ppz66f8603zyamjq9dqdmdin";
|
||||
};
|
||||
|
||||
# No tests included
|
||||
doCheck = false;
|
||||
|
||||
propagatedBuildInputs = with python3Packages; [ pyusb elevate ];
|
||||
|
||||
meta = with lib; {
|
||||
homepage = "https://github.com/rodgomesc/avell-unofficial-control-center";
|
||||
description = "Software for controlling RGB keyboard lights on some gaming laptops that use ITE Device(8291) Rev 0.03";
|
||||
license = licenses.mit;
|
||||
maintainers = with maintainers; [ rkitover ];
|
||||
};
|
||||
}
|
pkgs/applications/misc/clifm/default.nix (new file, 34 lines)
@@ -0,0 +1,34 @@
{ stdenv, lib, fetchFromGitHub, libcap, acl, file, readline }:

stdenv.mkDerivation rec {
  pname = "clifm";
  version = "1.1";

  src = fetchFromGitHub {
    owner = "leo-arch";
    repo = pname;
    rev = "v${version}";
    sha256 = "0mf9lrq0l532vyf4ycsikrw8imn4gkavyn3cr42nhjsr1drygrp8";
  };

  buildInputs = [ libcap acl file readline ];

  makeFlags = [
    "INSTALLPREFIX=${placeholder "out"}/bin"
    "DESKTOPPREFIX=${placeholder "out"}/share"
  ];

  preInstall = ''
    mkdir -p $out/bin $out/share
  '';

  enableParallelBuilding = true;

  meta = with lib; {
    homepage = "https://github.com/leo-arch/clifm";
    description = "CliFM is a CLI-based, shell-like, and non-curses terminal file manager written in C: simple, fast, extensible, and lightweight as hell";
    license = licenses.gpl2Plus;
    maintainers = with maintainers; [ vonfry ];
    platforms = platforms.unix;
  };
}
|
|
@ -2,11 +2,11 @@
|
|||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "logseq";
|
||||
version = "0.0.16";
|
||||
version = "0.1.3";
|
||||
|
||||
src = fetchurl {
|
||||
url = "https://github.com/logseq/logseq/releases/download/${version}/logseq-linux-x64-${version}.AppImage";
|
||||
sha256 = "dmgwFHJRy5qE71naRJKX0HCrVG0qQBOIM9TvCh4j/lY=";
|
||||
sha256 = "1akg3xjbh01nb7l06qpvz3xsjj64kf042xjnapn60jlgg5y34vbm";
|
||||
name = "${pname}-${version}.AppImage";
|
||||
};
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
, openssl
|
||||
, libopus
|
||||
, ffmpeg
|
||||
, wayland
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
|
@ -47,6 +48,7 @@ stdenv.mkDerivation rec {
|
|||
openssl
|
||||
libopus
|
||||
ffmpeg
|
||||
wayland
|
||||
];
|
||||
|
||||
meta = with lib; {
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
{ lib, setuptools, boto3, requests, click, pyyaml, pydantic, buildPythonApplication
|
||||
, pythonOlder, fetchFromGitHub, awscli }:
|
||||
{ lib, setuptools, boto3, requests, click, pyyaml, pydantic
|
||||
, buildPythonApplication, pythonOlder, installShellFiles, fetchFromGitHub
|
||||
, awscli }:
|
||||
|
||||
buildPythonApplication rec {
|
||||
pname = "nimbo";
|
||||
|
@ -12,13 +13,20 @@ buildPythonApplication rec {
|
|||
rev = "v${version}";
|
||||
sha256 = "1fs28s9ynfxrb4rzba6cmik0kl0q0vkpb4zdappsq62jqf960k24";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ installShellFiles ];
|
||||
propagatedBuildInputs = [ setuptools boto3 awscli requests click pyyaml pydantic ];
|
||||
|
||||
# nimbo tests require an AWS instance
|
||||
doCheck = false;
|
||||
pythonImportsCheck = [ "nimbo" ];
|
||||
|
||||
postInstall = ''
|
||||
installShellCompletion --cmd nimbo \
|
||||
--zsh <(_NIMBO_COMPLETE=source_zsh $out/bin/nimbo) \
|
||||
--bash <(_NIMBO_COMPLETE=source_bash $out/bin/nimbo) \
|
||||
--fish <(_NIMBO_COMPLETE=source_fish $out/bin/nimbo)
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
description = "Run machine learning jobs on AWS with a single command";
|
||||
homepage = "https://github.com/nimbo-sh/nimbo";
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
{ lib, mkDerivation, fetchFromGitLab, qmake, libusb1, hidapi, pkg-config }:
|
||||
{ lib, mkDerivation, fetchFromGitLab, qmake, libusb1, hidapi, pkg-config, coreutils }:
|
||||
|
||||
mkDerivation rec {
|
||||
pname = "openrgb";
|
||||
|
@ -15,11 +15,18 @@ mkDerivation rec {
|
|||
buildInputs = [ libusb1 hidapi ];
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
mkdir -p $out/bin
|
||||
cp openrgb $out/bin
|
||||
|
||||
substituteInPlace 60-openrgb.rules \
|
||||
--replace /bin/chmod "${coreutils}/bin/chmod"
|
||||
|
||||
mkdir -p $out/etc/udev/rules.d
|
||||
cp 60-openrgb.rules $out/etc/udev/rules.d
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
doInstallCheck = true;
|
||||
|
@ -27,13 +34,11 @@ mkDerivation rec {
|
|||
HOME=$TMPDIR $out/bin/openrgb --help > /dev/null
|
||||
'';
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
||||
meta = with lib; {
|
||||
description = "Open source RGB lighting control";
|
||||
homepage = "https://gitlab.com/CalcProgrammer1/OpenRGB";
|
||||
maintainers = with maintainers; [ jonringer ];
|
||||
license = licenses.gpl2;
|
||||
license = licenses.gpl2Plus;
|
||||
platforms = platforms.linux;
|
||||
};
|
||||
}
|
||||
|
|
pkgs/applications/misc/sfm/default.nix (new file, 27 lines)
@@ -0,0 +1,27 @@
{ lib, stdenv, fetchFromGitHub, conf ? null }:

stdenv.mkDerivation rec {
  pname = "sfm";
  version = "0.1";

  src = fetchFromGitHub {
    owner = "afify";
    repo = pname;
    rev = "v${version}";
    hash = "sha256-i4WzYaJKityIt+LPWCbd6UsPBaYoaS397l5BInOXQQA=";
  };

  configFile = lib.optionalString (conf!=null) (lib.writeText "config.def.h" conf);

  postPatch = lib.optionalString (conf!=null) "cp ${configFile} config.def.h";

  installFlags = [ "PREFIX=$(out)" ];

  meta = with lib; {
    description = "Simple file manager";
    homepage = "https://github.com/afify/sfm";
    license = licenses.isc;
    platforms = platforms.unix;
    maintainers = with maintainers; [ sikmir ];
  };
}
|
|
@ -24,7 +24,8 @@ in python.pkgs.buildPythonPackage {
|
|||
|
||||
postPatch = ''
|
||||
substituteInPlace requirements.txt \
|
||||
--replace "aiohttp==3.6.2" "aiohttp>=3.6.2"
|
||||
--replace "aiohttp==3.6.2" "aiohttp>=3.6.2" \
|
||||
--replace "py-cpuinfo==7.0.0" "py-cpuinfo>=8.0.0"
|
||||
'';
|
||||
|
||||
propagatedBuildInputs = with python.pkgs; [
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
, alsaLib, at-spi2-atk, at-spi2-core, atk, cairo, cups, dbus, expat, fontconfig
|
||||
, freetype, gdk-pixbuf, glib, gtk3, libcxx, libdrm, libnotify, libpulseaudio, libuuid
|
||||
, libX11, libXScrnSaver, libXcomposite, libXcursor, libXdamage, libXext
|
||||
, libXfixes, libXi, libXrandr, libXrender, libXtst, libxcb
|
||||
, libXfixes, libXi, libXrandr, libXrender, libXtst, libxcb, libxshmfence
|
||||
, mesa, nspr, nss, pango, systemd, libappindicator-gtk3, libdbusmenu
|
||||
}:
|
||||
|
||||
|
@ -23,6 +23,7 @@ in stdenv.mkDerivation rec {
|
|||
libXScrnSaver
|
||||
libXtst
|
||||
libxcb
|
||||
libxshmfence
|
||||
mesa
|
||||
nss
|
||||
wrapGAppsHook
|
||||
|
|
|
@@ -27,10 +27,10 @@ in {
pname = "discord-canary";
binaryName = "DiscordCanary";
desktopName = "Discord Canary";
version = "0.0.123";
version = "0.0.124";
src = fetchurl {
url = "https://dl-canary.discordapp.net/apps/linux/${version}/discord-canary-${version}.tar.gz";
sha256 = "0bijwfsd9s4awqkgxd9c2cxh7y5r06vix98qjp0dkv63r6jig8ch";
sha256 = "060ypr9rn5yl8iwh4v3ax1v6501yaq72sx50q47sm0wyxn7gpv91";
};
};
}.${branch}
|
|
@ -1,18 +1,33 @@
|
|||
{ lib, mkFranzDerivation, fetchurl }:
|
||||
{ lib, mkFranzDerivation, fetchurl, xorg, xdg-utils, buildEnv, writeShellScriptBin }:
|
||||
|
||||
mkFranzDerivation rec {
|
||||
let
|
||||
mkFranzDerivation' = mkFranzDerivation.override {
|
||||
xdg-utils = buildEnv {
|
||||
name = "xdg-utils-for-ferdi";
|
||||
paths = [
|
||||
xdg-utils
|
||||
(lib.hiPrio (writeShellScriptBin "xdg-open" ''
|
||||
unset GDK_BACKEND
|
||||
exec ${xdg-utils}/bin/xdg-open "$@"
|
||||
''))
|
||||
];
|
||||
};
|
||||
};
|
||||
in
|
||||
mkFranzDerivation' rec {
|
||||
pname = "ferdi";
|
||||
name = "Ferdi";
|
||||
version = "5.6.0-beta.5";
|
||||
version = "5.6.0-beta.6";
|
||||
src = fetchurl {
|
||||
url = "https://github.com/getferdi/ferdi/releases/download/v${version}/ferdi_${version}_amd64.deb";
|
||||
sha256 = "sha256-fDUzYir53OQ3O4o9eG70sGD+FJ0/4SDNsTfh97WFRnQ=";
|
||||
sha256 = "sha256-Q1HSAEVcaxFyOq7oWqa6AJJpsBKRxbsKb9ydyK/gH/A=";
|
||||
};
|
||||
extraBuildInputs = [ xorg.libxshmfence ];
|
||||
meta = with lib; {
|
||||
description = "Combine your favorite messaging services into one application";
|
||||
homepage = "https://getferdi.com/";
|
||||
license = licenses.asl20;
|
||||
maintainers = [ maintainers.davidtwco ];
|
||||
maintainers = with maintainers; [ davidtwco ma27 ];
|
||||
platforms = [ "x86_64-linux" ];
|
||||
hydraPlatforms = [ ];
|
||||
};
|
||||
|
|
|
@ -28,7 +28,8 @@
|
|||
|
||||
# Helper function for building a derivation for Franz and forks.
|
||||
|
||||
{ pname, name, version, src, meta }:
|
||||
{ pname, name, version, src, meta, extraBuildInputs ? [] }:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
inherit pname version src meta;
|
||||
|
||||
|
@ -36,7 +37,7 @@ stdenv.mkDerivation rec {
|
|||
dontPatchELF = true;
|
||||
|
||||
nativeBuildInputs = [ autoPatchelfHook makeWrapper wrapGAppsHook dpkg ];
|
||||
buildInputs = (with xorg; [
|
||||
buildInputs = extraBuildInputs ++ (with xorg; [
|
||||
libXi
|
||||
libXcursor
|
||||
libXdamage
|
||||
|
|
|
@ -2,13 +2,13 @@
|
|||
|
||||
stdenv.mkDerivation {
|
||||
pname = "matrix-commander";
|
||||
version = "unstable-2021-04-18";
|
||||
version = "unstable-2021-05-26";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "8go";
|
||||
repo = "matrix-commander";
|
||||
rev = "3e89a5f4c98dd191880ae371cc63eb9282d7d91f";
|
||||
sha256 = "08nwwszp1kv5b7bgf6mmfn42slxkyhy98x18xbn4pglc4bj32iql";
|
||||
rev = "06b4738bc74ee86fb3ac88c04b8230abf82e7421";
|
||||
sha256 = "1skpq3xfnz11m298qnsw68xv391p5qg47flagzsk86pnzi841vc1";
|
||||
};
|
||||
|
||||
buildInputs = [
|
||||
|
@ -35,7 +35,7 @@ stdenv.mkDerivation {
|
|||
meta = with lib; {
|
||||
description = "Simple but convenient CLI-based Matrix client app for sending and receiving";
|
||||
homepage = "https://github.com/8go/matrix-commander";
|
||||
license = licenses.gpl3Only;
|
||||
license = licenses.gpl3Plus;
|
||||
platforms = platforms.linux;
|
||||
maintainers = [ maintainers.seb314 ];
|
||||
};
|
||||
|
|
|
@ -21,8 +21,11 @@ in mkRambox rec {
|
|||
description = "Free and Open Source messaging and emailing app that combines common web applications into one";
|
||||
homepage = "https://rambox.pro";
|
||||
license = licenses.mit;
|
||||
maintainers = with maintainers; [ ma27 ];
|
||||
maintainers = with maintainers; [ ];
|
||||
platforms = ["i686-linux" "x86_64-linux"];
|
||||
hydraPlatforms = [];
|
||||
knownVulnerabilities = [
|
||||
"Electron 7.2.4 is EOL and contains at least the following vulnerabilities: CVE-2020-6458, CVE-2020-6460 and more (https://www.electronjs.org/releases/stable?version=7). Consider using an alternative such as `ferdi'."
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ let
|
|||
else "");
|
||||
in stdenv.mkDerivation rec {
|
||||
pname = "signal-desktop";
|
||||
version = "5.3.0"; # Please backport all updates to the stable channel.
|
||||
version = "5.4.0"; # Please backport all updates to the stable channel.
|
||||
# All releases have a limited lifetime and "expire" 90 days after the release.
|
||||
# When releases "expire" the application becomes unusable until an update is
|
||||
# applied. The expiration date for the current release can be extracted with:
|
||||
|
@ -38,7 +38,7 @@ in stdenv.mkDerivation rec {
|
|||
|
||||
src = fetchurl {
|
||||
url = "https://updates.signal.org/desktop/apt/pool/main/s/signal-desktop/signal-desktop_${version}_amd64.deb";
|
||||
sha256 = "15lclxw3njih90zlh2n90v8ljg0wnglw5w8jrpa7rbd789yagvq7";
|
||||
sha256 = "046xy033ars70ay5ryj39i5053py00xj92ajdg212pamq415z1zb";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
|
|
|
@@ -9,7 +9,8 @@ stdenv.mkDerivation {
version = "13.3.1.22";

src = fetchurl {
url = "https://download.cdn.viber.com/cdn/desktop/Linux/viber.deb";
# Official link: https://download.cdn.viber.com/cdn/desktop/Linux/viber.deb
url = "http://web.archive.org/web/20210602004133/https://download.cdn.viber.com/cdn/desktop/Linux/viber.deb";
sha256 = "0rs26x0lycavybn6k1hbb5kzms0zzcmxlrmi4g8k7vyafj6s8dqh";
};
pkgs/applications/networking/irc/senpai/default.nix (new file, 37 lines)
@@ -0,0 +1,37 @@
{ lib, buildGoModule, fetchFromSourcehut, installShellFiles, scdoc }:

buildGoModule rec {
  pname = "senpai";
  version = "unstable-2021-05-27";

  src = fetchFromSourcehut {
    owner = "~taiite";
    repo = "senpai";
    rev = "6be718329175c6d11e359f1a366ab6ab22b101d2";
    sha256 = "sha256-hW6DHJlDBYEqK8zj5PvGKU54sbeXjx1tdqwKXPXlKHc=";
  };

  vendorSha256 = "sha256-OLi5y1hrYK6+l5WB1SX85QU4y3KjFyGaEzgbE6lnW2k=";

  subPackages = [
    "cmd/senpai"
  ];

  nativeBuildInputs = [
    scdoc
    installShellFiles
  ];

  postInstall = ''
    scdoc < doc/senpai.1.scd > doc/senpai.1
    scdoc < doc/senpai.5.scd > doc/senpai.5
    installManPage doc/senpai.*
  '';

  meta = with lib; {
    description = "Your everyday IRC student";
    homepage = "https://ellidri.org/senpai";
    license = licenses.isc;
    maintainers = with maintainers; [ malvo ];
  };
}