
Merge master into staging-next

github-actions[bot] 2021-11-05 12:01:17 +00:00 committed by GitHub
commit 2663e5b623
42 changed files with 1852 additions and 637 deletions


@ -50,6 +50,29 @@
guide</link> is available.
</para>
</listitem>
<listitem>
<para>
Improvements have been made to the Hadoop module and package:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
HDFS and YARN now support production-ready highly
available deployments with automatic failover.
</para>
</listitem>
<listitem>
<para>
Hadoop now defaults to Hadoop 3, updated from 2.
</para>
</listitem>
<listitem>
<para>
JournalNode, ZKFC and HTTPFS services have been added.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Activation scripts can now opt in to be run when running
@ -1802,6 +1825,39 @@ Superuser created successfully.
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-services.unifi.enable">services.unifi</link>
module has been reworked, solving a number of issues. This
leads to several user-facing changes:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
The <literal>services.unifi.dataDir</literal> option is
removed and the data is now always located under
<literal>/var/lib/unifi/data</literal>. This is done to
make better use of the systemd state directory and thus
make service restarts more reliable.
</para>
</listitem>
<listitem>
<para>
The unifi logs can now be found under
<literal>/var/log/unifi</literal> instead of
<literal>/var/lib/unifi/logs</literal>.
</para>
</listitem>
<listitem>
<para>
The unifi run directory can now be found under
<literal>/run/unifi</literal> instead of
<literal>/var/lib/unifi/run</literal>.
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist>
</section>
</section>


@ -18,6 +18,11 @@ In addition to numerous new and upgraded packages, this release has the followin
- spark now defaults to spark 3, updated from 2. A [migration guide](https://spark.apache.org/docs/latest/core-migration-guide.html#upgrading-from-core-24-to-30) is available.
- Improvements have been made to the Hadoop module and package:
- HDFS and YARN now support production-ready highly available deployments with automatic failover.
- Hadoop now defaults to Hadoop 3, updated from 2.
- JournalNode, ZKFC and HTTPFS services have been added.
- Activation scripts can now opt in to be run when running `nixos-rebuild dry-activate` and detect the dry activation by reading `$NIXOS_ACTION`.
This allows activation scripts to output what they would change if the activation was really run.
The users/modules activation script supports this and outputs some of its actions.
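
As a hedged sketch of how a script could use this hook (the `supportsDryActivation` opt-in flag is an assumption; only `$NIXOS_ACTION` and `nixos-rebuild dry-activate` are confirmed by the note above):

```nix
{
  # Sketch only: `supportsDryActivation` is assumed to be the opt-in flag.
  system.activationScripts.exampleState = {
    supportsDryActivation = true;
    text = ''
      if [ "$NIXOS_ACTION" = "dry-activate" ]; then
        echo "would create /var/lib/example"
      else
        mkdir -p /var/lib/example
      fi
    '';
  };
}
```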
@ -506,3 +511,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- Dokuwiki now supports caddy! However:
- the nginx option has been removed; in the new configuration, please use `dokuwiki.webserver = "nginx"` instead.
- The "${hostname}" option has been deprecated, please use `dokuwiki.sites = [ "${hostname}" ]` instead.
- The [services.unifi](options.html#opt-services.unifi.enable) module has been reworked, solving a number of issues. This leads to several user-facing changes:
- The `services.unifi.dataDir` option is removed and the data is now always located under `/var/lib/unifi/data`. This is done to make better use of the systemd state directory and thus make service restarts more reliable.
- The unifi logs can now be found under `/var/log/unifi` instead of `/var/lib/unifi/logs`.
- The unifi run directory can now be found under `/run/unifi` instead of `/var/lib/unifi/run`.


@ -284,6 +284,10 @@ in
source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
};
# 'nvidia_x11' installs its files to /run/opengl-driver/...
environment.etc."egl/egl_external_platform.d".source =
"/run/opengl-driver/share/egl/egl_external_platform.d/";
hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out;
hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_x11.lib32;
hardware.opengl.extraPackages = optional offloadCfg.enable nvidia_x11.out;


@ -35,6 +35,7 @@ pkgs.runCommand "hadoop-conf" {} ''
cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/


@ -15,7 +15,10 @@ with lib;
"fs.defaultFS" = "hdfs://localhost";
}
'';
description = "Hadoop core-site.xml definition";
description = ''
Hadoop core-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
'';
};
hdfsSite = mkOption {
@ -28,7 +31,10 @@ with lib;
"dfs.nameservices" = "namenode1";
}
'';
description = "Hadoop hdfs-site.xml definition";
description = ''
Hadoop hdfs-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
'';
};
mapredSite = mkOption {
@ -44,7 +50,10 @@ with lib;
"mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
}
'';
description = "Hadoop mapred-site.xml definition";
description = ''
Hadoop mapred-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
'';
};
yarnSite = mkOption {
@ -67,7 +76,24 @@ with lib;
"yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
}
'';
description = "Hadoop yarn-site.xml definition";
description = ''
Hadoop yarn-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
'';
};
httpfsSite = mkOption {
default = { };
type = types.attrsOf types.anything;
example = literalExpression ''
{
"hadoop.http.max.threads" = 500;
}
'';
description = ''
Hadoop httpfs-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-hdfs-httpfs/httpfs-default.html"/>
'';
};
log4jProperties = mkOption {
@ -92,7 +118,10 @@ with lib;
"feature.terminal.enabled" = 0;
}
'';
description = "Yarn container-executor.cfg definition";
description = ''
Yarn container-executor.cfg definition
<link xlink:href="https://hadoop.apache.org/docs/r2.7.2/hadoop-yarn/hadoop-yarn-site/SecureContainer.html"/>
'';
};
extraConfDirs = mkOption {
@ -118,7 +147,8 @@ with lib;
config = mkMerge [
(mkIf (builtins.hasAttr "yarn" config.users.users ||
builtins.hasAttr "hdfs" config.users.users) {
builtins.hasAttr "hdfs" config.users.users ||
builtins.hasAttr "httpfs" config.users.users) {
users.groups.hadoop = {
gid = config.ids.gids.hadoop;
};
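
A minimal usage sketch for the site options above, including the new `httpfsSite`; the attribute values are taken from the option examples in this diff, and each set is rendered into the corresponding `*-site.xml` by the configuration generator shown earlier:

```nix
{
  services.hadoop = {
    coreSite."fs.defaultFS" = "hdfs://localhost";
    hdfsSite."dfs.nameservices" = "namenode1";
    httpfsSite."hadoop.http.max.threads" = 500;
  };
}
```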


@ -17,11 +17,14 @@ in
{
options.services.hadoop.hdfs = {
namenode = {
enabled = mkOption {
enable = mkEnableOption "Whether to run the HDFS NameNode";
formatOnInit = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the HDFS NameNode
Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
to initialize an HA cluster manually.
'';
};
inherit restartIfChanged;
@ -34,13 +37,7 @@ in
};
};
datanode = {
enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the HDFS DataNode
'';
};
enable = mkEnableOption "Whether to run the HDFS DataNode";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
@ -50,18 +47,51 @@ in
'';
};
};
journalnode = {
enable = mkEnableOption "Whether to run the HDFS JournalNode";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for journalnode
'';
};
};
zkfc = {
enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
inherit restartIfChanged;
};
httpfs = {
enable = mkEnableOption "Whether to run the HDFS HTTPfs server";
tempPath = mkOption {
type = types.path;
default = "/tmp/hadoop/httpfs";
description = ''
HTTPFS_TEMP path used by HTTPFS
'';
};
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for HTTPFS
'';
};
};
};
config = mkMerge [
(mkIf cfg.hdfs.namenode.enabled {
(mkIf cfg.hdfs.namenode.enable {
systemd.services.hdfs-namenode = {
description = "Hadoop HDFS NameNode";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.namenode) restartIfChanged;
preStart = ''
preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
'';
'');
serviceConfig = {
User = "hdfs";
@ -74,9 +104,10 @@ in
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
9870 # namenode.http-address
8020 # namenode.rpc-address
8022 # namenode.servicerpc-address
]);
})
(mkIf cfg.hdfs.datanode.enabled {
(mkIf cfg.hdfs.datanode.enable {
systemd.services.hdfs-datanode = {
description = "Hadoop HDFS DataNode";
wantedBy = [ "multi-user.target" ];
@ -96,8 +127,64 @@ in
9867 # datanode.ipc.address
]);
})
(mkIf cfg.hdfs.journalnode.enable {
systemd.services.hdfs-journalnode = {
description = "Hadoop HDFS JournalNode";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.journalnode) restartIfChanged;
serviceConfig = {
User = "hdfs";
SyslogIdentifier = "hdfs-journalnode";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
8480 # dfs.journalnode.http-address
8485 # dfs.journalnode.rpc-address
]);
})
(mkIf cfg.hdfs.zkfc.enable {
systemd.services.hdfs-zkfc = {
description = "Hadoop HDFS ZooKeeper failover controller";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.zkfc) restartIfChanged;
serviceConfig = {
User = "hdfs";
SyslogIdentifier = "hdfs-zkfc";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
Restart = "always";
};
};
})
(mkIf cfg.hdfs.httpfs.enable {
systemd.services.hdfs-httpfs = {
description = "Hadoop httpfs";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.httpfs) restartIfChanged;
environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
preStart = ''
mkdir -p $HTTPFS_TEMP
'';
serviceConfig = {
User = "httpfs";
SyslogIdentifier = "hdfs-httpfs";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
14000 # httpfs.http.port
]);
})
(mkIf (
cfg.hdfs.namenode.enabled || cfg.hdfs.datanode.enabled
cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
) {
users.users.hdfs = {
description = "Hadoop HDFS user";
@ -105,6 +192,12 @@ in
uid = config.ids.uids.hdfs;
};
})
(mkIf cfg.hdfs.httpfs.enable {
users.users.httpfs = {
description = "Hadoop HTTPFS user";
group = "hadoop";
isSystemUser = true;
};
})
];
}
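
A hedged sketch of a single machine enabling the HDFS services introduced above, using the renamed `enable` options (non-HA; an HA deployment would instead enable `journalnode` and `zkfc` across several nodes, as in the test further down):

```nix
{
  services.hadoop = {
    coreSite."fs.defaultFS" = "hdfs://localhost";
    hdfs = {
      namenode = {
        enable = true;
        formatOnInit = true; # only sensible for ephemeral single-namenode clusters
      };
      datanode.enable = true;
      httpfs.enable = true;  # WebHDFS gateway, listens on port 14000
      # journalnode.enable = true; and zkfc.enable = true; are for HA setups
    };
  };
}
```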


@ -17,13 +17,7 @@ in
{
options.services.hadoop.yarn = {
resourcemanager = {
enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the Hadoop YARN ResourceManager
'';
};
enable = mkEnableOption "Whether to run the Hadoop YARN ResourceManager";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
@ -34,13 +28,7 @@ in
};
};
nodemanager = {
enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the Hadoop YARN NodeManager
'';
};
enable = mkEnableOption "Whether to run the Hadoop YARN NodeManager";
inherit restartIfChanged;
addBinBash = mkOption {
type = types.bool;
@ -62,7 +50,7 @@ in
config = mkMerge [
(mkIf (
cfg.yarn.resourcemanager.enabled || cfg.yarn.nodemanager.enabled
cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
) {
users.users.yarn = {
@ -72,7 +60,7 @@ in
};
})
(mkIf cfg.yarn.resourcemanager.enabled {
(mkIf cfg.yarn.resourcemanager.enable {
systemd.services.yarn-resourcemanager = {
description = "Hadoop YARN ResourceManager";
wantedBy = [ "multi-user.target" ];
@ -91,10 +79,11 @@ in
8030 # resourcemanager.scheduler.address
8031 # resourcemanager.resource-tracker.address
8032 # resourcemanager.address
8033 # resourcemanager.admin.address
]);
})
(mkIf cfg.yarn.nodemanager.enabled {
(mkIf cfg.yarn.nodemanager.enable {
# Needed because yarn hardcodes /bin/bash in container start scripts
# These scripts can't be patched, they are generated at runtime
systemd.tmpfiles.rules = [

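The YARN options follow the same rename from `enabled` to `enable`; a minimal sketch (the `yarnSite` hostname value mirrors the YARN test later in this commit):

```nix
{
  services.hadoop = {
    yarn.resourcemanager.enable = true;
    yarn.nodemanager.enable = true;
    # point nodemanagers at the resourcemanager host
    yarnSite."yarn.resourcemanager.hostname" = "resourcemanager";
  };
}
```
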

@ -9,25 +9,6 @@ let
${optionalString (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m"} \
-jar ${stateDir}/lib/ace.jar
'';
mountPoints = [
{
what = "${cfg.unifiPackage}/dl";
where = "${stateDir}/dl";
}
{
what = "${cfg.unifiPackage}/lib";
where = "${stateDir}/lib";
}
{
what = "${cfg.mongodbPackage}/bin";
where = "${stateDir}/bin";
}
{
what = "${cfg.dataDir}";
where = "${stateDir}/data";
}
];
systemdMountPoints = map (m: "${utils.escapeSystemdPath m.where}.mount") mountPoints;
in
{
@ -68,16 +49,6 @@ in
'';
};
services.unifi.dataDir = mkOption {
type = types.str;
default = "${stateDir}/data";
description = ''
Where to store the database and other data.
This directory will be bind-mounted to ${stateDir}/data as part of the service startup.
'';
};
services.unifi.openPorts = mkOption {
type = types.bool;
default = true;
@ -136,32 +107,11 @@ in
];
};
# We must create the binary directories as bind mounts instead of symlinks
# This is because the controller resolves all symlinks to absolute paths
# to be used as the working directory.
systemd.mounts = map ({ what, where }: {
bindsTo = [ "unifi.service" ];
partOf = [ "unifi.service" ];
unitConfig.RequiresMountsFor = stateDir;
options = "bind";
what = what;
where = where;
}) mountPoints;
systemd.tmpfiles.rules = [
"d '${stateDir}' 0700 unifi - - -"
"d '${stateDir}/data' 0700 unifi - - -"
"d '${stateDir}/webapps' 0700 unifi - - -"
"L+ '${stateDir}/webapps/ROOT' - - - - ${cfg.unifiPackage}/webapps/ROOT"
];
systemd.services.unifi = {
description = "UniFi controller daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ] ++ systemdMountPoints;
partOf = systemdMountPoints;
bindsTo = systemdMountPoints;
unitConfig.RequiresMountsFor = stateDir;
after = [ "network.target" ];
# This is a HACK to fix missing dependencies of dynamic libs extracted from jars
environment.LD_LIBRARY_PATH = with pkgs.stdenv; "${cc.cc.lib}/lib";
# Make sure package upgrades trigger a service restart
@ -209,8 +159,27 @@ in
SystemCallErrorNumber = "EPERM";
SystemCallFilter = [ "@system-service" ];
# Required for ProtectSystem=strict
BindPaths = [ stateDir ];
StateDirectory = "unifi";
RuntimeDirectory = "unifi";
LogsDirectory = "unifi";
CacheDirectory = "unifi";
TemporaryFileSystem = [
# required as we want to create bind mounts below
"${stateDir}/webapps:rw"
];
# We must create the binary directories as bind mounts instead of symlinks
# This is because the controller resolves all symlinks to absolute paths
# to be used as the working directory.
BindPaths = [
"/var/log/unifi:${stateDir}/logs"
"/run/unifi:${stateDir}/run"
"${cfg.unifiPackage}/dl:${stateDir}/dl"
"${cfg.unifiPackage}/lib:${stateDir}/lib"
"${cfg.mongodbPackage}/bin:${stateDir}/bin"
"${cfg.unifiPackage}/webapps/ROOT:${stateDir}/webapps/ROOT"
];
# Needs network access
PrivateNetwork = false;
@ -220,6 +189,9 @@ in
};
};
imports = [
(mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data" )
];
meta.maintainers = with lib.maintainers; [ erictapen pennae ];
}
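
With the rework above, a typical configuration no longer sets `dataDir`; a minimal sketch of the migrated form:

```nix
{
  services.unifi = {
    enable = true;
    openPorts = true;
    # dataDir is gone: state now lives in /var/lib/unifi/data (StateDirectory),
    # logs in /var/log/unifi and the run directory in /run/unifi.
  };
}
```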


@ -38,7 +38,7 @@ in
};
# Mount the vmblock for drag-and-drop and copy-and-paste.
systemd.mounts = [
systemd.mounts = mkIf (!cfg.headless) [
{
description = "VMware vmblock fuse mount";
documentation = [ "https://github.com/vmware/open-vm-tools/blob/master/open-vm-tools/vmblock-fuse/design.txt" ];
@ -52,8 +52,8 @@ in
}
];
security.wrappers.vmware-user-suid-wrapper =
{ setuid = true;
security.wrappers.vmware-user-suid-wrapper = mkIf (!cfg.headless) {
setuid = true;
owner = "root";
group = "root";
source = "${open-vm-tools}/bin/vmware-user-suid-wrapper";

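The vmblock mount and the SUID wrapper above are now skipped for headless guests; a hedged sketch, assuming the module is exposed as `virtualisation.vmware.guest` (only the `headless` gating is visible in this diff):

```nix
{
  # Assumed option path; `headless` disables the vmblock fuse mount and
  # the vmware-user-suid-wrapper shown in the diff above.
  virtualisation.vmware.guest = {
    enable = true;
    headless = true;
  };
}
```
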

@ -5,7 +5,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
};
nodes = {
simple2 = {
simple = {
services.deluge = {
enable = true;
package = pkgs.deluge-2_x;
@ -16,7 +16,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
};
};
declarative2 = {
declarative = {
services.deluge = {
enable = true;
package = pkgs.deluge-2_x;
@ -45,27 +45,16 @@ import ./make-test-python.nix ({ pkgs, ...} : {
testScript = ''
start_all()
simple1.wait_for_unit("deluged")
simple2.wait_for_unit("deluged")
simple1.wait_for_unit("delugeweb")
simple2.wait_for_unit("delugeweb")
simple1.wait_for_open_port("8112")
simple2.wait_for_open_port("8112")
declarative1.wait_for_unit("network.target")
declarative2.wait_for_unit("network.target")
declarative1.wait_until_succeeds("curl --fail http://simple1:8112")
declarative2.wait_until_succeeds("curl --fail http://simple2:8112")
simple.wait_for_unit("deluged")
simple.wait_for_unit("delugeweb")
simple.wait_for_open_port("8112")
declarative.wait_for_unit("network.target")
declarative.wait_until_succeeds("curl --fail http://simple:8112")
declarative1.wait_for_unit("deluged")
declarative2.wait_for_unit("deluged")
declarative1.wait_for_unit("delugeweb")
declarative2.wait_for_unit("delugeweb")
declarative1.wait_until_succeeds("curl --fail http://declarative1:3142")
declarative2.wait_until_succeeds("curl --fail http://declarative2:3142")
declarative1.succeed(
"deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
)
declarative2.succeed(
declarative.wait_for_unit("deluged")
declarative.wait_for_unit("delugeweb")
declarative.wait_until_succeeds("curl --fail http://declarative:3142")
declarative.succeed(
"deluge-console 'connect 127.0.0.1:58846 andrew password; help' | grep -q 'rm.*Remove a torrent'"
)
'';


@ -1,70 +1,230 @@
# This test is very comprehensive. It tests whether all hadoop services work well with each other.
# Run this when updating the Hadoop package or making significant changes to the hadoop module.
# For a more basic test, see hdfs.nix and yarn.nix
import ../make-test-python.nix ({pkgs, ...}: {
nodes = let
package = pkgs.hadoop;
coreSite = {
"fs.defaultFS" = "hdfs://master";
"fs.defaultFS" = "hdfs://ns1";
};
hdfsSite = {
"dfs.namenode.rpc-bind-host" = "0.0.0.0";
"dfs.namenode.http-bind-host" = "0.0.0.0";
"dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
# HA Quorum Journal Manager configuration
"dfs.nameservices" = "ns1";
"dfs.ha.namenodes.ns1" = "nn1,nn2";
"dfs.namenode.shared.edits.dir.ns1.nn1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
"dfs.namenode.shared.edits.dir.ns1.nn2" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
"dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
"dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
"dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
"dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
"dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
"dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
# Automatic failover configuration
"dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
"dfs.ha.automatic-failover.enabled.ns1" = "true";
"dfs.ha.fencing.methods" = "shell(true)";
"ha.zookeeper.quorum" = "zk1:2181";
};
yarnSiteHA = {
"yarn.resourcemanager.zk-address" = "zk1:2181";
"yarn.resourcemanager.ha.enabled" = "true";
"yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
"yarn.resourcemanager.hostname.rm1" = "rm1";
"yarn.resourcemanager.hostname.rm2" = "rm2";
"yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
"yarn.resourcemanager.cluster-id" = "cluster1";
# yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
# hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
# that causes AM containers to fail otherwise.
"yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
"yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
};
in {
master = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite;
hdfs.namenode.enabled = true;
yarn.resourcemanager.enabled = true;
};
virtualisation.memorySize = 1024;
zk1 = { ... }: {
services.zookeeper.enable = true;
networking.firewall.allowedTCPPorts = [ 2181 ];
};
worker = {pkgs, options, ...}: {
# HDFS cluster
nn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite;
hdfs.datanode.enabled = true;
yarn.nodemanager.enabled = true;
yarnSite = options.services.hadoop.yarnSite.default // {
"yarn.resourcemanager.hostname" = "master";
};
inherit package coreSite hdfsSite;
hdfs.namenode.enable = true;
hdfs.zkfc.enable = true;
};
};
nn2 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
hdfs.namenode.enable = true;
hdfs.zkfc.enable = true;
};
};
jn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
hdfs.journalnode.enable = true;
};
};
jn2 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
hdfs.journalnode.enable = true;
};
};
jn3 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
hdfs.journalnode.enable = true;
};
};
dn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
hdfs.datanode.enable = true;
};
};
# YARN cluster
rm1 = {pkgs, options, ...}: {
virtualisation.memorySize = 1024;
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
yarn.resourcemanager.enable = true;
};
};
rm2 = {pkgs, options, ...}: {
virtualisation.memorySize = 1024;
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
yarn.resourcemanager.enable = true;
};
};
nm1 = {pkgs, options, ...}: {
virtualisation.memorySize = 2048;
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
yarn.nodemanager.enable = true;
};
};
};
testScript = ''
start_all()
master.wait_for_unit("network.target")
master.wait_for_unit("hdfs-namenode")
#### HDFS tests ####
master.wait_for_open_port(8020)
master.wait_for_open_port(9870)
zk1.wait_for_unit("network.target")
jn1.wait_for_unit("network.target")
jn2.wait_for_unit("network.target")
jn3.wait_for_unit("network.target")
nn1.wait_for_unit("network.target")
nn2.wait_for_unit("network.target")
dn1.wait_for_unit("network.target")
worker.wait_for_unit("network.target")
worker.wait_for_unit("hdfs-datanode")
worker.wait_for_open_port(9864)
worker.wait_for_open_port(9866)
worker.wait_for_open_port(9867)
zk1.wait_for_unit("zookeeper")
jn1.wait_for_unit("hdfs-journalnode")
jn2.wait_for_unit("hdfs-journalnode")
jn3.wait_for_unit("hdfs-journalnode")
master.succeed("curl -f http://worker:9864")
worker.succeed("curl -f http://master:9870")
zk1.wait_for_open_port(2181)
jn1.wait_for_open_port(8480)
jn1.wait_for_open_port(8485)
jn2.wait_for_open_port(8480)
jn2.wait_for_open_port(8485)
worker.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
# Namenodes must be stopped before initializing the cluster
nn1.succeed("systemctl stop hdfs-namenode")
nn2.succeed("systemctl stop hdfs-namenode")
nn1.succeed("systemctl stop hdfs-zkfc")
nn2.succeed("systemctl stop hdfs-zkfc")
master.wait_for_unit("yarn-resourcemanager")
# Initialize zookeeper for failover controller
nn1.succeed("sudo -u hdfs hdfs zkfc -formatZK 2>&1 | systemd-cat")
master.wait_for_open_port(8030)
master.wait_for_open_port(8031)
master.wait_for_open_port(8032)
master.wait_for_open_port(8088)
worker.succeed("curl -f http://master:8088")
# Format NN1 and start it
nn1.succeed("sudo -u hdfs hadoop namenode -format 2>&1 | systemd-cat")
nn1.succeed("systemctl start hdfs-namenode")
nn1.wait_for_open_port(9870)
nn1.wait_for_open_port(8022)
nn1.wait_for_open_port(8020)
worker.wait_for_unit("yarn-nodemanager")
worker.wait_for_open_port(8042)
worker.wait_for_open_port(8040)
master.succeed("curl -f http://worker:8042")
# Bootstrap NN2 from NN1 and start it
nn2.succeed("sudo -u hdfs hdfs namenode -bootstrapStandby 2>&1 | systemd-cat")
nn2.succeed("systemctl start hdfs-namenode")
nn2.wait_for_open_port(9870)
nn2.wait_for_open_port(8022)
nn2.wait_for_open_port(8020)
nn1.succeed("netstat -tulpne | systemd-cat")
assert "Total Nodes:1" in worker.succeed("yarn node -list")
# Start failover controllers
nn1.succeed("systemctl start hdfs-zkfc")
nn2.succeed("systemctl start hdfs-zkfc")
assert "Estimated value of Pi is" in worker.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
assert "SUCCEEDED" in worker.succeed("yarn application -list -appStates FINISHED")
worker.succeed("sudo -u hdfs hdfs dfs -ls / | systemd-cat")
# DN should have started by now, but confirm anyway
dn1.wait_for_unit("hdfs-datanode")
# Print states of namenodes
dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
# Wait for cluster to exit safemode
dn1.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
# test R/W
dn1.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
# Test NN failover
nn1.succeed("systemctl stop hdfs-namenode")
assert "active" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
nn1.succeed("systemctl start hdfs-namenode")
nn1.wait_for_open_port(9870)
nn1.wait_for_open_port(8022)
nn1.wait_for_open_port(8020)
assert "standby" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
#### YARN tests ####
rm1.wait_for_unit("network.target")
rm2.wait_for_unit("network.target")
nm1.wait_for_unit("network.target")
rm1.wait_for_unit("yarn-resourcemanager")
rm1.wait_for_open_port(8088)
rm2.wait_for_unit("yarn-resourcemanager")
rm2.wait_for_open_port(8088)
nm1.wait_for_unit("yarn-nodemanager")
nm1.wait_for_open_port(8042)
nm1.wait_for_open_port(8040)
nm1.wait_until_succeeds("yarn node -list | grep Nodes:1")
nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
nm1.succeed("sudo -u yarn yarn node -list | systemd-cat")
# Test RM failover
rm1.succeed("systemctl stop yarn-resourcemanager")
assert "standby" not in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
rm1.succeed("systemctl start yarn-resourcemanager")
rm1.wait_for_unit("yarn-resourcemanager")
rm1.wait_for_open_port(8088)
assert "standby" in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
assert "Estimated value of Pi is" in nm1.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
assert "SUCCEEDED" in nm1.succeed("yarn application -list -appStates FINISHED")
'';
})
})


@ -1,36 +1,34 @@
# Test a minimal HDFS cluster with no HA
import ../make-test-python.nix ({...}: {
nodes = {
namenode = {pkgs, ...}: {
virtualisation.memorySize = 1024;
services.hadoop = {
package = pkgs.hadoop;
hdfs.namenode.enabled = true;
hdfs = {
namenode = {
enable = true;
formatOnInit = true;
};
httpfs.enable = true;
};
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
};
hdfsSite = {
"dfs.replication" = 1;
"dfs.namenode.rpc-bind-host" = "0.0.0.0";
"dfs.namenode.http-bind-host" = "0.0.0.0";
"hadoop.proxyuser.httpfs.groups" = "*";
"hadoop.proxyuser.httpfs.hosts" = "*";
};
};
networking.firewall.allowedTCPPorts = [
9870 # namenode.http-address
8020 # namenode.rpc-address
];
};
datanode = {pkgs, ...}: {
services.hadoop = {
package = pkgs.hadoop;
hdfs.datanode.enabled = true;
hdfs.datanode.enable = true;
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
"hadoop.proxyuser.httpfs.groups" = "*";
"hadoop.proxyuser.httpfs.hosts" = "*";
};
};
networking.firewall.allowedTCPPorts = [
9864 # datanode.http.address
9866 # datanode.address
9867 # datanode.ipc.address
];
};
};
@ -50,5 +48,13 @@ import ../make-test-python.nix ({...}: {
namenode.succeed("curl -f http://namenode:9870")
datanode.succeed("curl -f http://datanode:9864")
datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
datanode.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
assert "testfilecontents" in datanode.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
namenode.wait_for_unit("hdfs-httpfs")
namenode.wait_for_open_port(14000)
assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
'';
})


@ -1,28 +1,20 @@
# This only tests if YARN is able to start its services
import ../make-test-python.nix ({...}: {
nodes = {
resourcemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
services.hadoop.yarn.resourcemanager.enabled = true;
services.hadoop.yarn.resourcemanager.enable = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
};
networking.firewall.allowedTCPPorts = [
8088 # resourcemanager.webapp.address
8031 # resourcemanager.resource-tracker.address
];
};
nodemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
services.hadoop.yarn.nodemanager.enabled = true;
services.hadoop.yarn.nodemanager.enable = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.hostname" = "resourcemanager";
"yarn.nodemanager.log-dirs" = "/tmp/userlogs";
"yarn.nodemanager.address" = "0.0.0.0:8041";
};
networking.firewall.allowedTCPPorts = [
8042 # nodemanager.webapp.address
8041 # nodemanager.address
];
};
};
@ -38,7 +30,6 @@ import ../make-test-python.nix ({...}: {
nodemanager.wait_for_unit("yarn-nodemanager")
nodemanager.wait_for_unit("network.target")
nodemanager.wait_for_open_port(8042)
nodemanager.wait_for_open_port(8041)
resourcemanager.succeed("curl -f http://localhost:8088")
nodemanager.succeed("curl -f http://localhost:8042")


@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
pname = "kora-icon-theme";
version = "1.4.5";
version = "1.4.7";
src = fetchFromGitHub {
owner = "bikass";
repo = "kora";
rev = "v${version}";
sha256 = "sha256-5tXXAfGY5JQ5RiKayUuQJDgX6sPHRi8Hy2ht/Hl0hdo=";
sha256 = "sha256-Ol4DrQJmQT/LIU5qWJJEm6od7e29h7g913YTFQjudBQ=";
};
nativeBuildInputs = [


@ -1,6 +1,6 @@
{
"commit": "b60d5f4b773d16857c105718faad9699e145edcd",
"url": "https://github.com/commercialhaskell/all-cabal-hashes/archive/b60d5f4b773d16857c105718faad9699e145edcd.tar.gz",
"sha256": "19avxynbjhkhvjy5kcxgd3fp0b2nczsk213s1za488r6kksj90f5",
"msg": "Update from Hackage at 2021-10-18T14:27:09Z"
"commit": "f2537d46db49014726f8ad00dcc60f5e41213397",
"url": "https://github.com/commercialhaskell/all-cabal-hashes/archive/f2537d46db49014726f8ad00dcc60f5e41213397.tar.gz",
"sha256": "021j2xn1xk8fqs7648si42n7z6rjzp4jnags4jkfnk1f81swns6h",
"msg": "Update from Hackage at 2021-10-23T04:57:02Z"
}


@ -142,24 +142,14 @@ let
in
stdenv.mkDerivation (rec {
version = "9.2.0.20210821";
version = "9.2.1";
pname = "${targetPrefix}ghc${variantSuffix}";
src = fetchurl {
url = "https://downloads.haskell.org/ghc/9.2.1-rc1/ghc-${version}-src.tar.xz";
sha256 = "1q2pppxv2avhykyxvyq72r5p97rkkiqp19b77yhp85ralbcp4ivw";
url = "https://downloads.haskell.org/ghc/${version}/ghc-${version}-src.tar.xz";
sha256 = "f444012f97a136d9940f77cdff03fda48f9475e2ed0fec966c4d35c4df55f746";
};
patches = [
# picked from release branch, remove with the next release candidate,
# see https://gitlab.haskell.org/ghc/ghc/-/issues/19950#note_373726
(fetchpatch {
name = "fix-darwin-link-failure.patch";
url = "https://gitlab.haskell.org/ghc/ghc/-/commit/77456387025ca74299ecc70621cbdb62b1b6ffc9.patch";
sha256 = "1g8smrn7hj8cbp9fhrylvmrb15s0xd8lhdgxqnx0asnd4az82gj8";
})
];
enableParallelBuilding = true;
outputs = [ "out" "doc" ];
@ -255,7 +245,7 @@ stdenv.mkDerivation (rec {
] ++ lib.optionals enableDocs [
sphinx
] ++ lib.optionals stdenv.isDarwin [
# TODO(@sternenseemann): use XATTR env var after backport of
# TODO(@sternenseemann): backport addition of XATTR env var like
# https://gitlab.haskell.org/ghc/ghc/-/merge_requests/6447
xattr
];


@ -1,14 +0,0 @@
diff --git a/Data/Vector/Storable/Mutable.hs b/Data/Vector/Storable/Mutable.hs
index 8b538bc..2b74fce 100644
--- a/Data/Vector/Storable/Mutable.hs
+++ b/Data/Vector/Storable/Mutable.hs
@@ -197,7 +197,9 @@ storableSet (MVector n fp) x
1 -> storableSetAsPrim n fp x (undefined :: Word8)
2 -> storableSetAsPrim n fp x (undefined :: Word16)
4 -> storableSetAsPrim n fp x (undefined :: Word32)
+#if !defined(ghcjs_HOST_OS)
8 -> storableSetAsPrim n fp x (undefined :: Word64)
+#endif
_ -> unsafeWithForeignPtr fp $ \p -> do
poke p x


@ -8,10 +8,10 @@
}:
mkDerivation {
pname = "cabal2nix";
version = "unstable-2021-09-28";
version = "unstable-2021-10-23";
src = fetchzip {
url = "https://github.com/NixOS/cabal2nix/archive/b4d893ed1a7a66b0046dd8a48f62b81de670ab02.tar.gz";
sha256 = "0xl5a0gfxrqz8pkx43zrj84xvcg15723lgvirxdcvc4zqa732zjg";
url = "https://github.com/NixOS/cabal2nix/archive/8aeef87436468a416e5908b48ec82ac3f15eb885.tar.gz";
sha256 = "1w6wabp0v2fii5i28nsp0ss6dsz222p94mmxrrns3q0df82s2cm1";
};
isLibrary = true;
isExecutable = true;


@ -281,7 +281,9 @@ self: super: {
lvmrun = disableHardening (dontCheck super.lvmrun) ["format"];
matplotlib = dontCheck super.matplotlib;
# https://github.com/matterhorn-chat/matterhorn/issues/679 they do not want to be on stackage
matterhorn = doJailbreak super.matterhorn; # this is needed until the end of time :')
matterhorn = doJailbreak (super.matterhorn.overrideScope (self: super: {
brick = self.brick_0_64_2;
}));
memcache = dontCheck super.memcache;
metrics = dontCheck super.metrics;
milena = dontCheck super.milena;
@ -631,20 +633,7 @@ self: super: {
# removed when the next idris release (1.3.4 probably) comes
# around.
idris = generateOptparseApplicativeCompletion "idris"
(doJailbreak (dontCheck
(appendPatches super.idris [
# compatibility with haskeline >= 0.8
(pkgs.fetchpatch {
url = "https://github.com/idris-lang/Idris-dev/commit/89a87cf666eb8b27190c779e72d0d76eadc1bc14.patch";
sha256 = "0fv493zlpgjsf57w0sncd4vqfkabfczp3xazjjmqw54m9rsfix35";
})
# compatibility with megaparsec >= 0.9
(pkgs.fetchpatch {
url = "https://github.com/idris-lang/Idris-dev/commit/6ea9bc913877d765048d7cdb7fc5aec60b196fac.patch";
sha256 = "0yms74d1xdxd1c08dnp45nb1ddzq54n6hqgzxx0r494wy614ir8q";
})
])
));
(doJailbreak (dontCheck super.idris));
# https://github.com/pontarius/pontarius-xmpp/issues/105
pontarius-xmpp = dontCheck super.pontarius-xmpp;
@ -692,19 +681,17 @@ self: super: {
# For 2.17 support: https://github.com/JonasDuregard/sized-functors/pull/10
size-based = doJailbreak super.size-based;
# Remove as soon as we update to monoid-extras 0.6 and unpin these packages
dual-tree = doJailbreak super.dual-tree;
diagrams-core = doJailbreak super.diagrams-core;
# https://github.com/diagrams/diagrams-braille/issues/1
diagrams-braille = doJailbreak super.diagrams-braille;
# Apply patch from master to add compat with optparse-applicative >= 0.16.
# We unfortunately can't upgrade to 1.4.4 which includes this patch yet
# since it would require monoid-extras 0.6 which breaks other diagrams libs.
diagrams-lib = doJailbreak (appendPatch super.diagrams-lib
(pkgs.fetchpatch {
url = "https://github.com/diagrams/diagrams-lib/commit/4b9842c3e3d653be69af19778970337775e2404d.patch";
sha256 = "0xqvzh3ip9i0nv8xnh41afxki64r259pxq8ir1a4v99ggnldpjaa";
includes = [ "*/CmdLine.hs" ];
}));
# https://github.com/timbod7/haskell-chart/pull/231#issuecomment-953745932
Chart-diagrams = doJailbreak super.Chart-diagrams;
# https://github.com/xu-hao/namespace/issues/1
namespace = doJailbreak super.namespace;
# https://github.com/cchalmers/plots/issues/46
plots = doJailbreak super.plots;
# https://github.com/diagrams/diagrams-solve/issues/4
diagrams-solve = dontCheck super.diagrams-solve;
@ -1132,8 +1119,10 @@ self: super: {
});
# Chart-tests needs and compiles some modules from Chart itself
Chart-tests = (addExtraLibrary super.Chart-tests self.QuickCheck).overrideAttrs (old: {
preCheck = old.postPatch or "" + ''
Chart-tests = overrideCabal (addExtraLibrary super.Chart-tests self.QuickCheck) (old: {
# https://github.com/timbod7/haskell-chart/issues/233
jailbreak = true;
preCheck = old.preCheck or "" + ''
tar --one-top-level=../chart --strip-components=1 -xf ${self.Chart.src}
'';
});
@ -2071,4 +2060,12 @@ EOT
# file revision on hackage was gifted CRLF line endings
gogol-core = appendPatch super.gogol-core ./patches/gogol-core-144.patch;
# cabal tries to install files we're supplying from the system
# https://github.com/hslua/hslua/pull/103
lua = appendPatch super.lua (pkgs.fetchpatch {
url = "https://github.com/hslua/hslua/pull/103/commits/814bf1bb284151e827b1c11a7277819ed2779dd2.patch";
sha256 = "1kj0g51lkjyf6jv2ikayb3cfh0dcr669swmxl9a2mcrizxcbkrhy";
stripLen = 1;
});
} // import ./configuration-tensorflow.nix {inherit pkgs haskellLib;} self super
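
The matterhorn change above pins `brick` only inside that package's dependency scope; as a hedged illustration of the same `overrideScope` pattern applied in a user overlay (`myPackage` is hypothetical):

```nix
# Sketch of an overlay using the same combinators; `myPackage` is hypothetical.
final: prev: {
  haskellPackages = prev.haskellPackages.override {
    overrides = hself: hsuper: {
      myPackage = prev.haskell.lib.doJailbreak
        (hsuper.myPackage.overrideScope (self: super: {
          brick = self.brick_0_64_2;
        }));
    };
  };
}
```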


@ -43,6 +43,11 @@ self: super: {
unix = null;
xhtml = null;
# Workaround for https://gitlab.haskell.org/ghc/ghc/-/issues/20594
tf-random = overrideCabal super.tf-random {
doHaddock = !pkgs.stdenv.isAarch64;
};
aeson = appendPatch (doJailbreak super.aeson) (pkgs.fetchpatch {
url = "https://gitlab.haskell.org/ghc/head.hackage/-/raw/dfd024c9a336c752288ec35879017a43bd7e85a0/patches/aeson-1.5.6.0.patch";
sha256 = "07rk7f0lhgilxvbg2grpl1p5x25wjf9m7a0wqmi2jr0q61p9a0nl";
@ -239,12 +244,10 @@ self: super: {
# https://github.com/Soostone/retry/issues/71
retry = dontCheck super.retry;
# Disable tests pending resolution of
# https://github.com/haskell/text/issues/380 or https://github.com/fpco/streaming-commons/issues/60
streaming-commons = dontCheck (appendPatch super.streaming-commons (pkgs.fetchpatch {
streaming-commons = appendPatch super.streaming-commons (pkgs.fetchpatch {
url = "https://gitlab.haskell.org/ghc/head.hackage/-/raw/dfd024c9a336c752288ec35879017a43bd7e85a0/patches/streaming-commons-0.2.2.1.patch";
sha256 = "04wi1jskr3j8ayh88kkx4irvhhgz0i7aj6fblzijy0fygikvidpy";
}));
});
# hlint 3.3 needs a ghc-lib-parser newer than the one from stackage
hlint = super.hlint_3_3_4.overrideScope (self: super: {


@ -101,9 +101,6 @@ self: super:
# still present here https://github.com/glguy/th-abstraction/issues/53
th-abstraction = dontCheck super.th-abstraction;
# https://github.com/haskell/vector/issues/410
vector = appendPatch super.vector (../compilers/ghcjs/patches/vector-ghcjs-storable-set.patch) ;
# Need hedgehog for tests, which fails to compile due to dep on concurrent-output
zenc = dontCheck super.zenc;
}


@ -88,7 +88,6 @@ broken-packages:
- aeson-tiled
- aeson-typescript
- aeson-utils
- aeson-via
- affection
- affine-invariant-ensemble-mcmc
- Agata
@ -128,6 +127,7 @@ broken-packages:
- alure
- amazon-emailer
- amazonka-contrib-rds-utils
- amazonka-mediaconvert
- amazonka-s3-streaming
- amazon-products
- amby
@ -499,7 +499,6 @@ broken-packages:
- bytestring-show
- bytestring-substring
- bytestring-time
- bytestring-trie
- bytestring-typenats
- c0parser
- c10k
@ -841,6 +840,7 @@ broken-packages:
- CoreErlang
- core-haskell
- corenlp-parser
- core-telemetry
- Coroutine
- coroutine-object
- CouchDB
@ -1051,7 +1051,6 @@ broken-packages:
- dia-base
- diagrams-boolean
- diagrams-builder
- diagrams-canvas
- diagrams-graphviz
- diagrams-gtk
- diagrams-pdf
@ -1330,6 +1329,7 @@ broken-packages:
- expresso
- extcore
- extended-categories
- extensible
- extensible-effects-concurrent
- extensible-skeleton
- external-sort
@ -1921,6 +1921,7 @@ broken-packages:
- hashable-orphans
- hashabler
- hashed-storage
- hashes
- hashring
- hashtables-plus
- hasim
@ -2339,6 +2340,8 @@ broken-packages:
- hslogger-reader
- hslogger-template
- hs-logo
- hslua-examples
- hslua-module-version
- hsluv-haskell
- hsmagick
- hsmodetweaks
@ -2470,7 +2473,6 @@ broken-packages:
- hw-json-simd
- hw-mquery
- hworker
- hw-prim-bits
- hw-simd
- hwsl2
- hx
@ -2845,6 +2847,7 @@ broken-packages:
- libtagc
- libxls
- libxslt
- libyaml-streamly
- lie
- life-sync
- lifted-protolude
@ -3656,6 +3659,7 @@ broken-packages:
- persistent-mongoDB
- persistent-mysql-haskell
- persistent-odbc
- persistent-postgresql-streaming
- persistent-protobuf
- persistent-ratelimit
- persistent-redis
@ -3757,6 +3761,7 @@ broken-packages:
- pointful
- pointless-haskell
- pokemon-go-protobuf-types
- poker-base
- poker-eval
- pokitdok
- polar-configfile
@ -4123,6 +4128,7 @@ broken-packages:
- resource-effect
- resource-embed
- restartable
- rest-rewrite
- restyle
- resumable-exceptions
- rethinkdb
@ -4810,7 +4816,6 @@ broken-packages:
- tapioca
- TaskMonad
- tasty-auto
- tasty-checklist
- tasty-fail-fast
- tasty-grading-system
- tasty-hedgehog-coverage
@ -4827,6 +4832,7 @@ broken-packages:
- tds
- teams
- teeth
- telega
- telegram
- telegram-api
- telegram-bot-simple


@ -72,18 +72,6 @@ default-package-overrides:
# gi-gdkx11-4.x requires gtk-4.x, which is still under development and
# not yet available in Nixpkgs
- gi-gdkx11 < 4
# 2021-05-11: not all diagrams libraries have adjusted to
# monoid-extras 0.6 yet, keep them pinned to lower versions
# until we can do a full migration, see
# https://github.com/diagrams/diagrams-core/issues/115
# We can keep this pin at most until base 4.15
# Since the monoid-extras adjustment was combined with
# a major release in some cases, we need to wait for
# diagrams 1.5 to be released.
- monoid-extras < 0.6
- dual-tree < 0.2.3.0
- diagrams-core < 1.5.0
- diagrams-lib < 1.4.4
# streamly-* packages which are not in stackage and to be constrained
# as long as we have streamly < 0.8.0
- streamly-archive < 0.1.0


@ -221,9 +221,6 @@ dont-distribute-packages:
- IORefCAS
- IndexedList
- InfixApplicative
- JSON-Combinator
- JSON-Combinator-Examples
- JSONb
- Javasf
- JsContracts
- JsonGrammar
@ -234,7 +231,6 @@ dont-distribute-packages:
- KiCS-debugger
- KiCS-prophecy
- LDAPv3
- LambdaDesigner
- LambdaINet
- LambdaPrettyQuote
- LambdaShell
@ -408,7 +404,6 @@ dont-distribute-packages:
- agda-snippets-hakyll
- agentx
- aip
- airship
- aivika-distributed
- algebra-checkers
- algebra-driven-design
@ -1003,7 +998,6 @@ dont-distribute-packages:
- dph-prim-interface
- dph-prim-par
- dph-prim-seq
- dprox
- dropbox-sdk
- dropsolve
- dsh-sql
@ -1045,6 +1039,7 @@ dont-distribute-packages:
- enumeration
- enumerator-fd
- enumerator-tf
- envy-extensible
- ephemeral
- erf-native
- eros-client
@ -1053,6 +1048,7 @@ dont-distribute-packages:
- errors-ext
- ersatz-toysat
- esotericbot
- esqueleto-streaming
- estreps
- eternity
- eternity-timestamped
@ -1724,8 +1720,6 @@ dont-distribute-packages:
- iException
- ice40-prim
- ide-backend
- ide-backend-common
- ide-backend-server
- ideas-math
- ideas-math-types
- ideas-statistics
@ -1832,7 +1826,6 @@ dont-distribute-packages:
- json-tokens
- json2-hdbc
- jsons-to-schema
- jspath
- jvm
- jvm-batching
- jvm-streaming
@ -1904,10 +1897,6 @@ dont-distribute-packages:
- lambdacube
- lambdacube-bullet
- lambdacube-compiler
- lambdacube-core
- lambdacube-edsl
- lambdacube-engine
- lambdacube-examples
- lambdacube-gl
- lambdacube-samples
- lambdaya-bus
@ -2871,6 +2860,8 @@ dont-distribute-packages:
- sql-simple-postgresql
- sql-simple-sqlite
- sqlite-simple-typed
- squeal-postgresql-ltree
- squeal-postgresql-uuid-ossp
- squeeze
- sr-extra
- sscgi
@ -2995,7 +2986,6 @@ dont-distribute-packages:
- testbench
- text-json-qq
- text-plus
- text-trie
- text-xml-generic
- textmatetags
- th-alpha
@ -3097,7 +3087,6 @@ dont-distribute-packages:
- typson-esqueleto
- typson-selda
- u2f
- uber
- ucam-webauth
- uhc-light
- uhc-util
@ -3205,7 +3194,6 @@ dont-distribute-packages:
- web-routes-regular
- web-routing
- web3
- webapi
- webcrank-wai
- webdriver-w3c
- webserver
@ -3284,6 +3272,7 @@ dont-distribute-packages:
- yam-web
- yaml-rpc-scotty
- yaml-rpc-snap
- yaml-streamly
- yaml-unscrambler
- yarr-image-io
- yavie

File diff suppressed because it is too large.


@ -205,7 +205,7 @@ in package-set { inherit pkgs lib callPackage; } self // {
callCabal2nixWithOptions = name: src: extraCabal2nixOptions: args:
let
filter = path: type:
pkgs.lib.hasSuffix "${name}.cabal" path ||
pkgs.lib.hasSuffix ".cabal" path ||
baseNameOf path == "package.yaml";
expr = self.haskellSrc2nix {
inherit name extraCabal2nixOptions;

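With the relaxed filter above, `callCabal2nixWithOptions` keeps any `*.cabal` file from the source tree, not just `<name>.cabal`; a hedged usage sketch (the path and the extra cabal2nix flag are illustrative):

```nix
# Sketch: the package name no longer has to match the .cabal file name.
{ haskellPackages }:
haskellPackages.callCabal2nixWithOptions "my-project" ./. "--no-check" { }
```
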

@ -15,15 +15,16 @@
buildPythonPackage rec {
pname = "aiopvpc";
version = "2.2.0";
disabled = pythonOlder "3.8";
version = "2.2.2";
format = "pyproject";
disabled = pythonOlder "3.8";
src = fetchFromGitHub {
owner = "azogue";
repo = pname;
rev = "v${version}";
sha256 = "1hk3giwzzlcqnpw9kx3zrr808nmdb7qwac60fki5395qffd2fpqw";
sha256 = "sha256-wNMHzeKJ1kG0jnoI5fO3d5CBXE0cyoK92BkGunXK3pI=";
};
nativeBuildInputs = [
@ -49,7 +50,9 @@ buildPythonPackage rec {
" --cov --cov-report term --cov-report html" ""
'';
pythonImportsCheck = [ "aiopvpc" ];
pythonImportsCheck = [
"aiopvpc"
];
meta = with lib; {
description = "Python module to download Spanish electricity hourly prices (PVPC)";


@ -8,7 +8,7 @@
buildPythonPackage rec {
pname = "flux_led";
version = "0.24.12";
version = "0.24.14";
disabled = pythonOlder "3.7";
@ -16,7 +16,7 @@ buildPythonPackage rec {
owner = "Danielhiversen";
repo = "flux_led";
rev = version;
sha256 = "sha256-vhmqfHAqbgDUvbn+dR7TuA5SFjF72/nhCL1h+GPy+9c=";
sha256 = "sha256-lHsMQbKKgHjxzaPdnqAY7WAZK3CiWfVr5Z5DWXsvRWI=";
};
propagatedBuildInputs = [


@ -7,17 +7,18 @@
, korean-lunar-calendar
, pytestCheckHook
, pythonOlder
, six
}:
buildPythonPackage rec {
pname = "holidays";
version = "0.11.3.1";
format = "setuptools";
disabled = pythonOlder "3.6";
src = fetchPypi {
inherit pname version;
sha256 = "4855afe0ebf428efbcf848477828b889f8515be7f4f15ae26682919369d92774";
sha256 = "sha256-SFWv4Ov0KO+8+EhHeCi4ifhRW+f08VriZoKRk2nZJ3Q=";
};
propagatedBuildInputs = [
@ -25,18 +26,19 @@ buildPythonPackage rec {
python-dateutil
hijri-converter
korean-lunar-calendar
six
];
checkInputs = [
pytestCheckHook
];
pythonImportsCheck = [ "holidays" ];
pythonImportsCheck = [
"holidays"
];
meta = with lib; {
homepage = "https://github.com/dr-prodigy/python-holidays";
description = "Generate and work with holidays in Python";
homepage = "https://github.com/dr-prodigy/python-holidays";
license = licenses.mit;
maintainers = with maintainers; [ jluttine ];
};


@ -8,12 +8,12 @@
buildPythonPackage rec {
pname = "pymazda";
version = "0.2.2";
version = "0.3.0";
disabled = pythonOlder "3.6";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-nngYdoVY3rXfszoCpUWFtJ0U0Rjczxix/wJDOZD+2O4=";
sha256 = "sha256-D0odz4GkKvjuafhEGlHtRnO8lk4rV9y3imaHl7jXqJw=";
};
propagatedBuildInputs = [


@ -18,13 +18,14 @@
buildPythonPackage rec {
pname = "qcs-api-client";
version = "0.14.0";
version = "0.15.0";
format = "setuptools";
disabled = pythonOlder "3.7";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-CoiLMpaPRTISc0EO0jIMw/daTW8MyXQqaeGq0zaykmc=";
sha256 = "sha256-NzfHemIYQq2quYs3RNKF7NHfR6Vi8Sx4eRTVT2pTEYk=";
};
propagatedBuildInputs = [
@ -55,7 +56,9 @@ buildPythonPackage rec {
# Project has no tests
doCheck = false;
pythonImportsCheck = [ "qcs_api_client" ];
pythonImportsCheck = [
"qcs_api_client"
];
meta = with lib; {
description = "Python library for accessing the Rigetti QCS API";


@ -4,11 +4,11 @@ let
GCC_BASE = "${stdenv.cc.cc}/lib/gcc/${stdenv.hostPlatform.uname.processor}-unknown-linux-gnu/${stdenv.cc.cc.version}";
in stdenv.mkDerivation rec {
pname = "sparse";
version = "0.6.3";
version = "0.6.4";
src = fetchurl {
url = "mirror://kernel/software/devel/sparse/dist/${pname}-${version}.tar.xz";
sha256 = "16d8c4dhipjzjf8z4z7pix1pdpqydz0v4r7i345f5s09hjnxpxnl";
sha256 = "sha256-arKLSZG8au29c1UCkTYKpqs99B9ZIGqb3paQIIpuOHw=";
};
preConfigure = ''


@ -49,5 +49,8 @@ in stdenvNoCC.mkDerivation rec {
homepage = "https://github.com/HeinrichApfelmus/hyper-haskell";
license = licenses.bsd3;
maintainers = [ maintainers.rvl ];
# depends on electron-10.4.7 which is marked as insecure:
# https://github.com/NixOS/nixpkgs/pull/142641#issuecomment-957358476
broken = true;
};
}


@ -62,7 +62,7 @@ let
source = fetchGrammar grammar;
location = if grammar ? location then grammar.location else null;
};
grammars' = (import ./grammars);
grammars' = (import ./grammars { inherit lib; });
grammars = grammars' //
{ tree-sitter-ocaml = grammars'.tree-sitter-ocaml // { location = "ocaml"; }; } //
{ tree-sitter-ocaml-interface = grammars'.tree-sitter-ocaml // { location = "interface"; }; } //


@ -1,4 +1,4 @@
{ lib, stdenv, fetchFromGitHub, autoreconfHook, pkg-config, gettext, ncurses }:
{ lib, stdenv, fetchFromGitHub, fetchpatch, autoreconfHook, pkg-config, gettext, ncurses }:
stdenv.mkDerivation rec {
pname = "nudoku";
@ -11,6 +11,15 @@ stdenv.mkDerivation rec {
sha256 = "12v00z3p0ymi8f3w4b4bgl4c76irawn3kmd147r0ap6s9ssx2q6m";
};
patches = [
# Pull upstream fix for ncurses-6.3
(fetchpatch {
name = "ncurses-6.3.patch";
url = "https://github.com/jubalh/nudoku/commit/93899a0fd72e04b9f257e5f54af53466106b5959.patch";
sha256 = "1h3za0dnx8fk3vshql5mhcici8aw8j0vr7ra81p3r1rii4c479lm";
})
];
# Allow gettext 0.20
postPatch = ''
substituteInPlace configure.ac --replace 0.19 0.20


@ -106,8 +106,16 @@ installPhase() {
sed -E "s#(libEGL_nvidia)#$i/lib/\\1#" 10_nvidia.json > 10_nvidia.json.fixed
sed -E "s#(libnvidia-egl-wayland)#$i/lib/\\1#" 10_nvidia_wayland.json > 10_nvidia_wayland.json.fixed
install -Dm644 10_nvidia.json.fixed $i/share/glvnd/egl_vendor.d/nvidia.json
install -Dm644 10_nvidia_wayland.json.fixed $i/share/glvnd/egl_vendor.d/nvidia_wayland.json
install -Dm644 10_nvidia.json.fixed $i/share/glvnd/egl_vendor.d/10_nvidia.json
install -Dm644 10_nvidia_wayland.json.fixed $i/share/egl/egl_external_platform.d/10_nvidia_wayland.json
if [[ -f "15_nvidia_gbm.json" ]]; then
sed -E "s#(libnvidia-egl-gbm)#$i/lib/\\1#" 15_nvidia_gbm.json > 15_nvidia_gbm.json.fixed
install -Dm644 15_nvidia_gbm.json.fixed $i/share/egl/egl_external_platform.d/15_nvidia_gbm.json
mkdir -p $i/lib/gbm
ln -s $i/lib/libnvidia-allocator.so $i/lib/gbm/nvidia-drm_gbm.so
fi
fi
done


@ -41,7 +41,7 @@ let
i686bundled = versionAtLeast version "391" && !disable32Bit;
libPathFor = pkgs: pkgs.lib.makeLibraryPath [ pkgs.libdrm pkgs.xorg.libXext pkgs.xorg.libX11
pkgs.xorg.libXv pkgs.xorg.libXrandr pkgs.xorg.libxcb pkgs.zlib pkgs.stdenv.cc.cc ];
pkgs.xorg.libXv pkgs.xorg.libXrandr pkgs.xorg.libxcb pkgs.zlib pkgs.stdenv.cc.cc pkgs.wayland ];
self = stdenv.mkDerivation {
name = "nvidia-x11-${version}${nameSuffix}";


@ -1,13 +1,13 @@
{ lib, stdenv, fetchFromGitHub, autoconf, automake, pkg-config, dovecot, libtool, xapian, icu64 }:
stdenv.mkDerivation rec {
pname = "fts-xapian";
version = "1.4.11";
version = "1.4.14";
src = fetchFromGitHub {
owner = "grosjo";
repo = "fts-xapian";
rev = version;
sha256 = "sha256-HPmS2Z1PIEM9fc6EerCEigQJg5BK/115zOW2uxFqjP0=";
sha256 = "sha256-Banyg10AiM1Jw6Zfl4Dcpc0/6Km48lLVuQ3xRLylE7k=";
};
buildInputs = [ dovecot xapian icu64 ];


@ -1,22 +1,22 @@
{ lib, python3, groff, less, fetchFromGitHub, fetchpatch }:
{ lib, python3, groff, less, fetchFromGitHub }:
let
py = python3.override {
packageOverrides = self: super: {
awscrt = super.awscrt.overridePythonAttrs (oldAttrs: rec {
version = "0.11.24";
version = "0.12.4";
src = self.fetchPypi {
inherit (oldAttrs) pname;
inherit version;
sha256 = "sha256-uKpovKQEvwCFvgVw7/W1QtAffo48D5sIWav+XgcBYv8=";
sha256 = "sha256:1cmfkcv2zzirxsb989vx1hvna9nv24pghcvypl0zaxsjphv97mka";
};
});
botocore = super.botocore.overridePythonAttrs (oldAttrs: rec {
version = "2.0.0dev148";
version = "2.0.0dev155";
src = fetchFromGitHub {
owner = "boto";
repo = "botocore";
rev = "c0734f100f61bbef413cb04d9890bbffbccd230f";
sha256 = "sha256-ndSJdBF3NMNtpyHgYAksCUBDqlwPhugTkIK6Nby20oI=";
rev = "7083e5c204e139dc41f646e0ad85286b5e7c0c23";
sha256 = "sha256-aiCc/CXoTem0a9wI/AMBRK3g2BXJi7LpnUY/BxBEKVM=";
};
propagatedBuildInputs = super.botocore.propagatedBuildInputs ++ [py.pkgs.awscrt];
});
@ -40,24 +40,17 @@ let
in
with py.pkgs; buildPythonApplication rec {
pname = "awscli2";
version = "2.2.40"; # N.B: if you change this, change botocore to a matching version too
version = "2.3.4"; # N.B: if you change this, change botocore to a matching version too
src = fetchFromGitHub {
owner = "aws";
repo = "aws-cli";
rev = version;
sha256 = "sha256-IHnNRER9ePKVI9ez15HgxLDR1n6QR0iRESgNqbxQPx8=";
sha256 = "sha256-C/NrU+1AixuN4T1N5Zs8xduUQiwuQWvXkitQRnPJdNw=";
};
patches = [
(fetchpatch {
url = "https://github.com/mgorny/aws-cli/commit/85361123d2fa12eaedf912c046ffe39aebdd2bad.patch";
sha256 = "sha256-1Rb+/CY7ze1/DbJ6TfqHF01cfI2vixZ1dT91bmHTg/A=";
})
];
postPatch = ''
substituteInPlace setup.py \
substituteInPlace setup.cfg \
--replace "colorama>=0.2.5,<0.4.4" "colorama" \
--replace "cryptography>=3.3.2,<3.4.0" "cryptography" \
--replace "docutils>=0.10,<0.16" "docutils" \
@ -67,7 +60,7 @@ with py.pkgs; buildPythonApplication rec {
--replace "distro>=1.5.0,<1.6.0" "distro"
'';
checkInputs = [ jsonschema mock nose ];
checkInputs = [ jsonschema mock pytestCheckHook pytest-xdist ];
propagatedBuildInputs = [
awscrt
@ -93,8 +86,6 @@ with py.pkgs; buildPythonApplication rec {
# https://github.com/NixOS/nixpkgs/issues/16144#issuecomment-225422439
export HOME=$TMP
AWS_TEST_COMMAND=$out/bin/aws python scripts/ci/run-tests
'';
postInstall = ''


@ -48,7 +48,7 @@ stdenv.mkDerivation rec {
# 'BSD-like' license but that the 'regex' library (in the ngrep tarball) is
# GPLv2.
license = "ngrep"; # Some custom BSD-style, see LICENSE.txt
platforms = platforms.linux;
platforms = with platforms; linux ++ darwin;
maintainers = [ maintainers.bjornfor ];
};
}


@ -4,16 +4,16 @@ rustPlatform.buildRustPackage rec {
pname = "statix";
# also update version of the vim plugin in pkgs/misc/vim-plugins/overrides.nix
# the version can be found in flake.nix of the source code
version = "0.3.5";
version = "0.3.6";
src = fetchFromGitHub {
owner = "nerdypepper";
repo = pname;
rev = "v${version}";
sha256 = "sha256-vJvHmg6X/B6wQYjeX1FZC4MDGo0HkKbTmQH+l4tZAwg=";
sha256 = "sha256-fsEqPr+qtLNmTtxUxjcVDPoG7fjqFImnVHwscy2IBkE=";
};
cargoSha256 = "sha256-OfLpnVe1QIjpjpD4ticG/7AxPGFMMjBWN3DdLZq6pA8=";
cargoSha256 = "sha256-7fSJhRqZh7lUIe8vVzIVx+1phd+Am+GNzKN62NSuOYs=";
cargoBuildFlags = lib.optionals withJson [ "--features" "json" ];


@ -182,7 +182,7 @@ let
hledger-web
hlint
hpack
hyper-haskell
# hyper-haskell # depends on electron-10.4.7 which is marked as insecure
hyper-haskell-server-with-packages
icepeak
idris