Merge staging-next into staging (commit 02e03be2bf)
@@ -738,6 +738,13 @@ self: super:
     terminology has been deprecated and should be replaced with Far/Near in the configuration file.
    </para>
   </listitem>
+  <listitem>
+   <para>
+    The nix-gc service now accepts randomizedDelaySec (default: 0) and persistent (default: true) parameters.
+    By default nix-gc will now run immediately if it would have been triggered at least
+    once during the time when the timer was inactive.
+   </para>
+  </listitem>
  </itemizedlist>
 </section>
</section>
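For orientation only (this snippet is not part of the commit): a NixOS configuration that opts into the behaviour described in the note above might look roughly like the following sketch. It uses the nix.gc option path that the nix-gc module defines; the option prefix itself is not visible in the hunks shown here, and the values are illustrative.

  {
    # Hypothetical configuration fragment exercising the new nix-gc options.
    nix.gc = {
      automatic = true;              # enable the nix-gc timer at all
      dates = "weekly";              # systemd.time(7) calendar expression
      randomizedDelaySec = "45min";  # spread the start over a random delay
      persistent = true;             # catch up on runs missed while powered off
    };
  }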
@@ -3,21 +3,22 @@
with lib;

let
+  cfg = config.services.acpid;

  canonicalHandlers = {
    powerEvent = {
      event = "button/power.*";
-      action = config.services.acpid.powerEventCommands;
+      action = cfg.powerEventCommands;
    };

    lidEvent = {
      event = "button/lid.*";
-      action = config.services.acpid.lidEventCommands;
+      action = cfg.lidEventCommands;
    };

    acEvent = {
      event = "ac_adapter.*";
-      action = config.services.acpid.acEventCommands;
+      action = cfg.acEventCommands;
    };
  };

@@ -33,7 +34,7 @@ let
        echo "event=${handler.event}" > $fn
        echo "action=${pkgs.writeShellScriptBin "${name}.sh" handler.action }/bin/${name}.sh '%e'" >> $fn
      '';
-    in concatStringsSep "\n" (mapAttrsToList f (canonicalHandlers // config.services.acpid.handlers))
+    in concatStringsSep "\n" (mapAttrsToList f (canonicalHandlers // cfg.handlers))
    }
  '';

@@ -47,11 +48,7 @@ in

    services.acpid = {

-      enable = mkOption {
-        type = types.bool;
-        default = false;
-        description = "Whether to enable the ACPI daemon.";
-      };
+      enable = mkEnableOption "the ACPI daemon";

      logEvents = mkOption {
        type = types.bool;
@@ -129,26 +126,28 @@ in

  ###### implementation

-  config = mkIf config.services.acpid.enable {
+  config = mkIf cfg.enable {

    systemd.services.acpid = {
      description = "ACPI Daemon";
+      documentation = [ "man:acpid(8)" ];

      wantedBy = [ "multi-user.target" ];
      after = [ "systemd-udev-settle.service" ];

      path = [ pkgs.acpid ];

      serviceConfig = {
        Type = "forking";
+        ExecStart = escapeShellArgs
+          ([ "${pkgs.acpid}/bin/acpid"
+             "--foreground"
+             "--netlink"
+             "--confdir" "${acpiConfDir}"
+           ] ++ optional cfg.logEvents "--logevents"
+          );
      };

      unitConfig = {
        ConditionVirtualization = "!systemd-nspawn";
        ConditionPathExists = [ "/proc/acpi" ];
      };

-      script = "acpid ${optionalString config.services.acpid.logEvents "--logevents"} --confdir ${acpiConfDir}";
    };

  };
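As a rough usage sketch, not part of the commit: with the module refactored as above, enabling acpid and attaching a custom handler could look like this. The handlers/event/action option shape mirrors the canonicalHandlers structure shown earlier; the event pattern and the amixer command are illustrative assumptions.

  {
    # Hypothetical services.acpid configuration using the refactored module.
    services.acpid = {
      enable = true;       # now declared via mkEnableOption "the ACPI daemon"
      logEvents = true;    # adds --logevents to the acpid command line
      handlers.volumeDown = {
        event = "button/volumedown.*";        # acpid event pattern (assumed)
        action = "amixer -q set Master 5%-";  # wrapped by writeShellScriptBin as shown above
      };
    };
  }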
@@ -21,13 +21,45 @@
    };

    dates = mkOption {
-      default = "03:15";
      type = types.str;
+      default = "03:15";
+      example = "weekly";
      description = ''
-        Specification (in the format described by
+        How often or when garbage collection is performed. For most desktop and server systems
+        a sufficient garbage collection is once a week.
+
+        The format is described in
        <citerefentry><refentrytitle>systemd.time</refentrytitle>
-        <manvolnum>7</manvolnum></citerefentry>) of the time at
-        which the garbage collector will run.
+        <manvolnum>7</manvolnum></citerefentry>.
      '';
    };

+    randomizedDelaySec = mkOption {
+      default = "0";
+      type = types.str;
+      example = "45min";
+      description = ''
+        Add a randomized delay before each automatic upgrade.
+        The delay will be chosen between zero and this value.
+        This value must be a time span in the format specified by
+        <citerefentry><refentrytitle>systemd.time</refentrytitle>
+        <manvolnum>7</manvolnum></citerefentry>
+      '';
+    };
+
+    persistent = mkOption {
+      default = true;
+      type = types.bool;
+      example = false;
+      description = ''
+        Takes a boolean argument. If true, the time when the service
+        unit was last triggered is stored on disk. When the timer is
+        activated, the service unit is triggered immediately if it
+        would have been triggered at least once during the time when
+        the timer was inactive. Such triggering is nonetheless
+        subject to the delay imposed by RandomizedDelaySec=. This is
+        useful to catch up on missed runs of the service when the
+        system was powered down.
+      '';
+    };
+
@@ -50,11 +82,18 @@ in

  config = {

-    systemd.services.nix-gc =
-      { description = "Nix Garbage Collector";
-        script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
-        startAt = optional cfg.automatic cfg.dates;
+    systemd.services.nix-gc = {
+      description = "Nix Garbage Collector";
+      script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
+      startAt = optional cfg.automatic cfg.dates;
    };

+    systemd.timers.nix-gc = lib.mkIf cfg.automatic {
+      timerConfig = {
+        RandomizedDelaySec = cfg.randomizedDelaySec;
+        Persistent = cfg.persistent;
+      };
+    };
+
  };

@@ -1,11 +1,12 @@
{ lib, fetchzip }:

let
-  version = "0.022";
+  version = "0.034";

in fetchzip {
  name = "JuliaMono-${version}";
  url = "https://github.com/cormullion/juliamono/releases/download/v${version}/JuliaMono.zip";
-  sha256 = "sha256-/MVT6n842sSiuPZNYxN3q1vn6yvMvmcTEDyvAd2GikA=";
+  sha256 = "sha256:0xx3mhzs17baaich67kvwyzqg8h9ga11jrja2i8sxx4861dp1z85";
+
  postFetch = ''
    mkdir -p $out/share/fonts/truetype
@@ -14,9 +15,12 @@ in fetchzip {

  meta = {
    description = "A monospaced font for scientific and technical computing";
+    longDescription = ''
+      JuliaMono is a monospaced typeface designed for use in text editing environments that require a wide range of specialist and technical Unicode characters. It was intended as a fun experiment to be presented at the 2020 JuliaCon conference in Lisbon, Portugal (which of course didn’t physically happen in Lisbon, but online).
+    '';
    maintainers = with lib.maintainers; [ suhr ];
    platforms = with lib.platforms; all;
-    homepage = "https://juliamono.netlify.app/";
+    homepage = "https://cormullion.github.io/pages/2020-07-26-JuliaMono/";
    license = lib.licenses.ofl;
  };
}
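Not part of the diff, but for context: a font derivation like the one above is normally consumed through the NixOS font options. A minimal sketch, assuming the package is exposed under the attribute name julia-mono, would be:

  { pkgs, ... }:
  {
    # Hypothetical NixOS snippet installing the font system-wide.
    fonts.fonts = [
      pkgs.julia-mono  # attribute name assumed; the derivation installs into share/fonts/truetype
    ];
  }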
@@ -3,13 +3,13 @@
stdenv.mkDerivation rec {
  pname = "adoptopenjdk-icedtea-web";

-  version = "1.8.5";
+  version = "1.8.6";

  src = fetchFromGitHub {
    owner = "AdoptOpenJDK";
    repo = "IcedTea-Web";
    rev = "icedtea-web-${version}";
-    sha256 = "sha256-AC6D6n8jLdATXIXrDTHhs2QFnIZNaaZvJyFEqfxCxYQ=";
+    sha256 = "sha256-meqbFLGwCMpFoOVAfvtriRAS8ZWr374eSN3m0CdC2aM=";
  };

  nativeBuildInputs = [ autoreconfHook pkg-config bc ];
@@ -4,6 +4,8 @@ buildDunePackage rec {
  pname = "ocaml-migrate-parsetree";
  version = "2.1.0";

+  useDune2 = true;
+
  minimumOCamlVersion = "4.02";

  src = fetchurl {
@@ -4,11 +4,11 @@

buildPythonPackage rec {
  pname = "alerta";
-  version = "8.3.0";
+  version = "8.4.0";

  src = fetchPypi {
    inherit pname version;
-    sha256 = "83c7d751bad0cb9bd7886700da4cd83c5451b2e8eb8d4cc697966e02d6a565f8";
+    sha256 = "260ff3118e73396104129928217b0f317ac5afdff8221874d8986df22ecf5f34";
  };

  propagatedBuildInputs = [ six click requests requests-hawk pytz tabulate ];
@@ -2,11 +2,11 @@

buildPythonPackage rec {
  pname = "identify";
-  version = "1.5.14";
+  version = "1.6.1";

  src = fetchPypi {
    inherit pname version;
-    sha256 = "de7129142a5c86d75a52b96f394d94d96d497881d2aaf8eafe320cdbe8ac4bcc";
+    sha256 = "7b435803dc79a0f0ce887887a62ad360f3a9e8162ac0db9ee649d5d24085bf30";
  };

  pythonImportsCheck = [ "identify" ];
@@ -1,40 +1,48 @@
-{ lib, buildPythonPackage, isPy3k, fetchPypi, substituteAll, graphviz
-, pkg-config, doctest-ignore-unicode, mock, nose }:
+{ lib
+, buildPythonPackage
+, isPy3k
+, fetchPypi
+, substituteAll
+, graphviz
+, coreutils
+, pkg-config
+, pytest
+}:

buildPythonPackage rec {
  pname = "pygraphviz";
-  version = "1.6";
+  version = "1.7";

  disabled = !isPy3k;

  src = fetchPypi {
    inherit pname version;
-    sha256 = "411ae84a5bc313e3e1523a1cace59159f512336318a510573b47f824edef8860";
+    sha256 = "a7bec6609f37cf1e64898c59f075afd659106cf9356c5f387cecaa2e0cdb2304";
    extension = "zip";
  };

-  nativeBuildInputs = [ pkg-config ];
-  buildInputs = [ graphviz ];
-  checkInputs = [ doctest-ignore-unicode mock nose ];
-
  patches = [
-    # pygraphviz depends on graphviz being in PATH. This patch always prepends
-    # graphviz to PATH.
+    # pygraphviz depends on graphviz executables and wc being in PATH
    (substituteAll {
-      src = ./graphviz-path.patch;
-      inherit graphviz;
+      src = ./path.patch;
+      path = lib.makeBinPath [ graphviz coreutils ];
    })
  ];

-  # The tests are currently failing because of a bug in graphviz 2.40.1.
-  # Upstream does not want to skip the relevant tests:
-  # https://github.com/pygraphviz/pygraphviz/pull/129
-  doCheck = false;
+  nativeBuildInputs = [ pkg-config ];
+
+  buildInputs = [ graphviz ];
+
+  checkInputs = [ pytest ];
+
+  checkPhase = ''
+    pytest --pyargs pygraphviz
+  '';

  meta = with lib; {
    description = "Python interface to Graphviz graph drawing package";
    homepage = "https://github.com/pygraphviz/pygraphviz";
    license = licenses.bsd3;
-    maintainers = with maintainers; [ matthiasbeyer ];
+    maintainers = with maintainers; [ matthiasbeyer dotlambda ];
  };
}
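As a usage sketch unrelated to the version bump itself (the file name and channel import are assumptions): an ad-hoc environment with the updated pygraphviz could be put together like this.

  # Hypothetical shell.nix pulling pygraphviz from python3Packages.
  { pkgs ? import <nixpkgs> { } }:

  pkgs.mkShell {
    buildInputs = [
      (pkgs.python3.withPackages (ps: [ ps.pygraphviz ]))  # Python with pygraphviz importable
      pkgs.graphviz                                        # graph layout tools on PATH
    ];
  }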
@@ -1,13 +0,0 @@
diff --git a/pygraphviz/agraph.py b/pygraphviz/agraph.py
index 8f72024..2d8358e 100644
--- a/pygraphviz/agraph.py
+++ b/pygraphviz/agraph.py
@@ -1557,7 +1557,7 @@ class AGraph(object):
        import os
        import glob

-        paths = os.environ["PATH"]
+        paths = '@graphviz@/bin:' + os.environ["PATH"]
        if os.name == "nt":
            exe = ".exe"
        else:
pkgs/development/python-modules/pygraphviz/path.patch (new file, 13 lines)
@@ -0,0 +1,13 @@
diff --git a/pygraphviz/agraph.py b/pygraphviz/agraph.py
index d539ba0..f5bac3f 100644
--- a/pygraphviz/agraph.py
+++ b/pygraphviz/agraph.py
@@ -1792,7 +1792,7 @@ class AGraph:
        if platform.system() == "Windows":
            name += ".exe"

-        paths = os.environ["PATH"]
+        paths = '@path@'
        for path in paths.split(os.pathsep):
            match = glob.glob(os.path.join(path, name))
            if match:
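For clarity (not from the commit): the @path@ token above is replaced at build time by substituteAll, called from the pygraphviz expression earlier in this diff. A minimal standalone illustration of that mechanism, with assumed file names, might be:

  # Minimal sketch of substituteAll filling an @path@ placeholder.
  { pkgs ? import <nixpkgs> { } }:

  pkgs.substituteAll {
    src = ./path.patch;  # file containing the literal token @path@
    # extra attributes become substitution variables; @path@ is replaced by the joined bin path
    path = pkgs.lib.makeBinPath [ pkgs.graphviz pkgs.coreutils ];
  }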
pkgs/tools/admin/google-cloud-sdk/cloud_sql_proxy_path.patch (new file, 36 lines)
@@ -0,0 +1,36 @@
diff --git a/lib/googlecloudsdk/api_lib/sql/instances.py b/lib/googlecloudsdk/api_lib/sql/instances.py
index 0d88ffe..814a436 100644
--- a/lib/googlecloudsdk/api_lib/sql/instances.py
+++ b/lib/googlecloudsdk/api_lib/sql/instances.py
@@ -86,18 +86,19 @@ def GetRegionFromZone(gce_zone):
 def _GetCloudSqlProxyPath():
   """Determines the path to the cloud_sql_proxy binary."""
   sdk_bin_path = config.Paths().sdk_bin_path
-  if not sdk_bin_path:
-    # Check if cloud_sql_proxy is located on the PATH.
-    proxy_path = file_utils.FindExecutableOnPath('cloud_sql_proxy')
-    if proxy_path:
-      log.debug(
-          'Using cloud_sql_proxy found at [{path}]'.format(path=proxy_path))
-      return proxy_path
-    else:
-      raise exceptions.ToolException(
-          'A Cloud SQL Proxy SDK root could not be found. Please check your '
-          'installation.')
-  return os.path.join(sdk_bin_path, 'cloud_sql_proxy')
+  if sdk_bin_path and os.path.isfile(os.path.join(sdk_bin_path, 'cloud_sql_proxy')):
+    return os.path.join(sdk_bin_path, 'cloud_sql_proxy')
+
+  # Check if cloud_sql_proxy is located on the PATH.
+  proxy_path = file_utils.FindExecutableOnPath('cloud_sql_proxy')
+  if proxy_path:
+    log.debug(
+        'Using cloud_sql_proxy found at [{path}]'.format(path=proxy_path))
+    return proxy_path
+
+  raise exceptions.ToolException(
+      'A Cloud SQL Proxy SDK root could not be found. Please check your '
+      'installation.')


 def _RaiseProxyError(error_msg=None):
@@ -45,6 +45,8 @@ in stdenv.mkDerivation rec {
    ./gcloud-path.patch
    # Disable checking for updates for the package
    ./gsutil-disable-updates.patch
+    # Try to use cloud_sql_proxy from SDK only if it actually exists, otherwise, search for one in PATH
+    ./cloud_sql_proxy_path.patch
  ];

  installPhase = ''
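A hedged aside, not part of the commit: with the patched fallback above, gcloud looks for cloud_sql_proxy on PATH, so a user could provide the binary through the system environment. The attribute name cloud-sql-proxy below is assumed purely for illustration and may not match what a given package set exposes.

  { pkgs, ... }:
  {
    # Hypothetical snippet putting a Cloud SQL proxy binary on PATH for the patched lookup.
    environment.systemPackages = [
      pkgs.cloud-sql-proxy  # attribute name assumed for illustration
    ];
  }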