diff --git a/gnu/local.mk b/gnu/local.mk
index 7278c50e4f..8dd1abe07a 100644
--- a/gnu/local.mk
+++ b/gnu/local.mk
@@ -1720,6 +1720,7 @@ dist_patch_DATA = \
%D%/packages/patches/python-apsw-3.39.2.1-test-fix.patch \
%D%/packages/patches/python-aionotify-0.2.0-py3.8.patch \
%D%/packages/patches/python-argcomplete-1.11.1-fish31.patch \
+ %D%/packages/patches/python-basicsr-fuck-nvidia.patch \
%D%/packages/patches/python-cross-compile.patch \
%D%/packages/patches/python-configobj-setuptools.patch \
%D%/packages/patches/python-dateutil-pytest-compat.patch \
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 0566f4bd69..a5767a2c31 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -750,6 +750,72 @@ (define (delete-ifdefs file)
in terms of new algorithms.")
(license license:gpl3+)))

+(define-public python-basicsr
+ (package
+ (name "python-basicsr")
+ (version "1.4.2")
+ (source (origin
+ (method git-fetch)
+ (uri
+ (git-reference
+ (url "https://github.com/XPixelGroup/BasicSR")
+ (commit (string-append "v" version))))
+ (patches
+ (search-patches
+ "python-basicsr-fuck-nvidia.patch"))
+ (modules '((guix build utils)))
+ (snippet
+ #~(begin (substitute* (find-files "." "\\.py")
+ (("\\.cuda\\(\\)") "")
+ (("pretrained=True") "weights=None"))
+ ;; Instead of image files, custom LMDB databases are used
+ (delete-file-recursively "tests/data")))
+ (sha256
+ (base32
+ "0qjk1hf1qjla3f6hb8fd6dv9w3b77568z8g17mlcxl91bp031z2i"))))
+ (build-system python-build-system)
+ (arguments
+ (list
+ #:phases
+ #~(modify-phases %standard-phases
+ (add-after 'unpack 'fix-requirements
+ (lambda _
+ (substitute* "requirements.txt"
+ (("opencv-python") "") ; installed without egg-info
+ (("tb-nightly") ""))))
+ (add-before 'check 'pre-check
+ (lambda _
+ (setenv "HOME" (getcwd))
+ ;; These tests require the test data removed in the snippet above
+ (delete-file-recursively "tests/test_data")
+ ;; These tests fetch pretrained models over the network
+ (delete-file-recursively "tests/test_models")))
+ (replace 'check
+ (lambda* (#:key tests? #:allow-other-keys)
+ (when tests?
+ (invoke "pytest" "-vv")))))))
+ (propagated-inputs (list opencv ; used via its Python bindings
+ python-addict
+ python-future
+ python-lmdb
+ python-numpy
+ python-pillow
+ python-pyyaml
+ python-requests
+ python-scikit-image
+ python-scipy
+ python-pytorch
+ python-torchvision
+ python-tqdm
+ python-yapf))
+ (native-inputs (list lmdb python-cython python-pytest))
+ (home-page "https://github.com/XPixelGroup/BasicSR")
+ (synopsis "Image and video super-resolution toolbox")
+ (description "BasicSR is a PyTorch-based toolbox for image restoration
+tasks such as super-resolution, denoising, deblurring, and removal of JPEG
+artifacts.")
+ (license license:asl2.0)))
+
(define-public ncnn
(package
(name "ncnn")
diff --git a/gnu/packages/patches/python-basicsr-fuck-nvidia.patch b/gnu/packages/patches/python-basicsr-fuck-nvidia.patch
new file mode 100644
index 0000000000..30cc1cb9ad
--- /dev/null
+++ b/gnu/packages/patches/python-basicsr-fuck-nvidia.patch
@@ -0,0 +1,3233 @@
+diff --git a/basicsr/archs/arch_util.py b/basicsr/archs/arch_util.py
+index 11b82a7..875b2b6 100644
+--- a/basicsr/archs/arch_util.py
++++ b/basicsr/archs/arch_util.py
+@@ -10,7 +10,7 @@ from torch.nn import functional as F
+ from torch.nn import init as init
+ from torch.nn.modules.batchnorm import _BatchNorm
+
+-from basicsr.ops.dcn import ModulatedDeformConvPack, modulated_deform_conv
++from basicsr.ops.dcn import ModulatedDeformConvPack
+ from basicsr.utils import get_root_logger
+
+
+@@ -228,12 +228,8 @@ class DCNv2Pack(ModulatedDeformConvPack):
+ logger = get_root_logger()
+ logger.warning(f'Offset abs mean is {offset_absmean}, larger than 50.')
+
+- if LooseVersion(torchvision.__version__) >= LooseVersion('0.9.0'):
+- return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
+- self.dilation, mask)
+- else:
+- return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding,
+- self.dilation, self.groups, self.deformable_groups)
++ return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
++ self.dilation, mask)
+
+
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b):
+diff --git a/basicsr/archs/basicvsrpp_arch.py b/basicsr/archs/basicvsrpp_arch.py
+index d9699cb..e726b8b 100644
+--- a/basicsr/archs/basicvsrpp_arch.py
++++ b/basicsr/archs/basicvsrpp_arch.py
+@@ -69,14 +69,6 @@ class BasicVSRPlusPlus(nn.Module):
+ self.backbone = nn.ModuleDict()
+ modules = ['backward_1', 'forward_1', 'backward_2', 'forward_2']
+ for i, module in enumerate(modules):
+- if torch.cuda.is_available():
+- self.deform_align[module] = SecondOrderDeformableAlignment(
+- 2 * mid_channels,
+- mid_channels,
+- 3,
+- padding=1,
+- deformable_groups=16,
+- max_residue_magnitude=max_residue_magnitude)
+ self.backbone[module] = ConvResidualBlocks((2 + i) * mid_channels, mid_channels, num_blocks)
+
+ # upsampling module
+diff --git a/basicsr/archs/stylegan2_arch.py b/basicsr/archs/stylegan2_arch.py
+index 9ab37f5..42cb08c 100644
+--- a/basicsr/archs/stylegan2_arch.py
++++ b/basicsr/archs/stylegan2_arch.py
+@@ -4,7 +4,6 @@ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+-from basicsr.ops.fused_act import FusedLeakyReLU, fused_leaky_relu
+ from basicsr.ops.upfirdn2d import upfirdn2d
+ from basicsr.utils.registry import ARCH_REGISTRY
+
+@@ -141,8 +140,7 @@ class EqualLinear(nn.Module):
+ bias. Default: ``True``.
+ bias_init_val (float): Bias initialized value. Default: 0.
+ lr_mul (float): Learning rate multiplier. Default: 1.
+- activation (None | str): The activation after ``linear`` operation.
+- Supported: 'fused_lrelu', None. Default: None.
++ activation (None | str): Ignored.
+ """
+
+ def __init__(self, in_channels, out_channels, bias=True, bias_init_val=0, lr_mul=1, activation=None):
+@@ -150,10 +148,7 @@ class EqualLinear(nn.Module):
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.lr_mul = lr_mul
+- self.activation = activation
+- if self.activation not in ['fused_lrelu', None]:
+- raise ValueError(f'Wrong activation value in EqualLinear: {activation}'
+- "Supported ones are: ['fused_lrelu', None].")
++ self.activation = None
+ self.scale = (1 / math.sqrt(in_channels)) * lr_mul
+
+ self.weight = nn.Parameter(torch.randn(out_channels, in_channels).div_(lr_mul))
+@@ -167,12 +162,7 @@ class EqualLinear(nn.Module):
+ bias = None
+ else:
+ bias = self.bias * self.lr_mul
+- if self.activation == 'fused_lrelu':
+- out = F.linear(x, self.weight * self.scale)
+- out = fused_leaky_relu(out, bias)
+- else:
+- out = F.linear(x, self.weight * self.scale, bias=bias)
+- return out
++ return F.linear(x, self.weight * self.scale, bias=bias)
+
+ def __repr__(self):
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
+@@ -318,7 +308,7 @@ class StyleConv(nn.Module):
+ sample_mode=sample_mode,
+ resample_kernel=resample_kernel)
+ self.weight = nn.Parameter(torch.zeros(1)) # for noise injection
+- self.activate = FusedLeakyReLU(out_channels)
++ self.activate = ScaledLeakyReLU()
+
+ def forward(self, x, style, noise=None):
+ # modulate
+@@ -693,10 +683,7 @@ class ConvLayer(nn.Sequential):
+ and not activate))
+ # activation
+ if activate:
+- if bias:
+- layers.append(FusedLeakyReLU(out_channels))
+- else:
+- layers.append(ScaledLeakyReLU(0.2))
++ layers.append(ScaledLeakyReLU(0.2))
+
+ super(ConvLayer, self).__init__(*layers)
+
+diff --git a/basicsr/data/prefetch_dataloader.py b/basicsr/data/prefetch_dataloader.py
+index 5088425..0cf35e6 100644
+--- a/basicsr/data/prefetch_dataloader.py
++++ b/basicsr/data/prefetch_dataloader.py
+@@ -99,7 +99,7 @@ class CUDAPrefetcher():
+ self.loader = iter(loader)
+ self.opt = opt
+ self.stream = torch.cuda.Stream()
+- self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
++ self.device = torch.device('cpu')
+ self.preload()
+
+ def preload(self):
+diff --git a/basicsr/models/base_model.py b/basicsr/models/base_model.py
+index 05c8d2e..36442a2 100644
+--- a/basicsr/models/base_model.py
++++ b/basicsr/models/base_model.py
+@@ -15,7 +15,7 @@ class BaseModel():
+
+ def __init__(self, opt):
+ self.opt = opt
+- self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
++ self.device = torch.device('cpu')
+ self.is_train = opt['is_train']
+ self.schedulers = []
+ self.optimizers = []
+@@ -91,14 +91,7 @@ class BaseModel():
+ Args:
+ net (nn.Module)
+ """
+- net = net.to(self.device)
+- if self.opt['dist']:
+- find_unused_parameters = self.opt.get('find_unused_parameters', False)
+- net = DistributedDataParallel(
+- net, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
+- elif self.opt['num_gpu'] > 1:
+- net = DataParallel(net)
+- return net
++ return net.to(self.device)
+
+ def get_optimizer(self, optim_type, params, lr, **kwargs):
+ if optim_type == 'Adam':
+diff --git a/basicsr/ops/dcn/__init__.py b/basicsr/ops/dcn/__init__.py
+index 32e3592..68033e0 100644
+--- a/basicsr/ops/dcn/__init__.py
++++ b/basicsr/ops/dcn/__init__.py
+@@ -1,7 +1,4 @@
+-from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, ModulatedDeformConvPack, deform_conv,
+- modulated_deform_conv)
++from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, ModulatedDeformConvPack)
+
+ __all__ = [
+- 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack', 'deform_conv',
+- 'modulated_deform_conv'
+-]
++ 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack',]
+diff --git a/basicsr/ops/dcn/deform_conv.py b/basicsr/ops/dcn/deform_conv.py
+index 6268ca8..38ced57 100644
+--- a/basicsr/ops/dcn/deform_conv.py
++++ b/basicsr/ops/dcn/deform_conv.py
+@@ -2,191 +2,9 @@ import math
+ import os
+ import torch
+ from torch import nn as nn
+-from torch.autograd import Function
+-from torch.autograd.function import once_differentiable
+ from torch.nn import functional as F
+ from torch.nn.modules.utils import _pair, _single
+
+-BASICSR_JIT = os.getenv('BASICSR_JIT')
+-if BASICSR_JIT == 'True':
+- from torch.utils.cpp_extension import load
+- module_path = os.path.dirname(__file__)
+- deform_conv_ext = load(
+- 'deform_conv',
+- sources=[
+- os.path.join(module_path, 'src', 'deform_conv_ext.cpp'),
+- os.path.join(module_path, 'src', 'deform_conv_cuda.cpp'),
+- os.path.join(module_path, 'src', 'deform_conv_cuda_kernel.cu'),
+- ],
+- )
+-else:
+- try:
+- from . import deform_conv_ext
+- except ImportError:
+- pass
+- # avoid annoying print output
+- # print(f'Cannot import deform_conv_ext. Error: {error}. You may need to: \n '
+- # '1. compile with BASICSR_EXT=True. or\n '
+- # '2. set BASICSR_JIT=True during running')
+-
+-
+-class DeformConvFunction(Function):
+-
+- @staticmethod
+- def forward(ctx,
+- input,
+- offset,
+- weight,
+- stride=1,
+- padding=0,
+- dilation=1,
+- groups=1,
+- deformable_groups=1,
+- im2col_step=64):
+- if input is not None and input.dim() != 4:
+- raise ValueError(f'Expected 4D tensor as input, got {input.dim()}D tensor instead.')
+- ctx.stride = _pair(stride)
+- ctx.padding = _pair(padding)
+- ctx.dilation = _pair(dilation)
+- ctx.groups = groups
+- ctx.deformable_groups = deformable_groups
+- ctx.im2col_step = im2col_step
+-
+- ctx.save_for_backward(input, offset, weight)
+-
+- output = input.new_empty(DeformConvFunction._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride))
+-
+- ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones
+-
+- if not input.is_cuda:
+- raise NotImplementedError
+- else:
+- cur_im2col_step = min(ctx.im2col_step, input.shape[0])
+- assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize'
+- deform_conv_ext.deform_conv_forward(input, weight,
+- offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3),
+- weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1],
+- ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups,
+- ctx.deformable_groups, cur_im2col_step)
+- return output
+-
+- @staticmethod
+- @once_differentiable
+- def backward(ctx, grad_output):
+- input, offset, weight = ctx.saved_tensors
+-
+- grad_input = grad_offset = grad_weight = None
+-
+- if not grad_output.is_cuda:
+- raise NotImplementedError
+- else:
+- cur_im2col_step = min(ctx.im2col_step, input.shape[0])
+- assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize'
+-
+- if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
+- grad_input = torch.zeros_like(input)
+- grad_offset = torch.zeros_like(offset)
+- deform_conv_ext.deform_conv_backward_input(input, offset, grad_output, grad_input,
+- grad_offset, weight, ctx.bufs_[0], weight.size(3),
+- weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1],
+- ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups,
+- ctx.deformable_groups, cur_im2col_step)
+-
+- if ctx.needs_input_grad[2]:
+- grad_weight = torch.zeros_like(weight)
+- deform_conv_ext.deform_conv_backward_parameters(input, offset, grad_output, grad_weight,
+- ctx.bufs_[0], ctx.bufs_[1], weight.size(3),
+- weight.size(2), ctx.stride[1], ctx.stride[0],
+- ctx.padding[1], ctx.padding[0], ctx.dilation[1],
+- ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1,
+- cur_im2col_step)
+-
+- return (grad_input, grad_offset, grad_weight, None, None, None, None, None)
+-
+- @staticmethod
+- def _output_size(input, weight, padding, dilation, stride):
+- channels = weight.size(0)
+- output_size = (input.size(0), channels)
+- for d in range(input.dim() - 2):
+- in_size = input.size(d + 2)
+- pad = padding[d]
+- kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
+- stride_ = stride[d]
+- output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
+- if not all(map(lambda s: s > 0, output_size)):
+- raise ValueError(f'convolution input is too small (output would be {"x".join(map(str, output_size))})')
+- return output_size
+-
+-
+-class ModulatedDeformConvFunction(Function):
+-
+- @staticmethod
+- def forward(ctx,
+- input,
+- offset,
+- mask,
+- weight,
+- bias=None,
+- stride=1,
+- padding=0,
+- dilation=1,
+- groups=1,
+- deformable_groups=1):
+- ctx.stride = stride
+- ctx.padding = padding
+- ctx.dilation = dilation
+- ctx.groups = groups
+- ctx.deformable_groups = deformable_groups
+- ctx.with_bias = bias is not None
+- if not ctx.with_bias:
+- bias = input.new_empty(1) # fake tensor
+- if not input.is_cuda:
+- raise NotImplementedError
+- if weight.requires_grad or mask.requires_grad or offset.requires_grad or input.requires_grad:
+- ctx.save_for_backward(input, offset, mask, weight, bias)
+- output = input.new_empty(ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
+- ctx._bufs = [input.new_empty(0), input.new_empty(0)]
+- deform_conv_ext.modulated_deform_conv_forward(input, weight, bias, ctx._bufs[0], offset, mask, output,
+- ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride,
+- ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
+- ctx.groups, ctx.deformable_groups, ctx.with_bias)
+- return output
+-
+- @staticmethod
+- @once_differentiable
+- def backward(ctx, grad_output):
+- if not grad_output.is_cuda:
+- raise NotImplementedError
+- input, offset, mask, weight, bias = ctx.saved_tensors
+- grad_input = torch.zeros_like(input)
+- grad_offset = torch.zeros_like(offset)
+- grad_mask = torch.zeros_like(mask)
+- grad_weight = torch.zeros_like(weight)
+- grad_bias = torch.zeros_like(bias)
+-