add external files

Shunsuke Shibayama 2024-02-11 23:04:16 +09:00
parent 7823243dbe
commit 92aa3ea078
122 changed files with 1087 additions and 0 deletions

@@ -0,0 +1,87 @@
np = pyimport "numpy"
.backends = pyimport "./backends"
.cuda = pyimport "./cuda"
.nn = pyimport "./nn"
.optim = pyimport "./optim"
.serialization = pyimport "./serialization"
.utils = pyimport "./utils"
{.load!; .save!;} = pyimport "./serialization"
{.manual_seed!;} = pyimport "./random"
{.no_grad;} = pyimport "./autograd"
.Device = 'device': ClassType
.device: (type: Str) -> .Device
.DType = 'dtype': ClassType
.dtype: (type: Str) -> .DType
.UInt8 = 'uint8': ClassType
.Int8 = 'int8': ClassType
.Int16 = 'int16': ClassType
.Int32 = 'int32': ClassType
.Int64 = 'int64': ClassType
.Float16 = 'float16': ClassType
.Float32 = 'float32': ClassType
.Float64 = 'float64': ClassType
.Complex32 = 'complex32': ClassType
.Complex64 = 'complex64': ClassType
.Complex128 = 'complex128': ClassType
.Size: (S: [Nat; _]) -> ClassType
.Size(S).
__call__: (size: {S}) -> .Size(S)
.Size(S)|<: Eq|.
__eq__: (self: .Size(S), other: .Size(S)) -> Bool
.Tensor!: (T: Type, Shape: [Nat; _]) -> ClassType
.Tensor!(T, _) <: Output T
.Tensor!(T, S)|<: IrregularEq|.
Output: {.Tensor!(Bool, S)}
__eq__: (self: .Tensor!(T, S), other: .Tensor!(T, S)) -> .Tensor!(Bool, S)
.Tensor!(T, S)|<: Indexable(Nat, .Tensor!(T, _))|.
__getitem__: (self: .Tensor!(T, S), index: Nat or [Nat; _]) -> .Tensor!(T, _)
.Tensor!(T, S).
data: .Tensor!(T, S)
shape: .Size(S)
.Tensor!(_, _).
dtype: .DType
clone: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S)
cpu: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S)
detach: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S)
numpy: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> np.NDArray(T, S)
view: (|T, Old: [Nat; _], S: {A: [Nat; _] | A.prod() == Old.prod()}|(
self: .Tensor!(T, Old),
shape: {S},
) -> .Tensor!(T, S)) \
and (|T|(self: .Tensor!(T, _), shape: [Int; _]) -> .Tensor!(T, _))
backward!: |T, S: [Nat; _]|(
self: RefMut(.Tensor!(T, S)),
gradient := .Tensor!(T, S),
retain_graph := Bool,
create_graph := Bool,
) => NoneType
# TODO: S bound
item: |T|(self: Ref .Tensor!(T, _)) -> T
to: (|T, S: [Nat; _]|(
self: .Tensor!(T, S),
other: .DType or .Device,
non_blocking := Bool,
copy := Bool,
) -> .Tensor!(T, S))
size: (|T, S: [Nat; _]|(self: .Tensor!(T, S), dim: Nat) -> Nat) \
and (|T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Size(S))
sum: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, [])
squeeze: (|T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S.remove_all(1))) \
and (|T|(self: .Tensor!(T, _)) -> .Tensor!(T, _))
unsqueeze: (|T, S: [Nat; _], Dim: Nat|(self: .Tensor!(T, S), dim: {Dim}) -> .Tensor!(T, S.insert(Dim, 1))) \
and (|T|(self: .Tensor!(T, _), dim: Nat) -> .Tensor!(T, _))
.relu: |T, S: [Nat; _]|(x: .Tensor!(T, S)) -> .Tensor!(T, S)
.softmax: |T, S: [Nat; _]|(x: .Tensor!(T, S), dim: Nat) -> .Tensor!(T, S)
.max: (|T|(input: .Tensor!(T, _), dim: Nat, keepdim := Bool) -> (.Tensor!(T, _), .Tensor!(T, _))) \
and (|T|(input: .Tensor!(T, _)) -> .Tensor!(T, _))
.min: (|T|(input: .Tensor!(T, _), dim: Nat, keepdim := Bool) -> (.Tensor!(T, _), .Tensor!(T, _))) \
and (|T|(input: .Tensor!(T, _)) -> .Tensor!(T, _))
.tensor: (|T, S: [Nat; _]|(data: HasScalarType(T) and HasShape(S), dtype := .DType, device := .Device) -> .Tensor!(T, S)) \
and (|T|(data: [T; _], dtype := .DType, device := .Device) -> .Tensor!(T, _))
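
A minimal usage sketch against the tensor declarations above (assuming the stubs are installed where `pyimport "torch"` resolves to them; the literal data and shapes are only illustrative):

    torch = pyimport "torch"
    x = torch.tensor([[1, 2], [3, 4]])   # inferred as torch.Tensor!(Int, [2, 2])
    y = x.view([4])                      # accepted: shape products match (2*2 == 4)
    # x.view([3]) would be rejected at type-check time: 2*2 != 3
    print! y.shape                       # torch.Size([4])
    print! x.sum().item()                # sum gives a rank-0 tensor, item extracts the scalar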

@@ -0,0 +1 @@
{.no_grad;} = pyimport "./grad_mode"

@@ -0,0 +1,3 @@
.NoGrad = 'no_grad': ClassType
.NoGrad <: ContextManager
.no_grad: () -> .NoGrad

@@ -0,0 +1 @@
.mps = pyimport "./mps"

@@ -0,0 +1 @@
.is_available!: () => Bool

@@ -0,0 +1 @@
.is_available!: () => Bool

@@ -0,0 +1,17 @@
.modules = pyimport "./modules"
.parameter = pyimport "./parameter"
{
.Conv1d;
.Conv2d;
.Conv3d;
.CrossEntropyLoss;
.Flatten;
.Linear;
.MaxPool1d;
.MaxPool2d;
.MaxPool3d;
.Module;
.ReLU;
} = .modules
{.Parameter;} = .parameter

@@ -0,0 +1,17 @@
.activation = pyimport "./activation"
.container = pyimport "./container"
.conv = pyimport "./conv"
.flatten = pyimport "./flatten"
.linear = pyimport "./linear"
.loss = pyimport "./loss"
.module = pyimport "./module"
.pooling = pyimport "./pooling"
{.ReLU;} = .activation
{.Sequential;} = .container
{.Conv1d; .Conv2d; .Conv3d;} = .conv
{.Flatten;} = .flatten
{.Linear;} = .linear
{.CrossEntropyLoss;} = .loss
{.Module;} = .module
{.MaxPool1d; .MaxPool2d; .MaxPool3d;} = .pooling

@@ -0,0 +1,10 @@
{Tensor!;} = pyimport "torch"
.ReLU: ClassType
.ReLU.
__call__: () -> .ReLU
.ReLU|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .ReLU,
input: Tensor!(T, S),
) -> Tensor!(T, S)

@@ -0,0 +1,11 @@
{Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
.Sequential: ClassType
.Sequential.
__call__: (*args: Module) -> .Sequential
.Sequential|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .Sequential,
input: Tensor!(T, S),
) -> Tensor!(T, S)
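
A sketch of composing these declarations into a model; the layer sizes are arbitrary, and the call goes through the `GenericCallable` impls declared for each module:

    torch = pyimport "torch"
    nn = pyimport "torch/nn"
    model = nn.Sequential(
        nn.Linear(2, 4),
        nn.ReLU(),
        nn.Linear(4, 1),
    )
    x = torch.tensor([[1.0, 2.0]])   # torch.Tensor!(Float, [1, 2])
    y = model(x)                     # typed as a Tensor! by .Sequential.__call__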

@@ -0,0 +1,71 @@
{Device; DType; Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
_ConvNd: ClassType
_ConvNd <: Module
.Conv1d: ClassType
.Conv1d <: _ConvNd
.Conv1d.
__call__: (
in_channels: Nat,
out_channels: Nat,
kernel_size: Nat or [Nat; 1] or (Nat,),
stride := Nat or [Nat; 1] or (Nat,),
padding := Str or Nat or [Nat; 1] or (Nat,),
dilation := Nat or [Nat; 1] or (Nat,),
groups := Nat,
bias := Bool,
padding_mode := Str,
device := Device or Str or Nat,
dtype := DType or Str,
) -> .Conv1d
.Conv1d|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .Conv1d,
input: Tensor!(T, S),
) -> Tensor!(T, S)
.Conv2d: ClassType
.Conv2d <: _ConvNd
.Conv2d.
__call__: (
in_channels: Nat,
out_channels: Nat,
kernel_size: Nat or [Nat; 2] or (Nat, Nat),
stride := Nat or [Nat; 2] or (Nat, Nat),
padding := Str or Nat or [Nat; 2] or (Nat, Nat),
dilation := Nat or [Nat; 2] or (Nat, Nat),
groups := Nat,
bias := Bool,
padding_mode := Str,
device := Device or Str or Nat,
dtype := DType or Str,
) -> .Conv2d
.Conv2d|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .Conv2d,
input: Tensor!(T, S),
) -> Tensor!(T, S)
.Conv3d: ClassType
.Conv3d <: _ConvNd
.Conv3d.
__call__: (
in_channels: Nat,
out_channels: Nat,
kernel_size: Nat or [Nat; 3] or (Nat, Nat, Nat),
stride := Nat or [Nat; 3] or (Nat, Nat, Nat),
padding := Str or Nat or [Nat; 3] or (Nat, Nat, Nat),
dilation := Nat or [Nat; 3] or (Nat, Nat, Nat),
groups := Nat,
bias := Bool,
padding_mode := Str,
device := Device or Str or Nat,
dtype := DType or Str,
) -> .Conv3d
.Conv3d|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .Conv3d,
input: Tensor!(T, S),
) -> Tensor!(T, S)
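
For example, under these signatures a 2-D convolution accepts either a scalar or a tuple `kernel_size` (the channel counts below are arbitrary, and the keyword-argument call syntax is an assumption):

    nn = pyimport "torch/nn"
    conv = nn.Conv2d(1, 32, 3)                        # kernel_size as a single Nat
    conv2 = nn.Conv2d(32, 64, (3, 3), stride := 2)    # kernel_size as (Nat, Nat)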

@@ -0,0 +1,10 @@
{Tensor!;} = pyimport "torch"
.Flatten: ClassType
.Flatten.
__call__: (start_dim := Nat, end_dim := Int) -> .Flatten
.Flatten|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .Flatten,
input: Tensor!(T, S),
) -> Tensor!(T, S)

@@ -0,0 +1,18 @@
{Module;} = pyimport "torch/nn"
{Device; DType; Tensor!;} = pyimport "torch"
.Linear: ClassType
.Linear <: Module
.Linear.
__call__: (
in_features: Nat,
out_features: Nat,
bias := Bool,
device := Device or Str or Nat,
dtype := DType or Str,
) -> .Linear
.Linear|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .Linear,
input: Tensor!(T, S),
) -> Tensor!(T, S)

@@ -0,0 +1,27 @@
{Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
_Loss: ClassType
_Loss <: Module
_Loss.
reduction: Str
_WeightedLoss: ClassType
_WeightedLoss <: _Loss
.CrossEntropyLoss: ClassType
.CrossEntropyLoss <: _WeightedLoss
.CrossEntropyLoss.
__call__: () -> .CrossEntropyLoss
.CrossEntropyLoss|<: GenericCallable|.
__call__: |T|(
self: .CrossEntropyLoss,
input: Tensor!(T, _),
target: Tensor!(T, _),
) -> Tensor!(T, [])
.CrossEntropyLoss.
forward: |T|(
self: .CrossEntropyLoss,
input: Tensor!(T, _),
target: Tensor!(T, _),
) -> Tensor!(T, [])

@@ -0,0 +1,43 @@
{Tensor!;} = pyimport "torch"
{Parameter;} = pyimport "torch/nn/parameter"
.Module: ClassType
.Module <: InheritableType
.Module|<: GenericCallable|.
__call__: |T|(
self: .Module,
input: Tensor!(T, _),
) -> Tensor!(T, _)
.Module.
__init__: (self: RefMut(.Module)) => NoneType
parameters: (self: Ref(.Module), recurse := Bool) -> Iterator Parameter
named_parameters: (self: Ref(.Module), prefix := Str, recurse := Bool, remove_duplicate := Bool) -> Iterator((Str, Parameter))
# buffers: (self: Ref(.Module), recurse := Bool) -> Iterator .Tensor!
# named_buffers: (self: Ref(.Module), prefix := Str, recurse := Bool, remove_duplicate := Bool) -> Iterator((Str, .Tensor!))
children: (self: Ref(.Module)) -> Iterator .Module
named_children: (self: Ref(.Module), prefix := Str) -> Iterator((Str, .Module))
modules: (self: Ref(.Module)) -> Iterator .Module
named_modules: (self: Ref(.Module), memo := {.Module; _}, prefix := Str, remove_duplicate := Bool) -> Iterator((Str, .Module))
train: |T <: .Module|(self: Ref(T), mode := Bool) -> T
eval: |T <: .Module|(self: Ref(T)) -> T
zero_grad!: (self: RefMut(.Module), set_to_none := Bool) => NoneType
compile: (self: Ref(.Module), *args: Obj, **kwargs: Obj) -> .Module
# register_buffer!: (self: RefMut(.Module), name: Str, tensor := Tensor!, persistent := Bool) => NoneType
register_parameter!: (self: RefMut(.Module), name: Str, param := Parameter) => NoneType
add_module!: (self: RefMut(.Module), name: Str, module := .Module) => NoneType
register_module!: (self: RefMut(.Module), name: Str, module := .Module) => NoneType
get_submodule: (self: Ref(.Module), name: Str) -> .Module
get_parameter: (self: Ref(.Module), name: Str) -> Parameter
# get_buffer: (self: Ref(.Module), name: Str) -> .Tensor!
get_extra_state: (self: Ref(.Module)) -> Obj
set_extra_state!: (self: RefMut(.Module), state: Obj) => NoneType
apply!: |T <: .Module|(self: T, fn: (module: RefMut(T)) => NoneType) => T
cuda!: |T <: .Module|(self: T, device := Int) => T
ipu!: |T <: .Module|(self: T, device := Int) => T
xpu!: |T <: .Module|(self: T, device := Int) => T
cpu!: |T <: .Module|(self: T) => T
float: |T <: .Module|(self: T) -> T
double: |T <: .Module|(self: T) -> T
half: |T <: .Module|(self: T) -> T
bfloat16: |T <: .Module|(self: T) -> T
to: |T <: .Module|(self: T, *args: Obj, **kwargs: Obj) -> T
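
A sketch of a few of the `Module` utilities above, assuming a `model` built as in the `Sequential` sketch earlier:

    _ = model.eval()                    # returns the module itself, so the result can be ignored
    for! model.named_parameters(), p =>
        print! p                        # (name, Parameter) pairs
    _ = model.train()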

@@ -0,0 +1,57 @@
{Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
_MaxPoolNd: ClassType
_MaxPoolNd <: Module
.MaxPool1d: ClassType
.MaxPool1d <: _MaxPoolNd
.MaxPool1d <: GenericCallable
.MaxPool1d.
__call__: (
kernel_size: Nat or [Nat; 1] or (Nat,),
stride := Nat or [Nat; 1] or (Nat,),
padding := Str or Nat or [Nat; 1] or (Nat,),
dilation := Nat or [Nat; 1] or (Nat,),
return_indices := Bool,
ceil_mode := Bool,
) -> .MaxPool1d
.MaxPool1d|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .MaxPool1d,
input: Tensor!(T, S),
) -> Tensor!(T, S)
.MaxPool2d: ClassType
.MaxPool2d <: _MaxPoolNd
.MaxPool2d.
__call__: (
kernel_size: Nat or [Nat; 2] or (Nat, Nat),
stride := Nat or [Nat; 2] or (Nat, Nat),
padding := Str or Nat or [Nat; 2] or (Nat, Nat),
dilation := Nat or [Nat; 2] or (Nat, Nat),
return_indices := Bool,
ceil_mode := Bool,
) -> .MaxPool2d
.MaxPool2d|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .MaxPool2d,
input: Tensor!(T, S),
) -> Tensor!(T, S)
.MaxPool3d: ClassType
.MaxPool3d <: _MaxPoolNd
.MaxPool3d.
__call__: (
kernel_size: Nat or [Nat; 3] or (Nat, Nat, Nat),
stride := Nat or [Nat; 3] or (Nat, Nat, Nat),
padding := Str or Nat or [Nat; 3] or (Nat, Nat, Nat),
dilation := Nat or [Nat; 3] or (Nat, Nat, Nat),
return_indices := Bool,
ceil_mode := Bool,
) -> .MaxPool3d
.MaxPool3d|<: GenericCallable|.
__call__: |T, S: [Nat; _]|(
self: .MaxPool3d,
input: Tensor!(T, S),
) -> Tensor!(T, S)

@@ -0,0 +1 @@
.Parameter: ClassType

@@ -0,0 +1,47 @@
{Parameter;} = pyimport "torch/nn/parameter"
.Optimizer!: ClassType
.Optimizer! <: InheritableType
.Optimizer!.
__call__: (params: Iterable(Parameter)) -> .Optimizer!
zero_grad!: (self: RefMut .Optimizer!) => NoneType
step!: (self: RefMut .Optimizer!) => NoneType
.ASGD!: ClassType
.ASGD! <: .Optimizer!
.Adadelta!: ClassType
.Adadelta! <: .Optimizer!
.Adagrad!: ClassType
.Adagrad! <: .Optimizer!
.Adam!: ClassType
.Adam! <: .Optimizer!
.Adam!.
__call__: (
params: Iterable(Parameter),
lr := Float,
betas := (Float, Float),
eps := Float,
weight_decay := Float,
amsgrad := Bool,
foreach := Bool,
maximize := Bool,
) -> .Adam!
.AdamW!: ClassType
.AdamW! <: .Optimizer!
.Adamax!: ClassType
.Adamax! <: .Optimizer!
.LBFGS!: ClassType
.LBFGS! <: .Optimizer!
.NAdam!: ClassType
.NAdam! <: .Optimizer!
.RAdam!: ClassType
.RAdam! <: .Optimizer!
.RMSprop!: ClassType
.RMSprop! <: .Optimizer!
.Rprop!: ClassType
.Rprop! <: .Optimizer!
.SGD!: ClassType
.SGD! <: .Optimizer!
.SparseAdam!: ClassType
.SparseAdam! <: .Optimizer!
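
A sketch of one optimization step over these stubs; `model` and `x` are assumed from the `Sequential` sketch, and the learning-rate keyword call is an assumption:

    optim = pyimport "torch/optim"
    optimizer = optim.Adam!(model.parameters(), lr := 0.001)
    optimizer.zero_grad!()
    loss = model(x).sum()    # stand-in scalar loss; a CrossEntropyLoss value would slot in here
    loss.backward!()
    optimizer.step!()
    print! loss.item()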

@@ -0,0 +1 @@
.manual_seed!: (seed: Int) => Obj

@@ -0,0 +1,2 @@
.load!: (f: PathLike) => Obj
.save!: (obj: Obj, f: PathLike) => NoneType
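
A checkpointing sketch over these two procedures; `model` is assumed from the earlier sketches and the file name is arbitrary:

    torch = pyimport "torch"
    torch.save!(model, "model.pt")
    restored = torch.load!("model.pt")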

@@ -0,0 +1 @@
.data = pyimport "./data"

@@ -0,0 +1,9 @@
{.DataLoader;} = pyimport "./dataloader"
{.Dataset;} = pyimport "./dataset"
{
.Sampler;
.SequentialSampler;
.RandomSampler;
.SubsetRandomSampler;
.WeightedRandomSampler;
} = pyimport "./sampler"

@@ -0,0 +1,25 @@
torch = pyimport "torch"
dataset = pyimport "./dataset"
{Sampler;} = pyimport "./sampler"
.DataLoader: ClassType
.DataLoader <: Iterable((torch.Tensor!(_, _), torch.Tensor!(_, _)))
.DataLoader.
__call__: (
dataset: dataset.Dataset,
batch_size := Nat,
shuffle := Bool,
sampler := Sampler,
batch_sampler := Sampler,
num_workers := Nat,
collate_fn := Obj,
pin_memory := Bool,
drop_last := Bool,
timeout := Float,
worker_init_fn := Obj,
multiprocessing_context := Obj,
generator := Obj,
prefetch_factor := Nat,
persistent_workers := Bool,
pin_memory_device := Str,
) -> .DataLoader
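
A sketch of constructing and draining the loader; `train_set` is an assumed `Dataset` value, and the keyword-argument call syntax is an assumption:

    data = pyimport "torch/utils/data"
    loader = data.DataLoader(train_set, batch_size := 64, shuffle := True)
    for! loader, batch =>
        print! batch    # each batch is a (Tensor!, Tensor!) pair per the Iterable impl above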

@@ -0,0 +1 @@
.Dataset: ClassType

@@ -0,0 +1,13 @@
.Sampler: ClassType
.RandomSampler: ClassType
.RandomSampler <: .Sampler
.SequentialSampler: ClassType
.SequentialSampler <: .Sampler
.SubsetRandomSampler: ClassType
.SubsetRandomSampler <: .Sampler
.WeightedRandomSampler: ClassType
.WeightedRandomSampler <: .Sampler