refactor: move external packages to package-index

Shunsuke Shibayama 2024-02-10 20:12:55 +09:00
parent 6b681c5fd1
commit 12a425a8da
116 changed files with 0 additions and 1031 deletions

@@ -1,8 +0,0 @@
.erg_parser = pyimport "erg_parser"
.compile: (code: Str, mode: Str) -> Code
.compile_ast: (ast: .erg_parser.ast.AST, mode: Str) -> Code
.compile_file: (path: Str) -> Code
.exec: (code: Str) -> Module
.exec_ast: (ast: .erg_parser.ast.AST) -> Module
.__import__: (name: Str) -> Module
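
A minimal usage sketch for these bindings (hedged: it assumes the declarations above type a Python module importable as "erg", and the mode strings are illustrative):
erg = pyimport "erg"
code = erg.compile("x = 1 + 1", "exec")
module = erg.exec("x = 42")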

@@ -1,4 +0,0 @@
.ast = pyimport "./ast"
.expr = pyimport "./expr"
.parse: (code: Str) -> .ast.Module

@@ -1,38 +0,0 @@
.Token: ClassType
.TokenKind: ClassType
.Literal: ClassType
.VarName: ClassType
.Identifier: ClassType
.Attribute: ClassType
.TupleAttribute: ClassType
.Subscript: ClassType
.TypeApp: ClassType
.NormalArray: ClassType
.NormalTuple: ClassType
.NormalDict: ClassType
.NormalSet: ClassType
.NormalRecord: ClassType
.BinOp: ClassType
.UnaryOp: ClassType
.Call: ClassType
.Args: ClassType
.Block: ClassType
.DataPack: ClassType
.Lambda: ClassType
.TypeAscription: ClassType
.VarSignature: ClassType
.SubrSignature: ClassType
.Def: ClassType
.Methods: ClassType
.ClassDef: ClassType
.PatchDef: ClassType
.ReDef: ClassType
.Compound: ClassType
.InlineModule: ClassType
.Dummy: ClassType
.Module: ClassType
.AST: ClassType
.AST.
    new: (name: Str, mod: .Module) -> .AST
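
As a hedged sketch of how these declarations compose (the source string is illustrative; "erg_parser" is the module pyimported above):
parser = pyimport "erg_parser"
mod = parser.parse("x = 1")
ast = parser.ast.AST.new("main", mod)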

@@ -1,20 +0,0 @@
.Literal: ClassType
.NormalArray: ClassType
.NormalTuple: ClassType
.NormalDict: ClassType
.NormalSet: ClassType
.NormalRecord: ClassType
.BinOp: ClassType
.UnaryOp: ClassType
.Call: ClassType
.DataPack: ClassType
.Lambda: ClassType
.TypeAscription: ClassType
.Def: ClassType
.Methods: ClassType
.ClassDef: ClassType
.PatchDef: ClassType
.ReDef: ClassType
.Compound: ClassType
.InlineModule: ClassType
.Dummy: ClassType

@@ -1,26 +0,0 @@
.BaseLoader: ClassType
.PackageLoader: ClassType
.PackageLoader <: .BaseLoader
.PackageLoader.
    __call__: (package_name: Str, package_path: Str) -> .PackageLoader
.FileSystemLoader: ClassType
.FileSystemLoader <: .BaseLoader
.FileSystemLoader.
    __call__: (path: Str) -> .FileSystemLoader
.DictLoader: ClassType
.DictLoader <: .BaseLoader
.DictLoader.
    __call__: (mapping: {Str: Str}) -> .DictLoader
.Template: ClassType
.Template.
    __call__: (source: Str, autoescape := Bool, enable_async := Bool) -> .Template
    render: (self: Ref(.Template), **kwargs: Obj) -> Str
.Environment: ClassType
.Environment.
    __call__: (loader := .BaseLoader, trim_blocks := Bool) -> .Environment
    get_template: (self: Ref(.Environment), name := Str) -> .Template
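
A short usage sketch against these stubs (hedged; `name := ...` is Erg's syntax for passing a Python keyword argument):
jinja2 = pyimport "jinja2"
template = jinja2.Template("Hello, {{ name }}!")
print! template.render(name := "Erg") # => Hello, Erg!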

@@ -1,4 +0,0 @@
.pyplot = pyimport "./pyplot"
.scale = pyimport "./scale"
.use!: Str => NoneType

@@ -1,3 +0,0 @@
._axes = pyimport "./_axes"
.Axes! = ._axes.Axes!

@@ -1,21 +0,0 @@
.Axes!: ClassType
.Axes!.
    scatter!: (
        self: RefMut(.Axes!),
        x: Num, # TODO: Float | ArrayLike
        y: Num,
        s := Num,
        c := Iterable(Int),
        vmin := Float,
        vmax := Float,
    ) => NoneType
    set!: (
        self: RefMut(.Axes!),
        xlabel := Str,
        xlim := (Float, Float),
        xticks := Iterable(Obj),
        ylabel := Str,
        ylim := (Float, Float),
        yticks := Iterable(Obj),
        title := Str
    ) => NoneType

@@ -1,5 +0,0 @@
axes = pyimport "./axes"
.Figure!: ClassType
.Figure!.
    add_axes!: (self: RefMut(.Figure!), rect: [Float; 4], projection := Str or NoneType, polar := Bool, label := Str) => axes.Axes!

@@ -1,4 +0,0 @@
_ImageBase: ClassType
.AxesImage!: ClassType
.AxesImage! <: _ImageBase

@@ -1 +0,0 @@
.Legend: ClassType

@@ -1,2 +0,0 @@
.Line2D: ClassType
.Line3D: ClassType

@@ -1,30 +0,0 @@
contextlib = pyimport "contextlib"
# lines = pyimport "../lines"
text = pyimport "../text"
legend = pyimport "../legend"
.style = pyimport "../style"
figure = pyimport "../figure"
axes = pyimport "../axes"
image = pyimport "../image"
.plot!: (*args: Obj, scaleX := Bool, scaleY := Bool) => [Obj; _]
.imshow!: (X: Obj, cmap := Str, interpolation := Str) => image.AxesImage!
.show!: () => NoneType
.text!: (x: Float, y: Float, s: Str, fontdict := {Str: Obj}, fontsize := Nat) => text.Text
.title!: (title: Str) => text.Text
.xlabel!: (label: Str) => text.Text
.ylabel!: (label: Str) => text.Text
.xlim!: (left := Float, right := Float) => (Float, Float) \
    and ((left_right: (Float, Float)) => (Float, Float))
.ylim!: (bottom := Float, top := Float) => (Float, Float) \
    and ((bottom_top: (Float, Float)) => (Float, Float))
.legend!: (labels := [Str; _]) => legend.Legend
.savefig!: (fname: Str, dpi := Float or Str, format := Str) => NoneType
.subplots!: (() => (figure.Figure!, axes.Axes!)) \
    and ((nrows: {1}, ncols: {1}) => (figure.Figure!, axes.Axes!)) \
    and ((nrows: {1}, ncols: Nat) => (figure.Figure!, [axes.Axes!; _])) \
    and ((nrows: Nat, ncols: {1}) => (figure.Figure!, [axes.Axes!; _])) \
    and ((nrows: Nat, ncols: Nat) => (figure.Figure!, [[axes.Axes!; _]; _]))
.figure!: (num := Int or Str, figsize := [Float; _], dpi := Float) => figure.Figure!
.xkcd!: (scale := Float, length := Float, randomness := Float) => contextlib.ExitStack!
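
For orientation, a hedged sketch of driving this pyplot API from Erg (the "matplotlib/pyplot" import path is assumed to mirror the layout above; returned artists are discarded with `_ =`):
plt = pyimport "matplotlib/pyplot"
_ = plt.plot!([0, 1, 2], [0, 1, 4])
_ = plt.title!("y = x^2")
plt.show!()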

@@ -1 +0,0 @@
.use!: Str => NoneType

@@ -1 +0,0 @@
.Text: ClassType

@@ -1,89 +0,0 @@
.Complex64 = 'complex64': ClassType
.Complex128 = 'complex128': ClassType
.Float16 = 'float16': ClassType
.Float32 = 'float32': ClassType
.Float64 = 'float64': ClassType
.Int8 = 'int8': ClassType
.Int16 = 'int16': ClassType
.Int32 = 'int32': ClassType
.Int64 = 'int64': ClassType
.UInt8 = 'uint8': ClassType
.UInt16 = 'uint16': ClassType
.UInt32 = 'uint32': ClassType
.UInt64 = 'uint64': ClassType
.Bool_ = 'bool_': ClassType
.Str_ = 'str_': ClassType
.NDArray = 'ndarray': (T: Type, Shape: [Nat; _]) -> ClassType
.NDArray(T, _) <: Output T
.NDArray(_, _) <: Num
.NDArray(T, S)|<: Add .NDArray(T, S)|.
    Output: {.NDArray(T, S)}
    __add__: (self: .NDArray(T, S), other: .NDArray(T, S)) -> .NDArray(T, S)
.NDArray(T, S)|<: Add T|.
    Output: {.NDArray(T, S)}
    __add__: (self: .NDArray(T, S), other: T) -> .NDArray(T, S)
.NDArray(T, S)|<: Sub .NDArray(T, S)|.
    Output: {.NDArray(T, S)}
    __sub__: (self: .NDArray(T, S), other: .NDArray(T, S)) -> .NDArray(T, S)
.NDArray(T, S)|<: Sub T|.
    Output: {.NDArray(T, S)}
    __sub__: (self: .NDArray(T, S), other: T) -> .NDArray(T, S)
.NDArray(T, S)|<: Mul .NDArray(T, S)|.
    Output: {.NDArray(T, S)}
    __mul__: (self: .NDArray(T, S), other: .NDArray(T, S)) -> .NDArray(T, S)
.NDArray(T, S)|<: Mul T|.
    Output: {.NDArray(T, S)}
    __mul__: (self: .NDArray(T, S), other: T) -> .NDArray(T, S)
.NDArray(T, S)|<: Div .NDArray(T, S)|.
    Output: {.NDArray(T, S)}
    __div__: (self: .NDArray(T, S), other: .NDArray(T, S)) -> .NDArray(T, S)
.NDArray(T, S)|<: Div T|.
    Output: {.NDArray(T, S)}
    __div__: (self: .NDArray(T, S), other: T) -> .NDArray(T, S)
.NDArray.
    shape: [Nat; _]
    ndim: Nat
    dtype: Type
    size: Nat
    copy: |T, S: [Nat; _]|(self: .NDArray(T, S),) -> .NDArray(T, S)
    reshape: |T, Old: [Nat; _], S: {A: [Nat; _] | A.prod() == Old.prod()}|(
        self: .NDArray(T, Old),
        shape: {S},
    ) -> .NDArray(T, S)
    sum: |T <: Num|(self: .NDArray(T, _),) -> T
    take: (|T|(self: .NDArray(T, _), indice: Nat) -> T) \
        and (|T|(self: .NDArray(T, _), indices: .NDArray(Nat) or [Nat; _]) -> .NDArray(T, _))
    tobytes: |T|(self: .NDArray(T, _),) -> Bytes
    tolist: |T|(self: .NDArray(T, _),) -> [T; _]
.nan: Float
.NaN: Float
.abs: |T, S: [Nat; _]|(object: .NDArray(T, S),) -> .NDArray(T, S)
.add: |T, S: [Nat; _]|(object: .NDArray(T, S), other: .NDArray(T, S)) -> .NDArray(T, S)
.all: |T <: Num|(object: .NDArray(T),) -> Bool
.any: |T <: Num|(object: .NDArray(T),) -> Bool
.arange: |T <: Num|(start: T, stop := T, step := T) -> .NDArray(T)
.array: |T, S: [Nat; _]|(object: HasScalarType(T) and HasShape(S),) -> .NDArray(T, S)
.linspace: |T <: Num|(start: T, stop: T, num := Nat, endpoint := Bool, retstep := Bool, dtype := Type, axis := Nat) -> .NDArray(T)
.max: |T <: Num|(object: .NDArray(T),) -> T
.mean: |T <: Num|(object: .NDArray(T),) -> T
.min: |T <: Num|(object: .NDArray(T),) -> T
.ones: |T|(shape: Nat or [Nat; _], dtype := Type) -> .NDArray(T)
.reshape: |T|(object: .NDArray(T), shape: [Nat; _]) -> .NDArray(T)
.std: |T <: Num|(object: .NDArray(T),) -> T
.sum: |T|(object: .NDArray(T),) -> T
.sqrt: |T|(object: .NDArray(T),) -> .NDArray(T)
.transpose: |T|(object: .NDArray(T), axes := [Nat; _]) -> .NDArray(T)
.zeros: (|N: Nat|(shape: {N}, dtype := Type) -> .NDArray(Nat, [N])) \
    and (|S: [Nat; _]|(shape: {S}, dtype := Type) -> .NDArray(Nat, S))
.empty: (|N: Nat|(shape: {N}, dtype := Type) -> .NDArray(Nat, [N])) \
    and (|S: [Nat; _]|(shape: {S}, dtype := Type) -> .NDArray(Nat, S))
.dot: (|T, I: Nat, J: Nat, K: Nat|(l: .NDArray(T, [I, J]), r: .NDArray(T, [J, K])) -> .NDArray(T, [I, K])) \
    and (|T, I: Nat, J: Nat|(l: .NDArray(T, [I]), r: .NDArray(T, [I, J])) -> .NDArray(T, [J])) \
    and (|T, I: Nat|(l: .NDArray(T, [I]), r: .NDArray(T, [I])) -> T)
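
The point of the shape parameter is that shape mismatches become type errors. A hedged sketch (element and shape types inferred from the literals as declared above):
np = pyimport "numpy"
a = np.array([1, 2, 3]) # : np.NDArray(Int, [3])
b = a + a               # OK: both operands have shape [3]
print! b.sum()          # => 12
z = np.zeros(4)         # : np.NDArray(Nat, [4])
# a + z would be rejected: shapes [3] and [4] do not unify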

@@ -1 +0,0 @@
.version = pyimport "./version"

@@ -1,23 +0,0 @@
.Version: ClassType
.Version <: Eq
.Version <: Ord
.Version.
    epoch: Nat
    release: (Nat, Nat, Nat)
    pre: (Str, Nat) or NoneType
    post: Nat or NoneType
    dev: Nat or NoneType
    local: Str or NoneType
    public: Str
    base_version: Str
    is_prerelease: Bool
    is_postrelease: Bool
    is_devrelease: Bool
    major: Nat
    minor: Nat
    micro: Nat
    __call__: (ver: Str) -> .Version
.InvalidVersion: ClassType
.parse: (ver: Str) -> .Version
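
Usage sketch (hedged; the "packaging/version" pyimport path is assumed to mirror the package layout):
version = pyimport "packaging/version"
v = version.parse("1.2.0rc1")
assert v.is_prerelease
print! v.base_version # => 1.2.0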

@@ -1,17 +0,0 @@
.core = pyimport "core"
.io = pyimport "io"
.plotting = pyimport "plotting"
.util = pyimport "util"
{
    .DataFrame!;
    .Series!;
    .Index;
} = pyimport "core/api"
{
    .read_csv!;
} = pyimport "io/parsers"
{
    .read_json!;
} = pyimport "io/json"

@@ -1,5 +0,0 @@
.algorithms = pyimport "algorithms"
.api = pyimport "api"
.frame = pyimport "frame"
.indexes = pyimport "indexes"
.series = pyimport "series"

@@ -1,3 +0,0 @@
{.DataFrame!;} = pyimport "./frame"
{.Series!;} = pyimport "./series"
{.Index;} = pyimport "./indexes/api"

@@ -1,14 +0,0 @@
{.Index;} = pyimport "./indexes/api"
# I := Nat, V := Obj
.DataFrame!: (C: Type, I: Type, V: Type) -> ClassType
.DataFrame!(C, I, V) <: Input(C)
.DataFrame!(C, I, V) <: Input(I)
.DataFrame!(C, I, V) <: Output(V)
.DataFrame!.
    __call__: |K, V, I|(dic: {K: [V; _]} or Iterable(Iterable(V)), index: [I; _] := [Nat; _]) -> .DataFrame!(K, I, V)
    shape: (Nat, Nat)
    index: .Index(_) # TODO
    head: |C, I, V|(self: .DataFrame!(C, I, V), tail: Nat := {5}) -> .DataFrame!(C, I, V)
    tail: |C, I, V|(self: .DataFrame!(C, I, V), tail: Nat := {5}) -> .DataFrame!(C, I, V)
    info!: (self: .DataFrame!(_, _, _)) => NoneType
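
A hedged construction sketch against this declaration (column, index, and value types are inferred from the dict literal):
pd = pyimport "pandas"
df = pd.DataFrame!({"a": [1, 2, 3], "b": [4, 5, 6]})
print! df.shape # => (3, 2)
df.info!()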

@@ -1 +0,0 @@
{.Index;} = pyimport "./base"

@@ -1,2 +0,0 @@
.Index: (T: Type) -> ClassType
.Index(T) <: Output(T)

@@ -1,8 +0,0 @@
{.Index;} = pyimport "./indexes/api"
# K := Nat, V := Obj
.Series!: (K: Type, V: Type) -> ClassType
.Series!(K, V) <: Input(K)
.Series!(K, V) <: Output(V)
.Series!.
    __call__: |K, V|(iterable: Iterable(V), index: [K; _] or .Index(K) := [Nat; _]) -> .Series!(K, V)

@@ -1,7 +0,0 @@
.api = pyimport "api"
.clipboards = pyimport "clipboards"
.common = pyimport "common"
.excel = pyimport "excel"
.html = pyimport "html"
.json = pyimport "json"
.parsers = pyimport "parsers"

@@ -1,3 +0,0 @@
{DataFrame!;} = pyimport "../../core/frame"
.read_json!: (path: PathLike) => DataFrame!

@@ -1,3 +0,0 @@
{DataFrame!;} = pyimport "../../core/frame"
.read_csv!: (path: PathLike) => DataFrame!

@@ -1 +0,0 @@
.version = pyimport "version"

@@ -1 +0,0 @@
.Version: ClassType

@@ -1 +0,0 @@
.setup!: (*Obj,) => NoneType

@@ -1,3 +0,0 @@
.Develop = 'develop': ClassType
.Develop.
    run!: (self: .Develop) => NoneType

@@ -1,3 +0,0 @@
.Install = 'install': ClassType
.Install.
    run!: (self: .Install) => NoneType

@@ -1,13 +0,0 @@
.PY2: Bool
.PY3: Bool
.class_types: [Type; _]
.integer_types: [Type; _]
.string_types: [Type; _]
.float_types: [Type; _]
.text_type: Type
.binary_type: Type
.MAXSIZE: Nat
.exec_!: (code: Str, globals := {Str: Obj}, locals := {Str: Obj}) => Obj
.print_!: (*objs: [Obj; _], end := Str, sep := Str) => NoneType

@@ -1,87 +0,0 @@
np = pyimport "numpy"
.backends = pyimport "./backends"
.cuda = pyimport "./cuda"
.nn = pyimport "./nn"
.optim = pyimport "./optim"
.serialization = pyimport "./serialization"
.utils = pyimport "./utils"
{.load!; .save!;} = pyimport "./serialization"
{.manual_seed!;} = pyimport "./random"
{.no_grad;} = pyimport "./autograd"
.Device = 'device': ClassType
.device: (type: Str) => .Device
.DType = 'dtype': ClassType
.dtype: (type: Str) => .DType
.UInt8 = 'uint8': ClassType
.Int8 = 'int8': ClassType
.Int16 = 'int16': ClassType
.Int32 = 'int32': ClassType
.Int64 = 'int64': ClassType
.Float16 = 'float16': ClassType
.Float32 = 'float32': ClassType
.Float64 = 'float64': ClassType
.Complex32 = 'complex32': ClassType
.Complex64 = 'complex64': ClassType
.Complex128 = 'complex128': ClassType
.Size: (S: [Nat; _]) -> ClassType
.Size(S).
    __call__: (size: {S}) -> .Size(S)
.Size(S)|<: Eq|.
    __eq__: (self: .Size(S), other: .Size(S)) -> Bool
.Tensor!: (T: Type, Shape: [Nat; _]) -> ClassType
.Tensor!(T, _) <: Output T
.Tensor!(T, S)|<: IrregularEq|.
    Output: {.Tensor!(Bool, S)}
    __eq__: (self: .Tensor!(T, S), other: .Tensor!(T, S)) -> .Tensor!(Bool, S)
.Tensor!(T, S)|<: Indexable(Nat, .Tensor!(T, _))|.
    __getitem__: (self: .Tensor!(T, S), index: Nat or [Nat; _]) -> .Tensor!(T, _)
.Tensor!(T, S).
    data: .Tensor!(T, S)
    shape: .Size(S)
.Tensor!(_, _).
    dtype: .DType
    clone: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S)
    cpu: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S)
    detach: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S)
    numpy: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> np.NDArray(T, S)
    view: (|T, Old: [Nat; _], S: {A: [Nat; _] | A.prod() == Old.prod()}|(
        self: .Tensor!(T, Old),
        shape: {S},
    ) -> .Tensor!(T, S)) \
        and (|T|(self: .Tensor!(T, _), shape: [Int; _]) -> .Tensor!(T, _))
    backward!: |T, S: [Nat; _]|(
        self: RefMut(.Tensor!(T, S)),
        gradient := .Tensor!(T, S),
        retain_graph := Bool,
        create_graph := Bool,
    ) => NoneType
    # TODO: S bound
    item: |T|(self: Ref .Tensor!(T, _)) -> T
    to: (|T, S: [Nat; _]|(
        self: .Tensor!(T, S),
        other: .DType or .Device,
        non_blocking := Bool,
        copy := Bool,
    ) -> .Tensor!(T, S))
    size: (|T, S: [Nat; _]|(self: .Tensor!(T, S), dim: Nat) -> Nat) \
        and (|T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Size)
    sum: |T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, [])
    squeeze: (|T, S: [Nat; _]|(self: .Tensor!(T, S)) -> .Tensor!(T, S.remove_all(1))) \
        and (|T|(self: .Tensor!(T, _)) -> .Tensor!(T, _))
    unsqueeze: (|T, S: [Nat; _], Dim: Nat|(self: .Tensor!(T, S), dim: {Dim}) -> .Tensor!(T, S.insert(Dim, 1))) \
        and (|T|(self: .Tensor!(T, _), dim: Nat) -> .Tensor!(T, _))
.relu: |T, S: [Nat; _]|(x: .Tensor!(T, S)) -> .Tensor!(T, S)
.softmax: |T, S: [Nat; _]|(x: .Tensor!(T, S), dim: Nat) -> .Tensor!(T, S)
.max: (|T|(input: .Tensor!(T, _), dim: Nat, keepdim := Bool) -> (.Tensor!(T, _), .Tensor!(T, _))) \
    and (|T|(input: .Tensor!(T, _)) -> .Tensor!(T, _))
.min: (|T|(input: .Tensor!(T, _), dim: Nat, keepdim := Bool) -> (.Tensor!(T, _), .Tensor!(T, _))) \
    and (|T|(input: .Tensor!(T, _)) -> .Tensor!(T, _))
.tensor: (|T, S: [Nat; _]|(data: HasScalarType(T) and HasShape(S), dtype := .DType, device := .Device) -> .Tensor!(T, S)) \
    and (|T|(data: [T; _], dtype := .DType, device := .Device) -> .Tensor!(T, _))
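
A hedged sketch exercising the shape-indexed Tensor! API (the literal's scalar type and shape are inferred via HasScalarType/HasShape):
torch = pyimport "torch"
t = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) # : torch.Tensor!(Float, [2, 2])
u = t.view([4])                            # checked: 2 * 2 == 4
print! u.sum().item() # => 10.0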

@@ -1 +0,0 @@
{.no_grad;} = pyimport "./grad_mode"

@@ -1,3 +0,0 @@
.NoGrad = 'no_grad': ClassType
.NoGrad <: ContextManager
.no_grad: () -> .NoGrad

@@ -1 +0,0 @@
.mps = pyimport "./mps"

@@ -1 +0,0 @@
.is_available!: () => Bool

@@ -1 +0,0 @@
.is_available!: () => Bool

@@ -1,17 +0,0 @@
.modules = pyimport "./modules"
.parameter = pyimport "./parameter"
{
    .Conv1d;
    .Conv2d;
    .Conv3d;
    .CrossEntropyLoss;
    .Flatten;
    .Linear;
    .MaxPool1d;
    .MaxPool2d;
    .MaxPool3d;
    .Module;
    .ReLU;
} = .modules
{.Parameter;} = .parameter

@@ -1,17 +0,0 @@
.activation = pyimport "./activation"
.container = pyimport "./container"
.conv = pyimport "./conv"
.flatten = pyimport "./flatten"
.linear = pyimport "./linear"
.loss = pyimport "./loss"
.module = pyimport "./module"
.pooling = pyimport "./pooling"
{.ReLU;} = .activation
{.Sequential;} = .container
{.Conv1d; .Conv2d; .Conv3d;} = .conv
{.Flatten;} = .flatten
{.Linear;} = .linear
{.CrossEntropyLoss;} = .loss
{.Module;} = .module
{.MaxPool1d; .MaxPool2d; .MaxPool3d;} = .pooling

@@ -1,10 +0,0 @@
{Tensor!;} = pyimport "torch"
.ReLU: ClassType
.ReLU.
    __call__: () -> .ReLU
.ReLU|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .ReLU,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)

@@ -1,11 +0,0 @@
{Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
.Sequential: ClassType
.Sequential.
    __call__: (*args: Module) -> .Sequential
.Sequential|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .Sequential,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)
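
Sketch of composing these wrappers (hedged; the "torch/nn" pyimport path follows the declarations above, and the layer sizes are arbitrary):
nn = pyimport "torch/nn"
model = nn.Sequential(
    nn.Linear(784, 128),
    nn.ReLU(),
    nn.Linear(128, 10),
)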

@@ -1,71 +0,0 @@
{Device; DType; Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
_ConvNd: ClassType
_ConvNd <: Module
.Conv1d: ClassType
.Conv1d <: _ConvNd
.Conv1d.
    __call__: (
        in_channels: Nat,
        out_channels: Nat,
        kernel_size: Nat or [Nat; 1] or (Nat,),
        stride := Nat or [Nat; 1] or (Nat,),
        padding := Str or Nat or [Nat; 1] or (Nat,),
        dilation := Nat or [Nat; 1] or (Nat,),
        groups := Nat,
        bias := Bool,
        padding_mode := Str,
        device := Device or Str or Nat,
        dtype := DType or Str,
    ) -> .Conv1d
.Conv1d|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .Conv1d,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)
.Conv2d: ClassType
.Conv2d <: _ConvNd
.Conv2d.
    __call__: (
        in_channels: Nat,
        out_channels: Nat,
        kernel_size: Nat or [Nat; 2] or (Nat, Nat),
        stride := Nat or [Nat; 2] or (Nat, Nat),
        padding := Str or Nat or [Nat; 2] or (Nat, Nat),
        dilation := Nat or [Nat; 2] or (Nat, Nat),
        groups := Nat,
        bias := Bool,
        padding_mode := Str,
        device := Device or Str or Nat,
        dtype := DType or Str,
    ) -> .Conv2d
.Conv2d|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .Conv2d,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)
.Conv3d: ClassType
.Conv3d <: _ConvNd
.Conv3d.
    __call__: (
        in_channels: Nat,
        out_channels: Nat,
        kernel_size: Nat or [Nat; 3] or (Nat, Nat, Nat),
        stride := Nat or [Nat; 3] or (Nat, Nat, Nat),
        padding := Str or Nat or [Nat; 3] or (Nat, Nat, Nat),
        dilation := Nat or [Nat; 3] or (Nat, Nat, Nat),
        groups := Nat,
        bias := Bool,
        padding_mode := Str,
        device := Device or Str or Nat,
        dtype := DType or Str,
    ) -> .Conv3d
.Conv3d|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .Conv3d,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)

@@ -1,10 +0,0 @@
{Tensor!;} = pyimport "torch"
.Flatten: ClassType
.Flatten.
    __call__: (start_dim: Nat, end_dim: Int) -> .Flatten
.Flatten|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .Flatten,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)

@@ -1,18 +0,0 @@
{Module;} = pyimport "torch/nn"
{Device; DType; Tensor!;} = pyimport "torch"
.Linear: ClassType
.Linear <: Module
.Linear.
    __call__: (
        in_features: Nat,
        out_features: Nat,
        bias := Bool,
        device := Device or Str or Nat,
        dtype := DType or Str,
    ) -> .Linear
.Linear|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .Linear,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)

@@ -1,27 +0,0 @@
{Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
_Loss: ClassType
_Loss <: Module
_Loss.
    reduction: Str
_WeightedLoss: ClassType
_WeightedLoss <: _Loss
.CrossEntropyLoss: ClassType
.CrossEntropyLoss <: _WeightedLoss
.CrossEntropyLoss.
    __call__: () -> .CrossEntropyLoss
.CrossEntropyLoss|<: GenericCallable|.
    __call__: |T|(
        self: .CrossEntropyLoss,
        input: Tensor!(T, _),
        target: Tensor!(T, _),
    ) -> Tensor!(T, [])
.CrossEntropyLoss.
    forward: |T|(
        self: .CrossEntropyLoss,
        input: Tensor!(T, _),
        target: Tensor!(T, _),
    ) -> Tensor!(T, [])

@@ -1,43 +0,0 @@
{Tensor!;} = pyimport "torch"
{Parameter;} = pyimport "torch/nn/parameter"
.Module: ClassType
.Module <: InheritableType
.Module|<: GenericCallable|.
    __call__: |T|(
        self: .Module,
        input: Tensor!(T, _),
    ) -> Tensor!(T, _)
.Module.
    __init__: (self: RefMut(.Module)) => NoneType
    parameters: (self: Ref(.Module), recurse := Bool) -> Iterator Parameter
    named_parameters: (self: Ref(.Module), prefix := Str, recurse := Bool, remove_duplicate := Bool) -> Iterator((Str, Parameter))
    # buffers: (self: Ref(.Module), recurse := Bool) -> Iterator .Tensor!
    # named_buffers: (self: Ref(.Module), prefix := Str, recurse := Bool, remove_duplicate := Bool) -> Iterator((Str, .Tensor!))
    children: (self: Ref(.Module)) -> Iterator .Module
    named_children: (self: Ref(.Module), prefix := Str) -> Iterator((Str, .Module))
    modules: (self: Ref(.Module)) -> Iterator .Module
    named_modules: (self: Ref(.Module), memo := {.Module; _}, prefix := Str, remove_duplicate := Bool) -> Iterator((Str, .Module))
    train: |T <: .Module|(self: Ref(T), mode := Bool) -> T
    eval: |T <: .Module|(self: Ref(T)) -> T
    zero_grad!: (self: RefMut(.Module), set_to_none := Bool) => NoneType
    compile: (self: Ref(.Module), *args: Obj, **kwargs: Obj) -> .Module
    # register_buffer!: (self: RefMut(.Module), name: Str, tensor := Tensor!, persistent := Bool) => NoneType
    register_parameter!: (self: RefMut(.Module), name: Str, param := Parameter) => NoneType
    add_module!: (self: RefMut(.Module), name: Str, module := .Module) => NoneType
    register_module!: (self: RefMut(.Module), name: Str, module := .Module) => NoneType
    get_submodule: (self: Ref(.Module), name: Str) -> .Module
    get_parameter: (self: Ref(.Module), name: Str) -> Parameter
    # get_buffer: (self: Ref(.Module), name: Str) -> .Tensor!
    get_extra_state: (self: Ref(.Module)) -> Obj
    set_extra_state!: (self: RefMut(.Module), state: Obj) => NoneType
    apply!: |T <: .Module|(self: T, fn: (module: RefMut(T)) => NoneType) => T
    cuda!: |T <: .Module|(self: T, device := Int) => T
    ipu!: |T <: .Module|(self: T, device := Int) => T
    xpu!: |T <: .Module|(self: T, device := Int) => T
    cpu!: |T <: .Module|(self: T) => T
    float: |T <: .Module|(self: T) -> T
    double: |T <: .Module|(self: T) -> T
    half: |T <: .Module|(self: T) -> T
    bfloat16: |T <: .Module|(self: T) -> T
    to: |T <: .Module|(self: T, *args: Obj, **kwargs: Obj) -> T

@@ -1,57 +0,0 @@
{Tensor!;} = pyimport "torch"
{Module;} = pyimport "torch/nn"
_MaxPoolNd: ClassType
_MaxPoolNd <: Module
.MaxPool1d: ClassType
.MaxPool1d <: _MaxPoolNd
.MaxPool1d <: GenericCallable
.MaxPool1d.
    __call__: (
        kernel_size: Nat or [Nat; 1] or (Nat,),
        stride := Nat or [Nat; 1] or (Nat,),
        padding := Str or Nat or [Nat; 1] or (Nat,),
        dilation := Nat or [Nat; 1] or (Nat,),
        return_indices := Bool,
        ceil_mode := Bool,
    ) -> .MaxPool1d
.MaxPool1d|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .MaxPool1d,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)
.MaxPool2d: ClassType
.MaxPool2d <: _MaxPoolNd
.MaxPool2d.
    __call__: (
        kernel_size: Nat or [Nat; 2] or (Nat, Nat),
        stride := Nat or [Nat; 2] or (Nat, Nat),
        padding := Str or Nat or [Nat; 2] or (Nat, Nat),
        dilation := Nat or [Nat; 2] or (Nat, Nat),
        return_indices := Bool,
        ceil_mode := Bool,
    ) -> .MaxPool2d
.MaxPool2d|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .MaxPool2d,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)
.MaxPool3d: ClassType
.MaxPool3d <: _MaxPoolNd
.MaxPool3d.
    __call__: (
        kernel_size: Nat or [Nat; 3] or (Nat, Nat, Nat),
        stride := Nat or [Nat; 3] or (Nat, Nat, Nat),
        padding := Str or Nat or [Nat; 3] or (Nat, Nat, Nat),
        dilation := Nat or [Nat; 3] or (Nat, Nat, Nat),
        return_indices := Bool,
        ceil_mode := Bool,
    ) -> .MaxPool3d
.MaxPool3d|<: GenericCallable|.
    __call__: |T, S: [Nat; _]|(
        self: .MaxPool3d,
        input: Tensor!(T, S),
    ) -> Tensor!(T, S)

@@ -1 +0,0 @@
.Parameter: ClassType

@@ -1,47 +0,0 @@
{Parameter;} = pyimport "torch/nn/parameter"
.Optimizer!: ClassType
.Optimizer! <: InheritableType
.Optimizer!.
    __call__: (params: Iterable(Parameter)) -> .Optimizer!
    zero_grad!: (self: RefMut .Optimizer!) => NoneType
    step!: (self: RefMut .Optimizer!) => NoneType
.ASGD!: ClassType
.ASGD! <: .Optimizer!
.Adadelta!: ClassType
.Adadelta! <: .Optimizer!
.Adagrad!: ClassType
.Adagrad! <: .Optimizer!
.Adam!: ClassType
.Adam! <: .Optimizer!
.Adam!.
    __call__: (
        params: Iterable(Parameter),
        lr := Float,
        betas := (Float, Float),
        eps := Float,
        weight_decay := Float,
        amsgrad := Bool,
        foreach := Bool,
        maximize := Bool,
    ) -> .Adam!
.AdamW!: ClassType
.AdamW! <: .Optimizer!
.Adamax!: ClassType
.Adamax! <: .Optimizer!
.LBFGS!: ClassType
.LBFGS! <: .Optimizer!
.NAdam!: ClassType
.NAdam! <: .Optimizer!
.RAdam!: ClassType
.RAdam! <: .Optimizer!
.RMSprop!: ClassType
.RMSprop! <: .Optimizer!
.Rprop!: ClassType
.Rprop! <: .Optimizer!
.SGD!: ClassType
.SGD! <: .Optimizer!
.SparseAdam!: ClassType
.SparseAdam! <: .Optimizer!
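
A hedged training-step sketch against the Optimizer! interface (the model is arbitrary; the forward pass and loss.backward!() are elided):
nn = pyimport "torch/nn"
optim = pyimport "torch/optim"
model = nn.Linear(4, 2)
optimizer = optim.Adam!(model.parameters(), lr := 0.001)
optimizer.zero_grad!()
# ... forward pass, then loss.backward!() ...
optimizer.step!()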

@@ -1 +0,0 @@
.manual_seed!: (seed: Int) => Obj

@@ -1,2 +0,0 @@
.load!: (f: PathLike) => Obj
.save!: (obj: Obj, f: PathLike) => NoneType

@@ -1 +0,0 @@
.data = pyimport "./data"

@@ -1,9 +0,0 @@
{.DataLoader;} = pyimport "./dataloader"
{.Dataset;} = pyimport "./dataset"
{
    .Sampler;
    .SequentialSampler;
    .RandomSampler;
    .SubsetRandomSampler;
    .WeightedRandomSampler;
} = pyimport "./sampler"

@@ -1,25 +0,0 @@
torch = pyimport "torch"
dataset = pyimport "./dataset"
{Sampler;} = pyimport "./sampler"
.DataLoader: ClassType
.DataLoader <: Iterable((torch.Tensor!(_, _), torch.Tensor!(_, _)))
.DataLoader.
    __call__: (
        dataset: dataset.Dataset,
        batch_size := Nat,
        shuffle := Bool,
        sampler := Sampler,
        batch_sampler := Sampler,
        num_workers := Nat,
        collate_fn := Obj,
        pin_memory := Bool,
        drop_last := Bool,
        timeout := Float,
        worker_init_fn := Obj,
        multiprocessing_context := Obj,
        generator := Obj,
        prefetch_factor := Nat,
        persistent_workers := Bool,
        pin_memory_device := Str,
    ) -> .DataLoader
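
A hedged sketch (train_dataset stands for any value of the Dataset type; per the Iterable bound above, each element the loader yields is a (Tensor!, Tensor!) pair):
data = pyimport "torch/utils/data"
loader = data.DataLoader(train_dataset, batch_size := 64, shuffle := True)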

@@ -1 +0,0 @@
.Dataset: ClassType

Some files were not shown because too many files have changed in this diff.