from fastcore.test import test_eq, test_fail

test_eq(calculate_target_shape((1, 2, 3), (2, 3)), (1, 2, 3))
test_eq(calculate_target_shape((1, 2, 3), (2, 1)), (1, 2, 3))
test_eq(calculate_target_shape((1, 2, 3), (1, 3)), (1, 2, 3))
test_eq(calculate_target_shape((1, 2, 3), (1, 1)), (1, 2, 3))
test_eq(calculate_target_shape((1, 5), (3, 1)), (3, 5))
test_fail(calculate_target_shape, args=((1, 2, 3), (2, 2)), contains="Cannot broadcast")
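These tests pin down standard numpy-style broadcasting: shapes are aligned from the trailing dimension, and two dimensions are compatible when they are equal or one of them is 1. A minimal standalone sketch of that rule (illustrative only; not the library's calculate_target_shape):

def broadcast_shape(s1, s2):
    # Illustrative re-implementation of the numpy broadcasting rule.
    result = []
    # Walk both shapes right-to-left, padding the shorter one with 1s.
    for d1, d2 in zip((1,) * (len(s2) - len(s1)) + tuple(s1),
                      (1,) * (len(s1) - len(s2)) + tuple(s2)):
        if d1 != d2 and d1 != 1 and d2 != 1:
            raise ValueError(f"Cannot broadcast {s1} and {s2}")
        result.append(max(d1, d2))
    return tuple(result)

assert broadcast_shape((1, 2, 3), (2, 1)) == (1, 2, 3)
assert broadcast_shape((1, 5), (3, 1)) == (3, 5)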
Operations: Common
UnaryElementwiseOp
UnaryElementwiseOp (a, name=None)
Base class for unary elementwise operations
BinaryElementwiseOp
BinaryElementwiseOp (a, b, name=None)
Base class for binary elementwise operations
BaseOp
BaseOp (*args, name:str=None)
Base class for all operations
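For illustration, here is how a new op might plug into these base classes, following the pattern visible in the commented-out LessThan sketch further down (build self.out in __init__, route gradients to self.parents in backward). Maximum and its subgradient rule are hypothetical, not part of the library:

import numpy as np

# Hypothetical example op; assumes BinaryElementwiseOp populates
# self.args/self.parents and that Tensor accepts data/name/op,
# as the LessThan sketch below suggests.
class Maximum(BinaryElementwiseOp):
    name_template = "max({},{})"

    def __init__(self, a, b, name=None):
        super().__init__(a, b, name=name)
        self.out = Tensor(data=np.maximum(self.args[0].data, self.args[1].data),
                          name=self.name, op=self)

    def backward(self):
        # Subgradient choice: on ties, the gradient goes entirely to a.
        mask = self.parents[0].data >= self.parents[1].data
        self.parents[0].accum_grad(self.out.grad * mask)
        self.parents[1].accum_grad(self.out.grad * ~mask)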
Load
Load (name=None)
Load a tensor
Add
Add (a, b, name=None)
Add two tensors
Sub
Sub (a, b, name=None)
Subtract two tensors
Mul
Mul (a, b, name=None)
Multiply two tensors
Div
Div (a, b, name=None)
Divide two tensors
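Assuming Tensor wraps a numpy array and overloads the arithmetic operators to build these nodes (a hypothetical but typical setup for a library like this), usage would look like:

import numpy as np

# Hypothetical usage; assumes +, -, *, / dispatch to Add/Sub/Mul/Div.
a = Tensor(data=np.array([1.0, 2.0, 3.0]), name="a")
b = Tensor(data=np.array([4.0, 5.0, 6.0]), name="b")
c = (a + b) * a - b / a  # builds a small graph of Add, Mul, Sub, and Div nodes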
Neg
Neg (a, name=None)
Negate a tensor
Pow
Pow (a, power, name=None)
Raise a tensor to a power
Log
Log (a, name=None)
Take the natural logarithm of a tensor
Exp
Exp (a, name=None)
Exponentiate a tensor
ExpLog
ExpLog (a, name=None)
Exponentiate a tensor, then take the natural logarithm (fused exp followed by log)
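The backward rules for Log and Exp follow the usual identities d/dx log x = 1/x and d/dx e^x = e^x, which a finite-difference check in plain numpy confirms:

import numpy as np

x = np.array([0.5, 1.0, 2.0])
eps = 1e-6
# d/dx log(x) == 1/x
assert np.allclose((np.log(x + eps) - np.log(x - eps)) / (2 * eps), 1 / x, atol=1e-4)
# d/dx exp(x) == exp(x)
assert np.allclose((np.exp(x + eps) - np.exp(x - eps)) / (2 * eps), np.exp(x), atol=1e-4)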
Matmul
Matmul (a, b, name=None)
Matrix multiplication of two tensors
fast_mmul
fast_mmul (a, b)
Fast matrix multiplication for 2D tensors
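No body is shown above; for 2D inputs the fast path plausibly reduces to a single BLAS-backed call (an illustrative sketch, not necessarily the library's implementation):

import numpy as np

def fast_mmul_sketch(a, b):
    # Illustrative 2D-only fast path; the library's fast_mmul may differ.
    assert a.ndim == 2 and b.ndim == 2, "fast path handles 2D arrays only"
    return a @ b  # delegates to BLAS through numpy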
Sum
Sum (a, axis=None, keepdims=False, name=None)
Sum-reduce a tensor along the given axis (int or tuple of ints)
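In the backward pass, the gradient of a sum-reduction is the upstream gradient broadcast back over the reduced axes; with keepdims=True that broadcast needs no extra reshape. In plain numpy terms:

import numpy as np

x = np.ones((2, 3, 4))
g = np.ones((2, 1, 4))            # upstream grad for Sum(axis=1, keepdims=True)
dx = np.broadcast_to(g, x.shape)  # each input element contributed exactly once
assert dx.shape == x.shape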
Broadcast
Broadcast (a, target_shape, name=None)
Broadcast a tensor to the given shape
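Broadcast is the adjoint of Sum: its backward sums the upstream gradient over the axes that were expanded. In plain numpy terms:

import numpy as np

a = np.ones((1, 5))
out_grad = np.ones((3, 5))                    # grad w.r.t. the (3, 5) broadcast output
a_grad = out_grad.sum(axis=0, keepdims=True)  # collapse the expanded axis back to 1
assert a_grad.shape == a.shape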
Slice
Slice (a, key, name=None)
Slice a tensor with the given key
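The backward of a slice scatters the upstream gradient into a zero tensor of the input's shape; everything outside the selected region gets zero gradient. In plain numpy terms:

import numpy as np

x = np.arange(6.0)
out_grad = np.ones(3)   # grad w.r.t. x[1:4]
x_grad = np.zeros_like(x)
x_grad[1:4] = out_grad  # zeros everywhere the slice did not touch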
# class LessThan(BinaryElementwiseOp):
#     name_template = "({}<{})"
#
#     def __init__(self, a, b, name=None):
#         super().__init__(a, b, name=name)
#         self.out = Tensor(
#             data=self.args[0].data < self.args[1].data, name=self.name, op=self
#         )
#
#     # Comparison produces a boolean mask and is not differentiable,
#     # so no backward pass is defined.
#     # def backward(self):
#     #     self.parents[0].accum_grad(self.out.grad * (self.parents[0].data < self.parents[1].data))
#     #     self.parents[1].accum_grad(self.out.grad * (self.parents[0].data >= self.parents[1].data))


# class Where(BaseOp):
#     name_template = "where({})"
#
#     def __init__(self, a, b, c, name=None):
#         super().__init__(a, b, c, name=name)
#         self.parents = self.args
#         self.out = Tensor(
#             data=np.where(self.args[0].data, self.args[1].data, self.args[2].data),
#             name=self.name,
#             op=self,
#         )
#
#     def backward(self):
#         # The condition (parents[0]) gets no gradient; it only routes the
#         # upstream gradient between the two branches.
#         self.parents[1].accum_grad(self.out.grad * self.parents[0].data)
#         self.parents[2].accum_grad(self.out.grad * (1 - self.parents[0].data))
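The Where backward above relies on the identity where(c, a, b) == c*a + (1-c)*b for a boolean (0/1) condition c, which is also why the condition itself receives no gradient:

import numpy as np

c = np.array([True, False, True])
a = np.array([1.0, 2.0, 3.0])
b = np.array([10.0, 20.0, 30.0])
assert np.allclose(np.where(c, a, b), c * a + (1 - c) * b)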
Transpose
Transpose (a, dim0, dim1, name=None)
Transpose a tensor
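Assuming PyTorch-style semantics for the (dim0, dim1) pair, Transpose swaps a single pair of dimensions (like np.swapaxes), and its backward applies the same swap to the gradient, since the swap is its own inverse:

import numpy as np

x = np.zeros((2, 3, 4))
y = np.swapaxes(x, 0, 2)                # forward: swap dim0 and dim2
assert y.shape == (4, 3, 2)
g = np.swapaxes(np.ones_like(y), 0, 2)  # backward: the same swap undoes itself
assert g.shape == x.shape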
Dropout
Dropout (a, p_drop=0.1, training=True, name=None)
Apply Dropout to a tensor
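A common formulation (assumed here; the library may differ in details) is inverted dropout: zero each element with probability p_drop during training and scale the survivors by 1/(1 - p_drop) so the expected activation is unchanged; at inference it is the identity. A sketch:

import numpy as np

def dropout_sketch(x, p_drop=0.1, training=True):
    # Identity at inference time or when nothing is dropped.
    if not training or p_drop == 0.0:
        return x
    mask = np.random.rand(*x.shape) >= p_drop
    return x * mask / (1.0 - p_drop)  # rescale so E[output] == x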
Embedding
Embedding (a, indices, name=None)
Embedding layer
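An embedding lookup gathers rows of a weight matrix by index; its backward scatter-adds the upstream gradient into the selected rows (indices may repeat). In plain numpy terms:

import numpy as np

weights = np.random.randn(10, 4)      # vocabulary of 10, embedding dim 4
indices = np.array([1, 3, 1])
out = weights[indices]                # forward: gather rows

out_grad = np.ones_like(out)
w_grad = np.zeros_like(weights)
np.add.at(w_grad, indices, out_grad)  # backward: scatter-add handles repeated indices
assert w_grad[1].sum() == 8.0         # row 1 selected twice: 2 rows of 4 ones each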