🧾 View as a summary

# `randoms` is a 1-D random float tensor created earlier in this notebook.
spicy = randoms[:12].clone()

spicy[0] *= 10000
spicy[1] /= 10000
spicy[3] = float('inf')
spicy[4] = float('-inf')
spicy[5] = float('nan')
spicy = spicy.reshape((2,6))

source

lovely

 lovely (t:torch.Tensor, verbose=False, plain=False, depth=0, color=None)
          Type      Default  Details
 t        Tensor             Tensor of interest
 verbose  bool      False    Whether to show the full tensor
 plain    bool      False    Just print the tensor, exactly as before
 depth    int       0        Show statistics this many levels deep
 color    NoneType  None     Force color on (True) or off (False); None = auto

Examples

print(lovely(randoms[0]))
print(lovely(randoms[:2]))
print(lovely(randoms[:6].view(2, 3))) # More than 2 elements -> show statistics
print(lovely(randoms[:11]))           # More than 10 -> suppress data output
tensor 1.927
tensor[2] μ=1.707 σ=0.311 [1.927, 1.487]
tensor[2, 3] n=6 x∈[-2.106, 1.927] μ=0.276 σ=1.594 [[1.927, 1.487, 0.901], [-2.106, 0.678, -1.235]]
tensor[11] x∈[-2.106, 1.927] μ=0.046 σ=1.384
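The comments above spell out the display rules. Here is a minimal sketch of that logic (my simplified approximation, not the library's actual implementation):

def summary_sketch(t):
    # Assumed rules, per the comments above: more than 2 elements -> add
    # statistics; more than 10 elements -> drop the raw values.
    n = t.numel()
    parts = [f"tensor{list(t.shape)}"]
    if n > 2:
        parts += [f"n={n}", f"x∈[{t.min():.3f}, {t.max():.3f}]",
                  f"μ={t.mean():.3f}", f"σ={t.std():.3f}"]
    if n <= 10:
        parts.append(str([round(x, 3) for x in t.flatten().tolist()]))
    return " ".join(parts)

print(summary_sketch(randoms[:6].view(2, 3)))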
grad = torch.tensor(1., requires_grad=True, dtype=torch.float64)
print(lovely(grad)); print(lovely(grad+1))
tensor f64 grad 1.000
tensor f64 grad AddBackward0 2.000
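The grad and AddBackward0 markers are read straight off the autograd state. A quick sketch of where they come from (an illustration, based on the output above):

g = grad + 1
print(g.requires_grad)            # True -> the "grad" marker
print(type(g.grad_fn).__name__)   # AddBackward0 -> the grad_fn marker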
if torch.cuda.is_available():
    print(lovely(torch.tensor(1., device=torch.device("cuda:0"))))
    test_eq(str(lovely(torch.tensor(1., device=torch.device("cuda:0")))), "tensor cuda:0 1.000")
tensor cuda:0 1.000

Do we have any floating point nasties? Is the tensor all zeros?

# Statistics and range are calculated over the good values only, if there are at least 3 of them.
lovely(spicy)
tensor[2, 6] n=12 x∈[-1.605, 1.927e+04] μ=2.141e+03 σ=6.423e+03 +Inf! -Inf! NaN!
lovely(spicy, color=False)
tensor[2, 6] n=12 x∈[-1.605, 1.927e+04] μ=2.141e+03 σ=6.423e+03 +Inf! -Inf! NaN!
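Conceptually this amounts to masking out the non-finite entries before computing anything. A minimal sketch of that idea (assumed behavior, not the actual implementation):

good = spicy[torch.isfinite(spicy)]
if good.numel() >= 3:  # stats want at least 3 good values
    print(f"x∈[{good.min():.3f}, {good.max():.3f}] μ={good.mean():.3f} σ={good.std():.3f}")
flags = [s for s, bad in [("+Inf!", spicy.isposinf()),
                          ("-Inf!", spicy.isneginf()),
                          ("NaN!",  spicy.isnan())] if bad.any()]
print(" ".join(flags))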
lovely(torch.tensor([float("nan")]*11))
tensor[11] NaN!
lovely(torch.zeros(12))
tensor[12] all_zeros
lovely(torch.randn([0,0,0], dtype=torch.float16))
tensor[0, 0, 0] f16 empty
lovely(torch.tensor([1,2,3], dtype=torch.int32))
tensor[3] i32 x∈[1, 3] μ=2.000 σ=1.000 [1, 2, 3]
torch.set_printoptions(linewidth=120)
lovely(spicy, verbose=True)
tensor[2, 6] n=12 x∈[-1.605, 1.927e+04] μ=2.141e+03 σ=6.423e+03 +Inf! -Inf! NaN!
tensor([[ 1.9269e+04,  1.4873e-04,  9.0072e-01,         inf,        -inf,         nan],
        [-4.3067e-02, -1.6047e+00, -7.5214e-01,  1.6487e+00, -3.9248e-01, -1.4036e+00]])
lovely(spicy, plain=True)
tensor([[ 1.9269e+04,  1.4873e-04,  9.0072e-01,         inf,        -inf,         nan],
        [-4.3067e-02, -1.6047e+00, -7.5214e-01,  1.6487e+00, -3.9248e-01, -1.4036e+00]])
image = torch.load("mysteryman.pt")
image[1,2,3] = float('nan')

lovely(image, depth=2) # Limited by set_config(deeper_lines=N)
tensor[3, 196, 196] n=115248 (0.4Mb) x∈[-2.118, 2.640] μ=-0.388 σ=1.073 NaN!
  tensor[196, 196] n=38416 x∈[-2.118, 2.249] μ=-0.324 σ=1.036
    tensor[196] x∈[-1.912, 2.249] μ=-0.673 σ=0.522
    tensor[196] x∈[-1.861, 2.163] μ=-0.738 σ=0.418
    tensor[196] x∈[-1.758, 2.198] μ=-0.806 σ=0.397
    tensor[196] x∈[-1.656, 2.249] μ=-0.849 σ=0.369
    tensor[196] x∈[-1.673, 2.198] μ=-0.857 σ=0.357
    tensor[196] x∈[-1.656, 2.146] μ=-0.848 σ=0.372
    tensor[196] x∈[-1.433, 2.215] μ=-0.784 σ=0.397
    tensor[196] x∈[-1.279, 2.249] μ=-0.695 σ=0.486
    tensor[196] x∈[-1.364, 2.249] μ=-0.637 σ=0.539
    ...
  tensor[196, 196] n=38416 x∈[-1.966, 2.429] μ=-0.274 σ=0.973 NaN!
    tensor[196] x∈[-1.861, 2.411] μ=-0.529 σ=0.556
    tensor[196] x∈[-1.826, 2.359] μ=-0.562 σ=0.473
    tensor[196] x∈[-1.756, 2.376] μ=-0.622 σ=0.459 NaN!
    tensor[196] x∈[-1.633, 2.429] μ=-0.664 σ=0.430
    tensor[196] x∈[-1.651, 2.376] μ=-0.669 σ=0.399
    tensor[196] x∈[-1.633, 2.376] μ=-0.701 σ=0.391
    tensor[196] x∈[-1.563, 2.429] μ=-0.670 σ=0.380
    tensor[196] x∈[-1.475, 2.429] μ=-0.616 σ=0.386
    tensor[196] x∈[-1.511, 2.429] μ=-0.593 σ=0.399
    ...
  tensor[196, 196] n=38416 x∈[-1.804, 2.640] μ=-0.567 σ=1.178
    tensor[196] x∈[-1.717, 2.396] μ=-0.982 σ=0.350
    tensor[196] x∈[-1.752, 2.326] μ=-1.034 σ=0.314
    tensor[196] x∈[-1.648, 2.379] μ=-1.086 σ=0.314
    tensor[196] x∈[-1.630, 2.466] μ=-1.121 σ=0.305
    tensor[196] x∈[-1.717, 2.448] μ=-1.120 σ=0.302
    tensor[196] x∈[-1.717, 2.431] μ=-1.166 σ=0.314
    tensor[196] x∈[-1.560, 2.448] μ=-1.124 σ=0.326
    tensor[196] x∈[-1.421, 2.431] μ=-1.064 σ=0.383
    tensor[196] x∈[-1.526, 2.396] μ=-1.047 σ=0.417
    ...
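The depth view is essentially a recursive walk over the leading dimension, one summary line per slice. A sketch of that traversal (my approximation, not the library's code; max_lines stands in for the deeper_lines config value):

def deep_sketch(t, depth, indent=0, max_lines=9):
    # Print one summary line for this (sub-)tensor, then recurse over
    # the first dimension while depth remains, truncating long runs.
    print("  " * indent + f"tensor{list(t.shape)} μ={t.mean():.3f} σ={t.std():.3f}")
    if depth > 0 and t.dim() > 1:
        for sub in t[:max_lines]:
            deep_sketch(sub, depth - 1, indent + 1, max_lines)
        if len(t) > max_lines:
            print("  " * (indent + 1) + "...")

deep_sketch(torch.randn(3, 4, 5), depth=2)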
t = torch.zeros(2, 3, 4, names=('N', 'C', None))
test_eq(str(lovely(t)), "tensor[N=2, C=3, 4] n=24 \x1b[38;2;127;127;127mall_zeros\x1b[0m")

lovely(t)
/tmp/ipykernel_377816/3561422158.py:1: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1935.)
  t = torch.zeros(2, 3, 4, names=('N', 'C', None))
tensor[N=2, C=3, 4] n=24 all_zeros
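The N=2, C=3 annotations come straight from the tensor's dimension names. A sketch of that formatting (assumed, inferred from the output above):

dims = ", ".join(f"{name}={size}" if name else str(size)
                 for name, size in zip(t.names, t.shape))
print(f"tensor[{dims}] n={t.numel()}")  # tensor[N=2, C=3, 4] n=24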

Meta device

t = torch.empty(3,3, device="meta")
lovely(t)
tensor[3, 3] n=9 meta meta
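Meta tensors carry shape and dtype but no storage, so there are no values to compute statistics on; only the shape and device can be reported:

assert t.is_meta  # shape and dtype only, no data behind it
# t[0, 0].item() would raise here: meta tensors hold no actual values.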

CUDA memory is not leaked

def memstats():
    allocated = int(torch.cuda.memory_allocated() // (1024*1024))
    max_allocated = int(torch.cuda.max_memory_allocated() // (1024*1024))
    return f"Allocated: {allocated} MB, Max: {max_allocated} MB"

if torch.cuda.is_available():
    cudamem = torch.cuda.memory_allocated()
    print(f"before allocation: {memstats()}")
    numbers = torch.randn((3, 1024, 1024), device="cuda") # a 12 MB tensor
    torch.cuda.synchronize()

    print(f"after allocation: {memstats()}")
    # Note: the return value of lovely() is not a string but a StrProxy
    # that holds a reference to `numbers`. Delete any references to the
    # proxy as well; once it is gone, its reference to the tensor goes
    # with it.
    display(lovely(numbers) )
    print(f"after repr: {memstats()}")

    del numbers
    # torch.cuda.memory.empty_cache()

    print(f"after cleanup: {memstats()}")
    test_eq(cudamem >= torch.cuda.memory_allocated(), True)
before allocation: Allocated: 0 MB, Max: 0 MB
after allocation: Allocated: 12 MB, Max: 12 MB
tensor[3, 1024, 1024] n=3145728 (12Mb) x∈[-5.013, 5.150] μ=-0.000 σ=0.999 cuda:0
after repr: Allocated: 12 MB, Max: 12 MB
after cleanup: Allocated: 0 MB, Max: 12 MB
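The StrProxy mentioned in the comment can be pictured roughly like this (a toy model, not the library's actual class):

class StrProxySketch:
    # Keep the tensor and format lazily; the tensor stays alive exactly
    # as long as this proxy object does.
    def __init__(self, t):
        self.t = t
    def __repr__(self):
        return f"tensor{list(self.t.shape)} on {self.t.device}"

Deleting the proxy (or letting the notebook's output history drop it) releases its reference to the tensor, which is why the allocated memory returns to 0 MB above.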
# We don't really support complex numbers yet; they fall back to the plain repr.
c = torch.randn(5, dtype=torch.complex64)
lovely(c)
tensor([-0.4011-0.4035j,  1.1300+0.0788j, -0.0277+0.9978j, -0.4636+0.6064j, -1.1505-0.9865j])
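Until complex support lands, a hand-rolled summary is easy enough, e.g. over the magnitudes (purely a suggestion, not an existing or planned API):

mag = c.abs()  # element-wise magnitudes |a+bi|
print(f"tensor[{c.numel()}] c64 |x|∈[{mag.min():.3f}, {mag.max():.3f}] μ|x|={mag.mean():.3f}")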