using UnityEngine.Assertions; using System; using System.Runtime.InteropServices; using System.Text; using Unity.Collections.LowLevel.Unsafe; using UnityEngine; namespace Unity.Barracuda { /// /// TensorShape are immutable representation of a Tensor dimensions and rank. /// Depending on which constructor is used, the TensorShape will either be rank 8 and channels last (ie NHWC) or actual /// rank with unnamed tensor dimensions when using the constructor that takes int[]. /// With legacy use (explicit named constructors) of TensorShape an axis can be of size 1. For example, a tensor /// without spatial information will be N,1,1,C. With the use of TensorShape via the int[] constructor, then axes can /// have values of 0. /// [Serializable] public unsafe struct TensorShape { /// /// Max rank /// public const int MaxRank = 8; // The following dimension names are based on ONNX Dimension Denotation. // see: https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md /// /// Data channel dimension index number /// public const int DataChannel = 7; /// /// Channels dimension index number /// public const int C = DataChannel; /// /// Data feature 0 dimension index number /// public const int DataFeature0 = 6; /// /// Width dimension index number /// public const int W = DataFeature0; /// /// Data feature 1 dimension index number /// public const int DataFeature1 = 5; /// /// Height dimension index number /// public const int H = DataFeature1; /// /// Data feature 2 dimension index number /// public const int DataFeature2 = 4; /// /// Depth dimension index number /// public const int D = DataFeature2; /// /// Data feature 3 dimension index number /// public const int DataFeature3 = 3; /// /// Batch dimension index number /// public const int DataBatch = 2; /// /// Sequence length dimension index number /// public const int NumberOfDirections = 1; /// /// Sequence length dimension index number /// public const int SequenceLength = 0; /// /// Data features /// public static readonly int[] DataFeatures = { W, H, D, DataFeature3 }; /// /// Kernel input channel dimension /// public const int KernelInChannel = 6; /// /// Kernel output channel dimension /// public const int KernelOutChannel = 7; /// /// Kernel spatial dimension 0 /// public const int KernelSpatial0 = 5; /// /// Kernel spatial dimension 1 /// public const int KernelSpatial1 = DataBatch; // NOTE: maps to batch /// /// Kernel spatial dimension 2 /// public const int KernelSpatial2 = DataBatch-1; // NOTE: maps to numDirections /// /// Kernel spatial dimension 3 /// public const int KernelSpatial3 = SequenceLength; // NOTE: maps to sequenceLength /// /// Kernel spatial dimensions /// public static readonly int[] KernelSpatials = { KernelSpatial0, KernelSpatial1, KernelSpatial2, KernelSpatial3 }; /// /// Return the number of sequence. /// public int sequenceLength { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[SequenceLength]; return value; } } return 1; } } /// /// Return the number of direction. /// public int numberOfDirections { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[NumberOfDirections]; return value; } } return 1; } } /// /// Return the number of batch. /// public int batch { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[DataBatch]; return value; } } return this[0]; } } /// /// Return the size of 3rd spatial dimension (axis is DataFeature3) /// Internal for now, please use myTensorShape[DataFeature3] instead. /// internal int extraDimension { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[DataFeature3]; return value; } } return 1; } } /// /// Return the spatial depth (axis is DataFeature2). /// public int depth { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[DataFeature2]; return value; } } return 1; } } /// /// Return the spatial height (axis is DataFeature1). /// public int height { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[DataFeature1]; return value; } } return this[1]; } } /// /// Return the spatial width (axis is DataFeature0). /// public int width { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[DataFeature0]; return value; } } return this[2]; } } /// /// Return the number of channels. /// public int channels { get { if (hasNamedDimensions) { fixed (int* shape = &d0) { int value = shape[DataChannel]; return value; } } return this[3]; } } // TODO: Use `fixed int m_Shape[MaxRank];` when debugger display works int d0; int d1; int d2; int d3; int d4; int d5; int d6; int d7; #region Constructors /// /// Create a TensorShape of shape [S,R,N,T,D,H,W,C]. /// Currently seqLen must be 1. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels public TensorShape(int s, int r, int n, int t, int d, int h, int w, int c) : this() { m_UsesNamedDimensions = NamedDimension.All; m_Rank = MaxRank; fixed (int* shape = &d0) { shape[SequenceLength] = s > 0 ? s : 1; shape[NumberOfDirections] = r > 0 ? r : 1; shape[DataBatch] = n > 0 ? n : 1; shape[DataFeature3] = t > 0 ? t : 1; shape[DataFeature2] = d > 0 ? d : 1; shape[DataFeature1] = h > 0 ? h : 1; shape[DataFeature0] = w > 0 ? w : 1; shape[DataChannel] = c > 0 ? c : 1; } } /// /// Create a TensorShape of shape [1,1,N,1,D,H,W,C]. /// /// batch /// depth /// height /// width /// channels public TensorShape(int n, int d, int h, int w, int c) : this(1, 1, n, 1, d, h, w, c) { m_UsesNamedDimensions = NamedDimension.N | NamedDimension.D | NamedDimension.H | NamedDimension.W | NamedDimension.C; } /// /// Create a TensorShape of shape [1,1,N,1,1,H,W,C]. /// /// batch /// height /// width /// channels public TensorShape(int n, int h, int w, int c) : this(n, 1, h, w, c) { m_UsesNamedDimensions = NamedDimension.N | NamedDimension.H | NamedDimension.W | NamedDimension.C; } /// /// Create a TensorShape of shape [1,1,N,1,1,1,W,C]. /// /// batch /// width /// channels public TensorShape(int n, int w, int c) : this(n, 1, w, c) { m_UsesNamedDimensions = NamedDimension.N | NamedDimension.W | NamedDimension.C; } /// /// Create a TensorShape of shape [1,1,N,1,1,1,1,C]. /// /// batch /// channels public TensorShape(int n, int c) : this(n, 1, c) { m_UsesNamedDimensions = NamedDimension.N | NamedDimension.C; } /// /// Create a TensorShape of shape [1,1,N,1,1,1,1,1]. /// /// batch public TensorShape(int n) : this(n, 1) { m_UsesNamedDimensions = NamedDimension.N; } /// /// Create a TensorShape of arbitrary `shape`. /// /// shape as int array /// create the shape with no specific, named layout public TensorShape(int[] shape, bool unnamedDimensions = false) : this() { Assert.IsTrue(shape.Length <= MaxRank, $"Only shapes up to a maximum rank of {MaxRank} are supported."); if (unnamedDimensions) { m_UsesNamedDimensions = NamedDimension.None; m_Rank = shape.Length; if (m_Rank > 0) { fixed (int* dst = &d0, src = &shape[0]) { UnsafeUtility.MemCpy(dst, src, shape.Length * sizeof(int)); UnsafeUtility.MemSet(dst + shape.Length, 0, (MaxRank - shape.Length) * sizeof(int)); } } else { // Treat a scalar as a rank-1 tensor m_Rank = 1; fixed (int* dst = &d0) { UnsafeUtility.MemSet(dst, 0, MaxRank * sizeof(int)); dst[0] = 1; } } } else { TensorShape copy; switch (shape.Length) { case 0: // Treat a scalar as a rank-1 tensor copy = new TensorShape(1); break; case 1: copy = new TensorShape(shape[0]); break; case 2: copy = new TensorShape(shape[0], shape[1]); break; case 3: copy = new TensorShape(shape[0], shape[1], shape[2]); break; case 4: copy = new TensorShape(shape[0], shape[1], shape[2], shape[3]); break; case 5: copy = new TensorShape(shape[0], shape[1], shape[2], shape[3], shape[4]); break; #if UNITY_EDITOR // Restricting this to editor-only since Burst cannot have exceptions, but this code should also not be // run since there are no rank-6/7 named tensor constructors case 6: case 7: throw new ArgumentException($"Must use unnamedDimensions = true for a rank {shape.Length} tensor"); #endif case 8: default: copy = new TensorShape(shape[0], shape[1], shape[2], shape[3], shape[4], shape[5], shape[6], shape[7]); break; } fixed (TensorShape* dst = &this) { UnsafeUtility.CopyStructureToPtr(ref copy, dst); } } } #endregion #region Properties [Flags] enum NamedDimension : byte { S = 1 << SequenceLength, R = 1 << NumberOfDirections, N = 1 << DataBatch, T = 1 << DataFeature3, D = 1 << DataFeature2, H = 1 << DataFeature1, W = 1 << DataFeature0, C = 1 << DataChannel, None = 0, All = S | R | N | T | D | H | W | C } /// /// Whether this shape makes use of named dimensions or is nameless. /// public bool hasNamedDimensions => m_UsesNamedDimensions != 0; NamedDimension m_UsesNamedDimensions; /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel intermediate dimension 0. /// public int kernelSpatialDepth => numberOfDirections; /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel height. /// public int kernelHeight => batch; //Use .batch so HWCK weight use 4D constructor for backward compatibility with 4D tensorShape. /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel width. /// public int kernelWidth => height; /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel depth (aka the number of input channels of the associated operator). /// public int kernelDepth => width; /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel count (aka the number of output channels of the associated operator). /// public int kernelCount => channels; /// /// Return the number of batch. /// public int flatHeight => batch; /// /// Return the T*D*H*W*C. /// public int flatWidth { get { int w = 1; if (hasNamedDimensions) { w = extraDimension * depth * height * width * channels; return w; } for (int i = 1; i < rank; i++) { w *= this[i]; } return w; } } /// /// Return the total number of elements represented by this shape. /// public int length { get { int l = 1; if (hasNamedDimensions) { l = sequenceLength * numberOfDirections * flatHeight * flatWidth; return l; } for (int i = 0; i < rank; i++) { l *= this[i]; } return l; } } /// /// Always 8 if legacy, named constructors are used otherwise the actual rank. /// Look also at the `dimensions` property. /// public int rank => m_Rank; int m_Rank; /// /// Return the count of non-unit dimension of this shape. /// For example [N,1,1,C] dimensions is 2. /// public int dimensions { get { if (hasNamedDimensions) // legacy return (sequenceLength > 1 ? 1 : 0) + (numberOfDirections > 1 ? 1 : 0) + (batch > 1 ? 1 : 0) + (extraDimension > 1 ? 1 : 0) + (depth > 1 ? 1 : 0) + (height > 1 ? 1 : 0) + (width > 1 ? 1 : 0) + (channels > 1 ? 1 : 0); return rank; } } #endregion #region Helpers /// /// Allow to use negative axis to access tensorShape backward. /// `axis` should be from -rank to rank (exclusive). /// /// axis /// adjusted axis public int Axis(int axis) { Assert.IsTrue(axis > -rank && axis < rank); return axis >= 0 ? axis: rank + axis; } /// /// Given an offset in memory return the dimensions indices of the element as [_,_,N,_,_,H,W,C]. /// /// one dimensional index (offset) in the memory /// batch /// height /// width /// channels public void GetPositionsFromIndex(int index, ref int n, ref int h, ref int w, ref int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); c = index % shape.channels; w = (index / shape.channels) % shape.width; h = (index / (shape.channels * shape.width)) % shape.height; n = (index / (shape.channels * shape.width * shape.height * shape.depth * shape.extraDimension)) % shape.batch; } /// /// Given an offset in memory return the dimensions indices of the element as [S,R,N,T,D,H,W,C]. /// /// one dimensional index (offset) in the memory /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels public void GetPositionsFromIndex(int index, ref int s, ref int r, ref int n, ref int t, ref int d, ref int h, ref int w, ref int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); c = index % shape.channels; w = (index / shape.channels) % shape.width; h = (index / (shape.channels * shape.width)) % shape.height; d = (index / (shape.channels * shape.width * shape.height)) % shape.depth; t = (index / (shape.channels * shape.width * shape.height * shape.depth)) % shape.extraDimension; n = (index / (shape.channels * shape.width * shape.height * shape.depth * shape.extraDimension)) % shape.batch; r = (index / (shape.channels * shape.width * shape.height * shape.depth * shape.extraDimension * shape.batch)) % shape.numberOfDirections; s = (index / (shape.channels * shape.width * shape.height * shape.depth * shape.extraDimension * shape.batch * shape.numberOfDirections)) % shape.sequenceLength; } /// /// Given an offset in memory return the dimensions indices of the element as [S,R,N,T,D,H,W,C] in ChannelFirst memory layout. /// /// one dimensional index (offset) in the memory /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels internal void GetPositionsFromIndexChannelFirst(int index, ref int s, ref int r, ref int n, ref int t, ref int d, ref int h, ref int w, ref int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); w = index % shape.width; h = (index / shape.width) % shape.height; d = (index / (shape.width * shape.height)) % shape.depth; t = (index / (shape.width * shape.height * shape.depth)) % shape.extraDimension; c = (index / (shape.width * shape.height * shape.depth * shape.extraDimension)) % shape.channels; n = (index / (shape.width * shape.height * shape.depth * shape.extraDimension * shape.channels)) % shape.batch; r = (index / (shape.width * shape.height * shape.depth * shape.extraDimension * shape.channels * shape.batch)) % shape.numberOfDirections; s = (index / (shape.width * shape.height * shape.depth * shape.extraDimension * shape.channels * shape.batch * shape.numberOfDirections)) % shape.sequenceLength; } /// /// Given an offset in memory return the dimensions indices of the element as [_,_,N,_,_,H,W,C] in ChannelFirst format. /// /// one dimensional index (offset) in the memory /// batch /// height /// width /// channels internal void GetPositionsFromIndexChannelFirst(int index, ref int n, ref int h, ref int w, ref int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); w = index % shape.width; h = (index / shape.width) % shape.height; c = (index / (shape.width * shape.height * shape.depth * shape.extraDimension)) % shape.channels; n = (index / (shape.width * shape.height * shape.depth * shape.extraDimension * shape.channels)) % shape.batch; } /// /// Given an element dimensions indices [0,0,N,0,0,H,W,C] with broadcast support, return this element offset in memory. /// /// batch /// height /// width /// channels /// public int IndexWithBroadcast(int n, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); n %= shape.batch; h %= shape.height; w %= shape.width; c %= shape.channels; return Index(n, h, w, c); } /// /// Given an element dimensions indices [S,R,N,T,D,H,W,C] with broadcast support, return this element offset in memory. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int IndexWithBroadcast(int s, int r, int n, int t, int d, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); s %= shape.sequenceLength; r %= shape.numberOfDirections; n %= shape.batch; t %= shape.extraDimension; d %= shape.depth; h %= shape.height; w %= shape.width; c %= shape.channels; return Index(s, r, n, t, d, h, w, c); } /// /// Given an element dimensions indices [1,N,1,1,1,H,W,C] return this element offset in memory, clamping indices to tensor dimensions. /// /// batch /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int IndexWithClamp(int n, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); n = Math.Max(n, 0); h = Math.Max(h, 0); w = Math.Max(w, 0); c = Math.Max(c, 0); n = Math.Min(n, shape.batch - 1); h = Math.Min(h, shape.height - 1); w = Math.Min(w, shape.width - 1); c = Math.Min(c, shape.channels - 1); return Index(n, h, w, c); } /// /// Given an element dimensions indices [1,N,1,1,D,H,W,C] return this element offset in memory, clamping indices to tensor dimensions. /// /// batch /// depth /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int IndexWithClamp(int n, int d, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); n = Math.Max(n, 0); d = Math.Max(d, 0); h = Math.Max(h, 0); w = Math.Max(w, 0); c = Math.Max(c, 0); n = Math.Min(n, shape.batch - 1); d = Math.Min(d, shape.depth - 1); h = Math.Min(h, shape.height - 1); w = Math.Min(w, shape.width - 1); c = Math.Min(c, shape.channels - 1); return Index(n, d, h, w, c); } /// /// Given an element dimensions indices [S,R,N,T,D,H,W,C] return this element offset in memory, clamping indices to tensor dimensions. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int IndexWithClamp(int s, int r, int n, int t, int d, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); s = Math.Max(s, 0); r = Math.Max(r, 0); n = Math.Max(n, 0); t = Math.Max(t, 0); d = Math.Max(d, 0); h = Math.Max(h, 0); w = Math.Max(w, 0); c = Math.Max(c, 0); s = Math.Min(s, shape.sequenceLength - 1); r = Math.Min(r, shape.numberOfDirections - 1); n = Math.Min(n, shape.batch - 1); t = Math.Min(t, shape.extraDimension - 1); d = Math.Min(d, shape.depth - 1); h = Math.Min(h, shape.height - 1); w = Math.Min(w, shape.width - 1); c = Math.Min(c, shape.channels - 1); return Index(s,r,n,t,d,h,w,c); } /// /// Given an element dimensions indices [S,R,N,T,D,H,W,C] return this element offset in memory. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int Index(int s, int r, int n, int t, int d, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); int index = s * shape.numberOfDirections * shape.batch * shape.extraDimension * shape.depth * shape.height * shape.width * shape.channels + r * shape.batch * shape.extraDimension * shape.depth * shape.height * shape.width * shape.channels + n * shape.extraDimension * shape.depth * shape.height * shape.width * shape.channels + t * shape.depth * shape.height * shape.width * shape.channels + d * shape.height * shape.width * shape.channels + h * shape.width * shape.channels + w * shape.channels + c; return index; } /// /// Given an element dimensions indices [0,0,N,0,D,H,W,C] return this element offset in memory. /// /// batch /// depth /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int Index(int n, int d, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); int index = n * shape.extraDimension * shape.depth * shape.height * shape.width * shape.channels + d * shape.height * shape.width * shape.channels + h * shape.width * shape.channels + w * shape.channels + c; return index; } /// /// Given an element dimensions indices [0,0,N,0,0,H,W,C] return this element offset in memory. /// /// batch /// height /// width /// channels /// one dimensional index (offset in the flat memory region) public int Index(int n, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); int index = n * shape.extraDimension * shape.depth * shape.height * shape.width * shape.channels + h * shape.width * shape.channels + w * shape.channels + c; return index; } /// /// Given an element dimensions indices [S,R,N,T,D,H,W,C] return this element offset in memory in ChannelFirst format. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels /// one dimensional index (offset in the flat memory region) internal int IndexChannelFirst(int s, int r, int n, int t, int d, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); int index = s * shape.numberOfDirections * shape.batch * shape.channels * shape.extraDimension * shape.depth * shape.height * shape.width + r * shape.batch * shape.channels * shape.extraDimension * shape.depth * shape.height * shape.width + n * shape.channels * shape.extraDimension * shape.depth * shape.height * shape.width + c * shape.extraDimension * shape.depth * shape.height * shape.width + t * shape.depth * shape.height * shape.width + d * shape.height * shape.width + h * shape.width + w; return index; } /// /// Given an element dimensions indices [0,0,N,0,0,H,W,C] return this element offset in memory in ChannelFirst format. /// /// batch /// height /// width /// channels /// one dimensional index (offset in the flat memory region) internal int IndexChannelFirst(int n, int h, int w, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); int index = n * shape.channels * shape.extraDimension * shape.depth * shape.height * shape.width + c * shape.extraDimension * shape.depth * shape.height * shape.width + h * shape.width + w; return index; } /// /// Given an element dimensions indices [0,0,N,0,0,0,0,C] return this element offset in memory. /// /// batch /// channels /// one dimensional index (offset in the flat memory region) public int Index(int n, int c) { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); int index = n * shape.flatWidth + c; return index; } /// /// Indexer to return a dimension of this tensorShape as [S,R,N,T,D,H,W,C] /// Prefer this over ToArray() to avoid GC allocation/collection. /// /// axis public int this[int axis] { get { if (axis >= rank) #if UNITY_EDITOR throw new IndexOutOfRangeException($"Attempting to access element {axis} from a rank {rank} shape"); #else // For Burst we cannot throw exceptions, so just return 0 for now, which will likely cause an error return 0; #endif // switch case instead of ToArray() avoids GC allocation if (hasNamedDimensions) { switch(axis) { case 0: return sequenceLength; case 1: return numberOfDirections; case 2: return batch; case 3: return extraDimension; case 4: return depth; case 5: return height; case 6: return width; default: return channels; } } fixed (int* shape = &d0) { return shape[axis]; } } internal set { if (hasNamedDimensions) axis = (axis < 0 || axis > 7) ? 7 : axis; else axis = Axis(axis); if (axis >= rank) #if UNITY_EDITOR throw new IndexOutOfRangeException($"Attempting to access element {axis} from a rank {rank} shape"); #else // For Burst we cannot throw exceptions return; #endif fixed (int* shape = &d0) { if (hasNamedDimensions) shape[axis] = value > 0 ? value : 1; else shape[axis] = value; } } } /// /// Return an array representation of this tensorShape as [S,R,N,T,D,H,W,C] /// Prefer tensorShape[x] to avoid GC allocation/collection. /// /// shape as int array public int[] ToArray() { int size = rank; var shape = new int[size]; if (size > 0) { fixed (int* dst = &shape[0], src = &d0) { UnsafeUtility.MemCpy(dst, src, size * sizeof(int)); } } else { // Treat a scalar as a rank-1 tensor return new[] { 1 }; } return shape; } /// /// Remove single-dimensional entries from the shape. /// [s=1,r=1,b=4,t=1,d=1h=1,w=1,c=128] => [s=1,r=1,b=1,t=1,d=1,h=1,w=4,c=128] /// /// new TensorShape public TensorShape Squeeze() { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); var dims = shape.ToArray(); var squeezed = new TensorShape( 1,1,1,1,1,1,1,1 ); Assert.IsTrue(dims.Length == squeezed.rank); var index = squeezed.rank; foreach (var dim in dims) if (dim > 1) squeezed[--index] = dim; return squeezed; } /// /// Return a TensorShape of dimensions [S,R,N,1,1,1,1,T*D*H*W*C] /// /// new TensorShape public TensorShape Flatten() { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); return new TensorShape(shape.sequenceLength, shape.numberOfDirections, shape.batch, 1, 1, 1, 1, shape.flatWidth); } #endregion #region Comparison operators /// /// Compares two `TensorShape` objects /// /// left object /// right object /// `true` if contents of the objects `a` and `b` are equal public static bool operator ==(TensorShape a, TensorShape b) { if (a.rank != b.rank) return false; for (var i = 0; i < a.rank; ++i) { if (a[i] != b[i]) return false; } return true; } /// /// Compares two `TensorShape` objects /// /// left object /// right object /// `true` if contents of the objects `a` and `b` are not equal public static bool operator !=(TensorShape a, TensorShape b) { return !(a == b); } /// /// Compares `this` object to other object /// /// other object /// `true` if contents of the objects `a` and `b` are equal public override bool Equals(System.Object obj) { // Check for null values and compare run-time types. if (obj == null || GetType() != obj.GetType()) return false; return this == (TensorShape)obj; } /// /// Object hash code /// /// object hash code public override int GetHashCode() { var shape = this; if (!hasNamedDimensions) shape = AsNamed(); return shape.sequenceLength ^ shape.numberOfDirections ^ shape.batch ^ shape.extraDimension ^ shape.depth ^ shape.height ^ shape.width ^ shape.channels; } #endregion /// /// Object summary /// /// object summary as a string public override string ToString() { if (rank == 0) return "()"; if (hasNamedDimensions) { int b = batch; int h = height; int w = width; int c = channels; if (this.Is4D()) { return $"(n:{b}, h:{h}, w:{w}, c:{c})"; } int s = sequenceLength; int r = numberOfDirections; int t = extraDimension; int d = depth; return $"(s:{s}, r:{r}, n:{b}, t:{t}, d:{d}, h:{h}, w:{w}, c:{c})"; } else { StringBuilder sb = new StringBuilder(); sb.Append("("); for (int i = 0; i < rank; i++) { if (i != 0) sb.Append(", "); sb.Append(this[i]); } sb.Append(")"); return sb.ToString(); } } public TensorShape AsNamed() { if (hasNamedDimensions) #if UNITY_EDITOR throw new InvalidOperationException("TensorShape is already in the layout of named dimensions"); #else // For Burst we cannot throw exceptions, but this code should not execute anyway return this; #endif TensorShape shape; switch (rank) { case 0: // Treat a scalar as a rank-1 tensor shape = new TensorShape(1); break; case 1: shape = new TensorShape(this[0]); break; case 2: shape = new TensorShape(this[0], this[1]); break; case 3: shape = new TensorShape(this[0], this[1], this[2]); break; case 4: shape = new TensorShape(this[0], this[1], this[2], this[3]); break; case 5: shape = new TensorShape(this[0], this[1], this[2], this[3], this[4]); break; #if UNITY_EDITOR // Restricting this to editor-only since Burst cannot have exceptions, but this code should also not be // run since there are no rank-6/7 named tensor constructors case 6: case 7: throw new ArgumentException($"Converting from rank {rank} not supported."); #endif case 8: default: shape = new TensorShape(this[0], this[1], this[2], this[3], this[4], this[5], this[6], this[7]); break; } return shape; } public TensorShape AsUnnamed() { if (!hasNamedDimensions) #if UNITY_EDITOR throw new InvalidOperationException("TensorShape is already in the layout of unnamed dimensions"); #else // For Burst we cannot throw exceptions, but this code should not execute anyway return this; #endif int size = Burst.Intrinsics.X86.Popcnt.popcnt_u32((UInt32)m_UsesNamedDimensions); var shape = new int[size]; int s = 0; for (int i = 0; i < MaxRank; i++) { if (m_UsesNamedDimensions.HasFlag((NamedDimension)(1 << i))) shape[s++] = this[i]; } return new TensorShape(shape, true); } } /// /// Helper structure to iterate over tensor shape /// public struct TensorIterator { /// /// Tensor shape /// public readonly TensorShape shape; private readonly int m_shapeLength; /// /// Index /// public int index; /// /// dimension 0 /// public int d0; /// /// dimension 1 /// public int d1; /// /// dimension 2 /// public int d2; /// /// dimension 3 /// public int d3; /// /// dimension 4 /// public int d4; /// /// dimension 5 /// public int d5; /// /// dimension 6 /// public int d6; /// /// dimension 7 /// public int d7; /// /// Constructs Tensor shape iterator /// /// shape /// starting index public TensorIterator(TensorShape shape, int index = 0) { if (!shape.hasNamedDimensions) shape = shape.AsNamed(); this.shape = shape; m_shapeLength = shape.length; this.index = index; d0 = 0; d1 = 0; d2 = 0; d3 = 0; d4 = 0; d5 = 0; d6 = 0; d7 = 0; AssignIndexAndInvalidateDimensions(index); } /// /// Constructs Tensor shape iterator /// /// Tensor /// starting index public TensorIterator(Tensor tensor, int index = 0) : this(tensor.shape, index) { } internal void AssignIndexAndInvalidateDimensions(int index) { this.index = index; d0 = 0; d1 = 0; d2 = 0; d3 = 0; d4 = 0; d5 = 0; d6 = 0; d7 = 0; if (index != 0) shape.GetPositionsFromIndex(index, ref d0, ref d1, ref d2, ref d3, ref d4, ref d5, ref d6, ref d7); } /// /// Next element in the Tensor shape space /// public void Next() { ++index; ++d7; // carry-over chain if (d7 < shape[7]) return; d7 = 0; ++d6; if (d6 < shape[6]) return; d6 = 0; ++d5; if (d5 < shape[5]) return; d5 = 0; ++d4; if (d4 < shape[4]) return; d4 = 0; ++d3; if (d3 < shape[3]) return; d3 = 0; ++d2; if (d2 < shape[2]) return; d2 = 0; ++d1; if (d1 < shape[1]) return; d1 = 0; ++d0; } /// /// Advance iterator by `step` /// /// step count public void Advance(int step) { index += step; d7 += step; Assert.IsTrue(index >= 0); if (d7 >= shape[7] * 2 || d7 < 0) { // step is too large and would overflow the carry-over into the next dimension // or step is negative and would require a borrow from the next dimension AssignIndexAndInvalidateDimensions(index); return; } // carry-over chain if (d7 < shape[7]) return; d7 -= shape[7]; Assert.IsTrue(d7 < shape[7]); ++d6; if (d6 < shape[6]) return; d6 = 0; ++d5; if (d5 < shape[5]) return; d5 = 0; ++d4; if (d4 < shape[4]) return; d4 = 0; ++d3; if (d3 < shape[3]) return; d3 = 0; ++d2; if (d2 < shape[2]) return; d2 = 0; ++d1; if (d1 < shape[1]) return; d1 = 0; ++d0; } /// /// Is iterator in valid state /// /// `true` if iterator is still within shape public bool IsValid() { return index < m_shapeLength; } /// /// Index in reduced shape /// /// reduced shape /// index public int IndexInReducedShape(TensorShape reducedShape) { int rd0 = Math.Min(d0, reducedShape[0]-1); int rd1 = Math.Min(d1, reducedShape[1]-1); int rd2 = Math.Min(d2, reducedShape[2]-1); int rd3 = Math.Min(d3, reducedShape[3]-1); int rd4 = Math.Min(d4, reducedShape[4]-1); int rd5 = Math.Min(d5, reducedShape[5]-1); int rd6 = Math.Min(d6, reducedShape[6]-1); int rd7 = Math.Min(d7, reducedShape[7]-1); return reducedShape.Index(rd0, rd1, rd2, rd3, rd4, rd5, rd6, rd7); } /// /// Index with replaced `axis` value /// /// axis to replace /// new value for specific axis /// index public int IndexWithReplacedAxis(int axis, int newDimensionValue) { int nd0 = axis == 0 ? newDimensionValue : d0; int nd1 = axis == 1 ? newDimensionValue : d1; int nd2 = axis == 2 ? newDimensionValue : d2; int nd3 = axis == 3 ? newDimensionValue : d3; int nd4 = axis == 4 ? newDimensionValue : d4; int nd5 = axis == 5 ? newDimensionValue : d5; int nd6 = axis == 6 ? newDimensionValue : d6; int nd7 = axis == 7 ? newDimensionValue : d7; return shape.Index(nd0, nd1, nd2, nd3, nd4, nd5, nd6, nd7); } /// /// Access specific axis value /// /// axis public int this[int axis] { get { // switch case instead of ToArray() avoids GC allocation switch(axis) { case 0: return d0; case 1: return d1; case 2: return d2; case 3: return d3; case 4: return d4; case 5: return d5; case 6: return d6; default:return d7; } } } } // @TODO: most likely Tensor should still be struct - that way passing Tensor as argument into IOps would be safer (no hidden state mods), and Flatten & Reshape could return modified Tensor // ITensorData & Dispose mechanism should however allow Tensors to share the same ITensorData /// /// Multidimensional array-like data storage /// public class Tensor : UniqueResourceId, IDisposable, ITensorStatistics { private DataType m_preferredDataType; private ITensorData m_TensorOnDevice; private ITensorAllocator m_TensorAllocator; private float[] m_Cache; private bool m_CacheIsDirty; private bool m_Disposed = false; public static event Action tensorDisposed; #region Debug /// public string name { get; set; } /// /// Return if tensor was already disposed. /// internal bool disposed { get { return m_Disposed; } } #endregion /// /// Return this tensor allocator, see interface `ITensorAllocator`. /// public ITensorAllocator allocator { get { return m_TensorAllocator; } } #region Shape /// public TensorShape shape { get; private set; } /// public DataType dataType { get { if (m_TensorOnDevice == null) return m_preferredDataType; Assert.AreEqual(m_TensorOnDevice.dataType, m_preferredDataType); return m_TensorOnDevice.dataType; } } /// /// Return the number of sequences. /// public int sequenceLength { get { return shape.sequenceLength; } } /// /// Return the number of directions. /// public int numberOfDirections { get { return shape.numberOfDirections; } } /// /// Return the number of batches. /// public int batch { get { return shape.batch; } } /// /// Return the size of 3rd spatial dimension (axis is DataFeature3) /// Internal for now, please use myTensor.shape[DataFeature3] instead. /// internal int extraDimension { get { return shape.extraDimension; } } /// /// Return the spatial depth. /// public int depth { get { return shape.depth; } } /// /// Return the spatial height. /// public int height { get { return shape.height; } } /// /// Return the spatial width. /// public int width { get { return shape.width; } } /// /// Return the number of channels. /// public int channels { get { return shape.channels; } } /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel spatial depth. /// public int kernelSpatialDepth { get { return shape.kernelSpatialDepth; } } /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel spatial width. /// public int kernelWidth { get { return shape.kernelWidth; } } /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel spatial height. /// public int kernelHeight { get { return shape.kernelHeight; } } /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel depth (aka the number of input channels of the associated operator). /// public int kernelDepth { get { return shape.kernelDepth; } } /// /// Kernel dimension ordering is [D,H,W,C,K] for efficiency purpose. /// Return kernel count (aka the number of output channels of the associated operator). /// public int kernelCount { get { return shape.kernelCount; } } /// /// Return the number of batch. /// public int flatHeight { get { return shape.flatHeight; } } /// /// Return T*D*H*W*C. /// public int flatWidth { get { return shape.flatWidth; } } /// /// Return the total number of elements in this tensor. /// public int length { get { return shape.length; } } /// /// Return the count of non-unit dimension of this tensor shape. /// For example [1,1,N,1,1,1,1,C] dimensions is 2. /// public int dimensions { get { return shape.dimensions; } } #endregion #region Constructors /// /// Create a Tensor from a `shape`, an array of data `srcData` and an optional debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// `srcData` must be of size `s[0]*s[1]*s[2]*s[3]*s[4]*s[5]*s[6]*s[7]`. /// /// shape /// source data /// name public Tensor(int[] shape, float[] srcData, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), srcData, name) {} /// /// Create a Tensor of shape [N,H,W,C], an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `n*h*w*c`. /// /// batch /// height /// width /// channels /// source data /// name public Tensor(int n, int h, int w, int c, float[] srcData, string name = "") : this(new TensorShape(n, h, w, c), srcData, name) {} /// /// Create a Tensor of shape [N,1,1,C], an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `n*c`. /// /// batch /// channels /// source data /// name public Tensor(int n, int c, float[] srcData, string name = "") : this(new TensorShape(n, c), srcData, name) {} /// /// Create a Tensor with specified `shape`, an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `shape.length`. /// /// shape /// source data /// name public Tensor(TensorShape shape, float[] srcData, string name = "") { this.name = name; this.shape = shape; tensorOnDevice = new ArrayTensorData(shape); Assert.IsTrue(srcData.Length >= length); m_TensorOnDevice.Upload(srcData, shape, 0); m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor with specified `shape`, a BarracudaArray of data `srcData` and an optional debug `name`. /// `srcData` must be of size `shape.length`. /// /// shape /// source data /// name public Tensor(TensorShape shape, BarracudaArray srcData, string name = "") { this.name = name; this.shape = shape; var tensorData = new ArrayTensorData(shape, srcData.Type); tensorOnDevice = tensorData; Assert.IsTrue(srcData.Length >= length); BarracudaArray.Copy(srcData, 0, tensorData.array, 0, shape.length); m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from a `shape`, an array of data `srcData` and an optional name debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// `srcData` must be of size `s[0]*s[1]*s[2]*s[3]*s[4]*s[5]*s[6]*s[7]`. /// /// shape /// source data /// name public Tensor(int[] shape, float[][] srcData, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), srcData, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,H,W,C], an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `n*h*w*c`. /// /// batch /// height /// width /// channels /// source data /// name public Tensor(int n, int h, int w, int c, float[][] srcData, string name = "") : this(new TensorShape(n, h, w, c), srcData, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,1,1,C], an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `n*c`. /// /// batch /// channels /// source data /// name public Tensor(int n, int c, float[][] srcData, string name = "") : this(new TensorShape(n, c), srcData, name) {} /// /// Create a Tensor with specified `shape`, an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `shape.length`. /// /// shape /// source data /// name public Tensor(TensorShape shape, float[][] srcData, string name = "") { this.name = name; this.shape = shape; var arrayTensorData = new ArrayTensorData(shape); for (var i = 0; i < Math.Min(flatHeight, srcData.Length); ++i) { var src = srcData[i]; var dstOffset = i * flatWidth; BarracudaArray.Copy(src, 0, arrayTensorData.array, dstOffset, Math.Min(flatWidth, src.Length)); } tensorOnDevice = arrayTensorData; m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from a `shape`, an array of data `srcData` and an optional name debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// `srcData` must be of size `s[0]*s[1]*s[2]*s[3]*s[4]*s[5]*s[6]*s[7]`. /// /// shape /// source data /// name public Tensor(int[] shape, float[,] srcData, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), srcData, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,1,1,C], an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `n*c`. /// /// batch /// channels /// source data /// name public Tensor(int n, int c, float[,] srcData, string name = "") : this(new TensorShape(n, c), srcData, name) {} /// /// Create a Tensor with specified `shape`, an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `shape.length`. /// /// shape /// source data /// name public Tensor(TensorShape shape, float[,] srcData, string name = "") : this(shape, (Array)srcData, name) {} internal Tensor(TensorShape shape, Array srcData, string name = "") { this.name = name; this.shape = shape; var numItemToCopy = Math.Min(shape.length, srcData.Length); float[] tmpArray = new float[numItemToCopy]; Buffer.BlockCopy(srcData, 0, tmpArray, 0, numItemToCopy*Marshal.SizeOf()); var arrayTensorData = new ArrayTensorData(shape); BarracudaArray.Copy(tmpArray, arrayTensorData.array); tensorOnDevice = arrayTensorData; m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from a `shape`, an array of data `srcData` and an optional name debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// `srcData` must be of size `s[0]*s[1]*s[2]*s[3]*s[4]*s[5]*s[6]*s[7]`. /// /// shape /// source data /// name public Tensor(int[] shape, float[,,,] srcData, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), srcData, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,H,W,C], an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `n*h*w*c`. /// /// batch /// height /// width /// channels /// source data /// name public Tensor(int n, int h, int w, int c, float[,,,] srcData, string name = "") : this(new TensorShape(n, h, w, c), srcData, name) {} /// /// Create a Tensor with specified `shape`, an array of data `srcData` and an optional debug `name`. /// `srcData` must be of size `shape.length`. /// /// shape /// source data /// name public Tensor(TensorShape shape, float[,,,] srcData, string name = "") : this(shape, (Array)srcData, name) {} /// /// Create a Tensor from a `shape`, associated ComputeBuffer `srcBuffer` filled with tensor values, and an optional debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// `srcBuffer` must be larger than `s[0]*s[1]*s[2]*s[3]*s[4]*s[5]*s[6]*s[7]`. /// /// shape /// source buffer /// name public Tensor(int[] shape, ComputeBuffer srcBuffer, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), srcBuffer, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,H,W,C], associated ComputeBuffer `srcBuffer` filled with tensor values, and an optional debug `name`. /// `srcBuffer` must be larger than `n*h*w*c`. /// /// batch /// height /// width /// channels /// source buffer /// name public Tensor(int n, int h, int w, int c, ComputeBuffer srcBuffer, string name = "") : this(new TensorShape(n, h, w, c), srcBuffer, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,1,1,C], associated ComputeBuffer `srcBuffer` filled with tensor values, and an optional debug `name`. /// `srcBuffer` must be larger than `n*c`. /// /// batch /// channels /// source buffer /// name public Tensor(int n, int c, ComputeBuffer srcBuffer, string name = "") : this(new TensorShape(n, c), srcBuffer, name) {} /// /// Create a Tensor with specified `shape`, associated ComputeBuffer `srcBuffer` filled with tensor values, and an optional debug `name`. /// `srcBuffer` must be larger than `shape.length`. /// /// shape /// source buffer /// name /// thrown if specified buffer is too small or stride is mismatched public Tensor(TensorShape shape, ComputeBuffer srcBuffer, string name = "") { this.name = name; this.shape = shape; if (srcBuffer.count < shape.length) throw new ArgumentException($"Compute buffer `{name}` capacity is {srcBuffer.count} less than {shape.length} required for shape {shape}"); if (srcBuffer.stride != 4) throw new ArgumentException($"Currently only compute buffers with stride of 4 are supported. Compute buffer `{name}` stride is {srcBuffer.stride} instead"); tensorOnDevice = new ComputeTensorData(srcBuffer, shape, offset:0, name, ComputeInfo.channelsOrder); m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from a texture, shape is [1,1,1,1,1, `texture.height`, `texture.width`, `channels`]. /// If `channels` is set to -1 (default value), then number of channels in the new Tensor will match the number of channels in the texture. /// Just like `Texture2D.GetPixels` when reading from LDR texture (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc) this function will remap pixel values from byte values to the range of [0.0 .. 1.0]. Pixel values from HDR textures (such as ARGBFloat or ARGBHalf) will be left unchanged. /// /// source texture /// channels /// name public Tensor(Texture srcTexture, int channels = -1, string name = "") : this(new [] { srcTexture }, channels, name) {} /// /// Create a Tensor from multiple texture, shape is [1,1, `srcTextures.length`,1,1, `texture.height`, `texture.width`, `channels`]. /// If `channels` is set to -1 (default value), then number of channels in the new Tensor will match the number of channels in the texture. /// Just like `Texture2D.GetPixels` when reading from LDR texture (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc) this function will remap pixel values from byte values to the range of [0.0 .. 1.0]. Pixel values from HDR textures (such as ARGBFloat or ARGBHalf) will be left unchanged. /// `flipY` flips the texture along the Y direction /// `scale` and `bias` respectively scale and bias the input texture as so: scale*v+bias /// /// source textures /// flipY /// scale /// bias /// channels /// name public Tensor(Texture srcTexture, bool flipY, Vector4 scale, Vector4 bias, int channels = -1, string name = "") : this(new [] { srcTexture }, flipY, false, scale, bias, channels, name) {} /// /// Create a Tensor from multiple texture, shape is [1,1, `srcTextures.length`,1,1, `texture.height`, `texture.width`, `channels`]. /// If `channels` is set to -1 (default value), then number of channels in the new Tensor will match the number of channels in the texture. /// All textures must be of the same size and dimension. /// Just like `Texture2D.GetPixels` when reading from LDR texture (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc) this function will remap pixel values from byte values to the range of [0.0 .. 1.0]. Pixel values from HDR textures (such as ARGBFloat or ARGBHalf) will be left unchanged. /// /// source textures /// channels /// name public Tensor(Texture[] srcTextures, int channels = -1, string name = "") { this.name = name; var tensorData = new TextureAsTensorData(srcTextures, channels); //;;UnityEngine.Debug.Log("Tensor::Tensor " + n + " " + tensorData.shape + " [TEX] " + srcTextures); shape = tensorData.shape; Assert.IsTrue(tensorData.maxCapacity >= length); tensorOnDevice = tensorData; m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from multiple texture, shape is [1,1, `srcTextures.length`,1,1, `texture.height`, `texture.width`, `channels`]. /// If `channels` is set to -1 (default value), then number of channels in the new Tensor will match the number of channels in the texture. /// All textures must be of the same size and dimension. /// Just like `Texture2D.GetPixels` when reading from LDR texture (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc) this function will remap pixel values from byte values to the range of [0.0 .. 1.0]. Pixel values from HDR textures (such as ARGBFloat or ARGBHalf) will be left unchanged. /// `flipY` flips the texture along the Y direction /// If `concatOnBatch` is True then the textures are concatenated on the batch dimension : resulting `srcTextures.length`, `texture.height`, `texture.width`, `texture.channels` /// `scale` and `bias` respectively scale and bias the input texture as so: scale*v+bias /// /// source textures /// flipY /// concatOnBatch /// scale /// bias /// channels /// name public Tensor(Texture[] srcTextures, bool flipY, bool concatOnBatch, Vector4 scale, Vector4 bias, int channels = -1, string name = "") { this.name = name; var tensorData = new TextureAsTensorData(srcTextures, flipY ? TextureAsTensorData.Flip.Y : TextureAsTensorData.Flip.None, concatOnBatch ? TextureAsTensorData.InterpretDepthAs.Batch : TextureAsTensorData.InterpretDepthAs.Channels, TextureAsTensorData.InterpretColorAs.AverageMultipleChannels, scale, bias, channels); //;;UnityEngine.Debug.Log("Tensor::Tensor " + n + " " + tensorData.shape + " [TEX] " + srcTextures); shape = tensorData.shape; Assert.IsTrue(tensorData.maxCapacity >= length); tensorOnDevice = tensorData; m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from a `shape`, an ITensorData `data` and an optional debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// /// shape /// data /// name public Tensor(int[] shape, ITensorData data, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), data, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,H,W,C], an ITensorData `data` and an optional debug `name`. /// `srcData` must be of size `n*h*w*c`. /// /// batch /// height /// width /// channels /// data /// name public Tensor(int n, int h, int w, int c, ITensorData data, string name = "") : this(new TensorShape(n, h, w, c), data, name) {} /// /// Create a Tensor of shape [1,1,N,1,1,1,1,C], an ITensorData `data` and an optional debug `name`. /// `srcData` must be of size `n*c`. /// /// batch /// channels /// data /// name public Tensor(int n, int c, ITensorData data, string name = "") : this(new TensorShape(n, c), data, name) {} /// /// Create a Tensor with specified `shape`, an ITensorData `data` and an optional debug `name`. /// /// shape /// data /// name public Tensor(TensorShape shape, ITensorData data, string name = "") { this.name = name; this.shape = shape; tensorOnDevice = data; m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create an uninitialized Tensor with a shape of [1,1,1,1,1,1,1,1] and an optional debug `name`. /// /// name public Tensor(string name = "") : this(new TensorShape(1,1,1,1), name) {} /// /// Create an uninitialized Tensor from a `shape` and an optional debug `name`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C] /// S and R must be 1. /// /// shape /// name public Tensor(int[] shape, string name = "", bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), name) {} /// /// Create an uninitialized Tensor of shape [1,1,N,1,1,H,W,C] and an optional debug `name`. /// /// batch /// height /// width /// channels /// name public Tensor(int n, int h, int w, int c, string name = "") : this(new TensorShape(n, h, w, c), name) {} /// /// Create an uninitialized Tensor of shape [1,1,N,1,1,1,1,C] and an optional debug `name`. /// /// batch /// channels /// name public Tensor(int n, int c, string name = "") : this(new TensorShape(n, c), name) {} /// /// Create an uninitialized Tensor with specified `shape` and an optional debug `name`. /// /// shape /// name public Tensor(TensorShape shape, string name = "", DataType dataType = DataType.Float) { this.name = name; this.shape = shape; m_preferredDataType = dataType; tensorOnDevice = null; m_TensorAllocator = null; m_Cache = null; m_CacheIsDirty = false; } /// /// Create a Tensor from a `shape`, an ITensorData `data` and an ITensorAllocator `allocator`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// /// shape /// data /// allocator public Tensor(int[] shape, ITensorData data, ITensorAllocator allocator, bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), data, allocator) {} /// /// Create a Tensor of shape [1,1,N,1,1,H,W,C], an ITensorData `data` and an ITensorAllocator `allocator`. /// `data` must be of size `n*h*w*c`. /// /// batch /// height /// width /// channels /// data /// allocator public Tensor(int n, int h, int w, int c, ITensorData data, ITensorAllocator allocator) : this(new TensorShape(n, h, w, c), data, allocator) {} /// /// Create a Tensor of shape [1,1,N,1,1,1,1,C], an ITensorData `data` and an ITensorAllocator `allocator`. /// `srcData` must be of size `n*c`. /// /// batch /// channels /// data /// allocator public Tensor(int n, int c, ITensorData data, ITensorAllocator allocator) : this(new TensorShape(n, c), data, allocator) {} /// /// Create a Tensor with specified `shape`, an ITensorData `data` and an ITensorAllocator `allocator` /// /// shape /// data /// allocator public Tensor(TensorShape shape, ITensorData data, ITensorAllocator allocator, DataType dataType = DataType.Float) { Assert.IsTrue(data == null || data.dataType == dataType); this.name = ""; this.shape = shape; m_preferredDataType = dataType; tensorOnDevice = data; m_TensorAllocator = allocator; m_Cache = null; m_CacheIsDirty = false; } /// /// Create an uninitialized Tensor with a shape of [1,1,1,1,1,1,1,1] and an ITensorAllocator `allocator`. /// /// allocator public Tensor(ITensorAllocator allocator) : this(new TensorShape(1,1,1,1,1,1,1,1), allocator) {} /// /// Create an uninitialized Tensor from a `shape` and an ITensorAllocator `allocator`. /// `shape` must be of size 8, the order is [S,R,N,T,D,H,W,C]. /// S and R must be 1. /// /// shape /// allocator public Tensor(int[] shape, ITensorAllocator allocator, bool unnamedDimensions = false) : this(new TensorShape(shape, unnamedDimensions), allocator) {} /// /// Create an uninitialized Tensor of shape [1,1,N,1,1,H,W,C] and an ITensorAllocator `allocator`. /// /// batch /// height /// width /// channels /// allocator public Tensor(int n, int h, int w, int c, ITensorAllocator allocator) : this(new TensorShape(n, h, w, c), allocator) {} /// /// Create an uninitialized Tensor of shape [1,1,N,1,1,1,1,C] and an ITensorAllocator `allocator`. /// /// batch /// channels /// allocator public Tensor(int n, int c, ITensorAllocator allocator) : this(new TensorShape(n, c), allocator) {} /// /// Create an uninitialized Tensor with specified `shape` and ITensorAllocator `allocator`. /// /// shape /// allocator public Tensor(TensorShape shape, ITensorAllocator allocator) { this.name = ""; this.shape = shape; tensorOnDevice = null; m_TensorAllocator = allocator; m_Cache = null; m_CacheIsDirty = false; } #endregion /// /// Destructor will also dispose associated memories. /// ~Tensor() { Dispose(); } private void PinToDevice(ITensorData onDevice, bool disposeUnpinned = true) { Assert.IsTrue(onDevice?.maxCapacity >= length || onDevice == null); if (m_TensorAllocator != null) m_TensorAllocator.MoveToDevice(this, onDevice, m_TensorOnDevice, disposeUnpinned); else if (disposeUnpinned) m_TensorOnDevice?.Dispose(); tensorOnDevice = onDevice; } /// /// Upload tensor values to the device. /// This call associates tensor with the uninitialized block of data residing on a device. /// `destination` should be allocated on a target device. Previous contents of `destination` will be overwritten after this call. /// By default local cache will be discarded after this call, set `invalidateCacheAfterUpload` to false to keep the cache. /// /// destination /// invalidate cache after upload public void UploadToDevice(ITensorData destination, bool invalidateCacheAfterUpload = true) { if (m_TensorOnDevice == destination && !m_CacheIsDirty) return; PrepareCacheForAccess(); PinToDevice(destination, disposeUnpinned: true); m_CacheIsDirty = true; if (invalidateCacheAfterUpload) UploadAndInvalidateCache(); else UploadIfDirty(); } /// /// Upload tensor values to the device. /// This call allocates `destination` tensor on a target device. Previous contents of `destination` will be overwritten after this call. /// No content will be copied/initialized from the tensor regardless of the current cache/data on device /// /// destination public void AllocateOnDevice(ITensorData destination) { if (m_TensorOnDevice == destination) return; PinToDevice(destination, disposeUnpinned: true); m_Cache = null; m_CacheIsDirty = false; } /// /// Associates tensor with the block of data residing on a device. /// Tensor values will be downloaded from the `source` upon the first access. /// `source` should contain initialized and valid data representing tensor values. /// See also `PrepareCacheForAccess()` to schedule download as soon as possible. /// /// source public void AttachToDevice(ITensorData source) { if (m_TensorOnDevice == source && !m_CacheIsDirty) return; UploadIfDirty(); PinToDevice(source, disposeUnpinned: true); if (m_Cache != null) PrepareCacheForAccess(); } /// /// Remove tensor from device, will first sync the cache with device data. /// /// dispose device data /// Tensor data public ITensorData DetachFromDevice(bool disposeDeviceData = true) { PrepareCacheForAccess(); ITensorData unpinned = (disposeDeviceData) ? null : m_TensorOnDevice; PinToDevice(null, disposeDeviceData); return unpinned; } private void UploadIfDirty() { if (m_CacheIsDirty && m_TensorOnDevice != null) m_TensorOnDevice.Upload(m_Cache, shape); m_CacheIsDirty = false; } public void InvalidateCache() { // remove cache only, if pinned to device // otherwise cache holds the only copy of the tensor data and we can not loose it if (m_TensorOnDevice == null) return; m_Cache = null; m_CacheIsDirty = false; } private void UploadAndInvalidateCache() { UploadIfDirty(); InvalidateCache(); } /// /// Populate the cache with on device data. /// Blocking read if `blocking` is true (default) /// /// blocking read if `true` /// `true` if data is ready public bool PrepareCacheForAccess(bool blocking = true) { // non-blocking, schedule download for later if (!blocking && m_TensorOnDevice != null && m_Cache == null) if (!m_TensorOnDevice.ScheduleAsyncDownload(length)) return false; // blocking, have to get data now! if (m_Cache == null) { if (m_TensorOnDevice != null) m_Cache = m_TensorOnDevice.Download(shape); else m_Cache = new float[length]; m_CacheIsDirty = false; } return true; } /// /// Upload cache to device memory and delete it. /// public void FlushCache(bool uploadCache) { if(uploadCache) UploadAndInvalidateCache(); else InvalidateCache(); } // @TODO: choose approach to handle case when tensors after Flatten/Reshape are written into OR taken ownership of // 1) owns data, copy on PrepareCacheForAccess() and PinForWrite() // 2) always copy data in Flatten()/Reshape(), remove from Tensor interface // 2) always copy data in Flatten()/Reshape(), implement ICloneable for GPU ITensorData private Tensor ShallowCopy(TensorShape newShape, string newName) { Tensor copy; if (m_TensorAllocator != null) copy = m_TensorAllocator.Alloc(newShape, m_TensorOnDevice, AllocScope.LayerOutput, dataType); else copy = new Tensor(newShape, m_TensorOnDevice, null, dataType); copy.name = newName; copy.m_Cache = m_Cache; copy.m_CacheIsDirty = m_CacheIsDirty; return copy; } /// /// Create a copy of the current Tensor, sharing data storage with original tensor. /// /// new name /// shallow copy of the Tensor public Tensor ShallowCopy(string newName = null) { return ShallowCopy(shape, newName ?? $"shallowcopy of {name}"); } /// /// Create a flattened copy of the current Tensor ie of shape [1,1,N,1,1,1,1,T*D*H*W*C] /// /// new name /// shallow copy of the Tensor with new shape public Tensor Flatten(string newName = null) { var newShape = shape.Flatten(); return ShallowCopy(newShape, newName ?? $"flatten of {name}"); } /// /// Create a reshaped copy of the current Tensor. /// `newShape`.length must be equal to this.shape.length. /// /// new shape /// new name /// shallow copy of the Tensor with new shape and name public Tensor Reshape(TensorShape newShape, string newName = null) { Assert.AreEqual(shape.length, newShape.length); return ShallowCopy(newShape, newName ?? $"reshape of {name}"); } /// /// Create a copy of the current Tensor. /// /// new copy of the Tensor public Tensor DeepCopy() { // @TODO: use Tensor allocator var copy = new Tensor(shape, $"clone of {name}"); if (m_TensorOnDevice is ICloneable) { UploadIfDirty(); var copyOfTensorData = (m_TensorOnDevice as ICloneable).Clone() as ITensorData; copy.AttachToDevice(copyOfTensorData); } else { PrepareCacheForAccess(); copy.PrepareCacheForAccess(); Array.Copy(m_Cache, 0, copy.m_Cache, 0, length); } return copy; } /// /// Remove system reference to this tensor, caller assume ownership. /// public void TakeOwnership() { m_TensorAllocator?.WaiveOwnership(this); m_TensorAllocator = null; } /// Called from ITensorAllocator, puts Tensor in the ready for reuse state. internal ITensorData Invalidate() { ITensorData unpinned = m_TensorOnDevice; PinToDevice(null, false); Assert.AreEqual(m_TensorOnDevice, null); m_Cache = null; m_CacheIsDirty = false; tensorOnDevice = null; m_TensorAllocator = null; return unpinned; } internal void Init(TensorShape shape, ITensorData buffer, ITensorAllocator allocator, DataType dataType) { Assert.IsTrue(buffer == null || buffer.dataType == dataType); this.shape = shape; m_preferredDataType = dataType; tensorOnDevice = buffer; m_TensorAllocator = allocator; m_Disposed = false; } /// /// Dispose Tensor and associated memories. /// public virtual void Dispose() { m_Disposing = true; if (m_TensorAllocator != null) { m_TensorAllocator.Release(this, true); } else if (m_TensorOnDevice != null) { //;;UnityEngine.D.Log("DISPOSE " + name + " " + shape + " @ " + m_TensorOnDevice.GetType().Name); m_TensorOnDevice.Dispose(); } m_Cache = null; m_CacheIsDirty = false; tensorOnDevice = null; m_TensorAllocator = null; m_Disposing = false; m_Disposed = true; tensorDisposed?.Invoke(this); } #region Render Texture /// /// Fill a `target` RenderTexture with a portion of the tensor applying `scale` and `bias`. Portion of the target is specified by `batch` and `fromChannel`. /// `batch` specifies the tensor batch to read values from. /// `fromChannel` specifies the first tensor channel to start reading values from. /// Number of channels in the `target` texture specifies how many channels to read from the tensor, starting from index `fromChannel`. /// Resolution of the `target` must match the spatial dimensions of the tensor. /// `scale` multiplier and `bias` addition is applied to the values read from the tensor and, if `target` is LDR texture (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc), clamped to the range from 0.0 to 1.0. /// /// target RenderTexture /// batch /// from channel /// scale /// bias /// lut table public void ToRenderTexture(RenderTexture target, int batch, int fromChannel, Vector4 scale, Vector4 bias, Texture3D lut = null) { if (tensorOnDevice is TextureAsTensorData || !SystemInfo.supportsComputeShaders) { var gpuBackend = new PixelShaderOps(null); gpuBackend.TensorToRenderTexture(this, target, batch, fromChannel, scale, bias, lut); } else if (tensorOnDevice is ComputeTensorData) { var gpuBackend = new ReferenceComputeOps(null); gpuBackend.TensorToRenderTexture(this, target, batch, fromChannel, scale, bias, lut); } } /// /// Fill a `target` RenderTexture with a portion of the tensor applying `scale` and `bias`. Portion of the target is specified by `batch` and `fromChannel`. /// `batch` specifies the tensor batch to read values from. /// `fromChannel` specifies the first tensor channel to start reading values from. /// Number of channels in the `target` texture specifies how many channels to read from the tensor, starting from index `fromChannel`. /// Resolution of the `target` must match the spatial dimensions of the tensor. /// `scale` multiplier and `bias` addition is applied to the values read from the tensor and, if `target` is LDR texture (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc), clamped to the range from 0.0 to 1.0. /// /// target RenderTexture /// batch /// from channel /// scale /// bias /// lut table public void ToRenderTexture(RenderTexture target, int batch = 0, int fromChannel = 0, float scale = 1.0f, float bias = 0f, Texture3D lut = null) { ToRenderTexture(target, batch, fromChannel, new Vector4(scale,scale,scale,scale), new Vector4(bias,bias,bias,bias), lut); } /// /// Create new RenderTexture and fill it with a portion of the tensor applying `scale` and `bias`. Portion of the target is specified by `batch` and `fromChannel`. /// `format` specifies the type of the new RenderTexture. /// `batch` specifies the tensor batch to read values from. /// `fromChannel` specifies the first tensor channel to start reading values from. /// Number of channels in the `target` texture specifies how many channels to read from the tensor, starting from index `fromChannel`. /// `scale` multiplier and `bias` addition is applied to the values read from the tensor and, if `format` is LDR (RGBA32, ARGB32, RGB24, Alpha8, RG16, R8, etc), clamped to the range from 0.0 to 1.0. /// /// RenderTexture format /// batch /// from channel /// scale /// bias /// lut table /// created RenderTexture public RenderTexture ToRenderTexture(RenderTextureFormat format, int batch = 0, int fromChannel = 0, float scale = 1.0f, float bias = 0f, Texture3D lut = null) { var target = new RenderTexture(width, height, 0, format); ToRenderTexture(target, batch, fromChannel, scale, bias, lut); return target; } /// /// Create new RenderTexture and fill it with a portion of the tensor applying `scale` and `bias`. Portion of the target is specified by `batch` and `fromChannel`. /// `batch` specifies the tensor batch to read values from. /// `fromChannel` specifies the first tensor channel to start reading values from. /// Number of channels in the `target` texture specifies how many channels to read from the tensor, starting from index `fromChannel`. /// Resolution of the `target` must match the spatial dimensions of the tensor. /// `scale` multiplier and `bias` addition is applied to the values read from the tensor and clamped to the range from 0.0 to 1.0. /// /// batch /// from channel /// scale /// bias /// lut table /// public RenderTexture ToRenderTexture(int batch = 0, int fromChannel = 0, float scale = 1.0f, float bias = 0f, Texture3D lut = null) { return ToRenderTexture(RenderTextureFormat.Default, batch, fromChannel, scale, bias, lut); } #endregion #region Data access /// /// Allow to use negative axis to access tensorShape backward. /// `axis` should be from -rank to rank (exclusive). /// /// axis /// remapped axis public int Axis(int axis) { return shape.Axis(axis); } /// /// Given an element dimensions indices [0,0,N,0,0,H,W,C] return this element offset in memory. /// /// batch /// height /// width /// channels /// flat index (offset in memory) public int Index(int b, int h, int w, int ch) { return shape.Index(b, h, w, ch); } /// /// Given an element dimensions indices [0,0,N,0,D,H,W,C] return this element offset in memory. /// /// batch /// depth /// height /// width /// channels /// public int Index(int b, int d, int h, int w, int ch) { return shape.Index(b, d, h, w, ch); } /// /// Given an element dimensions indices [S,R,N,T,D,H,W,C] return this element offset in memory. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels /// flat index (offset in memory) public int Index(int s, int r, int n, int t, int d, int h, int w, int c) { return shape.Index(s, r, n, t, d, h, w, c); } /// /// Given an element dimensions indices [0,0,N,0,0,H,W,C] return this element offset in memory, clamping indices to tensor dimensions. /// /// batch /// height /// width /// channels /// flat index (offset in memory) public int IndexWithClamp(int n, int h, int w, int c) { return shape.IndexWithClamp(n, h, w, c); } /// /// Given an element dimensions indices [0,0,N,0,D,H,W,C] return this element offset in memory, clamping indices to tensor dimensions. /// /// batch /// depth /// height /// width /// channels /// flat index (offset in memory) public int IndexWithClamp(int n, int d, int h, int w, int c) { return shape.IndexWithClamp(n, d, h, w, c); } /// /// Given an element dimensions indices[0,0,N,0,0,H,W,C] with broadcast support, return this element offset in memory. /// /// batch /// height /// width /// channels /// flat index (offset in memory) public int IndexWithBroadcast(int n, int h, int w, int c) { return shape.IndexWithBroadcast(n, h, w, c); } /// /// Given an element dimensions indices [S,R,N,T,D,H,W,C] with broadcast support, return this element offset in memory. /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels /// flat index (offset in memory) public int IndexWithBroadcast(int s, int r, int n, int t, int d, int h, int w, int c) { return shape.IndexWithBroadcast(s,r,n,t,d,h,w,c); } /// /// Given an element dimensions indices [0,0,N,0,0,0,0,C] return this element offset in memory. /// /// y /// x /// flat index (offset in memory) public int Index(int y, int x) { return shape.Index(y, x); } /// /// Access element at offset `index` in this Tensor. /// This will create a blocking read, if this Tensor is a result of a computation on a different device (GPU). /// /// flat index public float this[int index] { get { PrepareCacheForAccess(); return m_Cache[index]; } set { PrepareCacheForAccess(); m_Cache[index] = value; m_CacheIsDirty = true; } } /// /// Access element at index [0,0,N,0,0,0,0,C] in this Tensor. /// This will create a blocking read, if this Tensor is a result of a computation on a different device (GPU). /// /// batch /// channels public float this[int b, int ch] { get { PrepareCacheForAccess(); return m_Cache[Index(b, ch)]; } set { PrepareCacheForAccess(); m_Cache[Index(b, ch)] = value; m_CacheIsDirty = true; } } /// /// Access element at index [0,0,N,0,0,H,W,C] in this Tensor. /// This will create a blocking read, if this Tensor is a result of a computation on a different device (GPU). /// /// batch /// height /// width /// channels public float this[int b, int h, int w, int ch] { get { PrepareCacheForAccess(); return m_Cache[Index(b, h, w, ch)]; } set { PrepareCacheForAccess(); m_Cache[Index(b, h, w, ch)] = value; m_CacheIsDirty = true; } } /// /// Access element at index [0,0,N,0,D,H,W,C] in this Tensor. /// This will create a blocking read, if this Tensor is a result of a computation on a different device (GPU). /// public float this[int b, int d, int h, int w, int ch] { get { PrepareCacheForAccess(); return m_Cache[Index(b, d, h, w, ch)]; } set { PrepareCacheForAccess(); m_Cache[Index(b, d, h, w, ch)] = value; m_CacheIsDirty = true; } } /// /// Access element at index [S,R,N,T,D,H,W,C] in this Tensor. /// This will create a blocking read, if this Tensor is a result of a computation on a different device (GPU). /// /// sequence /// direction /// batch /// time /// depth /// height /// width /// channels public float this[int s, int r, int n, int t, int d, int h, int w, int c] { get { PrepareCacheForAccess(); return m_Cache[Index(s, r, n, t , d, h, w, c)]; } set { PrepareCacheForAccess(); m_Cache[Index(s, r, n, t , d, h, w, c)] = value; m_CacheIsDirty = true; } } /// /// Return the cached linear memory representation of this tensor data. /// This will create a blocking read, if this Tensor is a result of a computation on a different device (GPU). /// IMPORTANT: Modifying contents of the returned array will have undefined behavior. /// /// cached linear memory representation of this tensor data public float[] ToReadOnlyArray() { // @TODO: implement via ITensorData.SharedAccess(), public float[] ToReadOnlyArray(ref int arrayOffset) PrepareCacheForAccess(); return m_Cache; } #endregion /// /// Device specific internal representation of Tensor data /// public ITensorData tensorOnDevice { get { return m_TensorOnDevice; } private set { m_TensorOnDevice = value; if (value != null) m_preferredDataType = value.dataType; } } /// /// Upload data to device and return its instance /// public ITensorData data { get { if (m_TensorOnDevice == null) UploadToDevice(new ArrayTensorData(shape, dataType)); return m_TensorOnDevice; } } /// public int cacheBytes => m_Cache?.Length * sizeof(float) ?? 0; /// public ITensorDataStatistics GetTensorDataStatistics() { return m_TensorOnDevice; } /// /// Tensor metadata summary /// /// Tensor metadata summary public override string ToString() { return $"(`{name}` {shape}, alloc: {m_TensorAllocator?.GetType()}, onDevice:{m_TensorOnDevice})"; } #region Obsolete private bool m_Disposing = false; // to protect from infinite-loop. in case UnpinAndDisposeTensor() is called from Dispose() /// /// Unload tensor data from device and dispose this Tensor /// /// device specific Tensor data [ObsoleteAttribute("Use Dispose instead.", false)] public ITensorData UnpinAndDisposeTensor() { // NOTE: since this Tensor is going to be Disposed // there is no need to populate cache with data from tensorOnDevice // we can save on skipping PrepareCacheForAccess() call ITensorData unpinned = tensorOnDevice; PinToDevice(null, false); if (!m_Disposing) Dispose(); return unpinned; } /// /// Read-only array of Tensor data /// [ObsoleteAttribute("Use ToReadOnlyArray instead.", false)] public float[] readonlyArray { get { PrepareCacheForAccess(); return m_Cache; } } /// /// Offset into read-only array of Tensor data /// [ObsoleteAttribute("Use ToReadOnlyArray instead.", false)] public int readonlyArrayOffset { get { return 0; } } #endregion } } // namespace Barracuda