update tensorflow.net to 0.20.0 (#5404)
* upgrade to 3.1

* write inline data using InvariantCulture (see the sketch below)

* update tensorflow

* update Microsoft.ML.Vision

* fix test && comment

* update tensorflow.net to 0.20.1

* update tf major version

* downgrade tf runtime to 1.14.1

* Update Dependencies.props

* Update Dependencies.props

* update tffact to stop running tests on Linux with glibc < 2.3

* fix TensorFlowTransformInputShapeTest

* use tf.v1 api

* fix comment

* fix building error

* fix test

* fix nit

* remove linq

Co-authored-by: BigBigMiao <[email protected]>
LittleLittleCloud and BigBigMiao authored Oct 13, 2020
1 parent 35d5a47 commit afba0bd
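
The "write inline data using InvariantCulture" step above is about emitting numeric test data in a culture-independent format, so the tests do not break on machines whose locale uses a comma as the decimal separator. A minimal sketch of the pattern; the `WriteInlineFloat` helper is a hypothetical name, not code from this commit:

```csharp
using System;
using System.Globalization;
using System.IO;

internal static class InlineDataSketch
{
    // Hypothetical helper: always format with InvariantCulture so the emitted
    // value uses '.' as the decimal separator, regardless of the thread culture
    // (for example, de-DE would otherwise produce "3,14").
    public static void WriteInlineFloat(TextWriter writer, float value)
        => writer.Write(value.ToString(CultureInfo.InvariantCulture));

    public static void Main()
    {
        using var writer = new StringWriter();
        WriteInlineFloat(writer, 3.14f);
        Console.WriteLine(writer.ToString()); // "3.14" on every locale
    }
}
```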
Showing 9 changed files with 96 additions and 151 deletions.
6 changes: 3 additions & 3 deletions build/Dependencies.props
@@ -22,9 +22,9 @@
<SystemDrawingCommonPackageVersion>4.5.0</SystemDrawingCommonPackageVersion>
<SystemIOFileSystemAccessControl>4.5.0</SystemIOFileSystemAccessControl>
<SystemSecurityPrincipalWindows>4.5.0</SystemSecurityPrincipalWindows>
<TensorFlowVersion>1.14.0</TensorFlowVersion>
<TensorFlowMajorVersion>1</TensorFlowMajorVersion>
<TensorflowDotNETVersion>0.11.8.1</TensorflowDotNETVersion>
<TensorFlowVersion>2.3.1</TensorFlowVersion>
<TensorFlowMajorVersion>2</TensorFlowMajorVersion>
<TensorflowDotNETVersion>0.20.1</TensorflowDotNETVersion>
</PropertyGroup>

<!-- Model Builder Dependencies -->
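
A quick way to confirm at runtime that the managed binding picked up the new native runtime is to print the native library version. This is a hedged sketch: it assumes `tf.VERSION` is available in TensorFlow.NET 0.20 and that the matching SciSharp.TensorFlow.Redist 2.3.1 package is on the probing path.

```csharp
using System;
using static Tensorflow.Binding;

internal static class RuntimeVersionCheck
{
    public static void Main()
    {
        // Reports the version of the native libtensorflow that TensorFlow.NET loaded;
        // after this change it should print 2.3.1 rather than 1.14.x.
        Console.WriteLine(tf.VERSION);
    }
}
```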
1 change: 1 addition & 0 deletions src/Microsoft.ML.TensorFlow/TensorTypeExtensions.cs
@@ -5,6 +5,7 @@
using System;
using Microsoft.ML.Internal.Utilities;
using Tensorflow;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

namespace Microsoft.ML.TensorFlow
{
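
Several files in this commit add `using Utils = Microsoft.ML.Internal.Utilities.Utils;`, presumably because the upgraded `Tensorflow` namespace now brings another `Utils` type into scope and the unqualified name becomes ambiguous. A self-contained sketch of the alias pattern; the `Library.A`/`Library.B` namespaces are stand-ins for Microsoft.ML.Internal.Utilities and Tensorflow:

```csharp
namespace Library.A
{
    public static class Utils
    {
        public static int Size(int[] values) => values.Length;
    }
}

namespace Library.B
{
    public static class Utils { }
}

namespace Consumer
{
    using Library.A;
    using Library.B;
    // Without the alias, the simple name 'Utils' is ambiguous (CS0104) because both
    // imported namespaces declare a type with that name; the alias pins one of them.
    using Utils = Library.A.Utils;

    public static class Program
    {
        public static void Main()
        {
            System.Console.WriteLine(Utils.Size(new[] { 1, 2, 3 })); // prints 3
        }
    }
}
```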
60 changes: 13 additions & 47 deletions src/Microsoft.ML.TensorFlow/TensorflowTransform.cs
@@ -20,6 +20,7 @@
using Tensorflow;
using static Microsoft.ML.TensorFlow.TensorFlowUtils;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

[assembly: LoadableClass(TensorFlowTransformer.Summary, typeof(IDataTransform), typeof(TensorFlowTransformer),
typeof(TensorFlowEstimator.Options), typeof(SignatureDataTransform), TensorFlowTransformer.UserName, TensorFlowTransformer.ShortName)]
@@ -280,6 +281,7 @@ internal TensorFlowTransformer(IHostEnvironment env, Session session, string[] o
_addBatchDimensionInput = addBatchDimensionInput;
Inputs = inputColumnNames;
Outputs = outputColumnNames;
tf.compat.v1.disable_eager_execution();

(TFOutputTypes, OutputTypes, TFOutputOperations) = GetOutputInfo(Host, Session, Outputs);
(TFInputTypes, TFInputShapes, TFInputOperations) = GetInputInfo(Host, Session, Inputs, batchSize);
@@ -344,15 +346,15 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
new ObjectDisposedException(nameof(graph));

var cstatus = status == null ? new Status() : status;
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus);
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus.Handle);

cstatus.Check();

if (n == -1)
return new TensorShape(new int[0]);

var dims = new long[n];
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus);
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
cstatus.Check();
return new TensorShape(dims.Select(x => (int)x).ToArray());
}
@@ -426,12 +428,14 @@ private protected override void SaveModel(ModelSaveContext ctx)
ctx.Writer.WriteBoolByte(_addBatchDimensionInput);
if (isFrozen)
{
Status status = new Status();
var buffer = Session.graph.ToGraphDef(status);
ctx.SaveBinaryStream("TFModel", w =>
using (var status = new Status())
using (var buffer = Session.graph.ToGraphDef(status))
{
w.WriteByteArray(buffer.MemoryBlock.ToArray());
});
ctx.SaveBinaryStream("TFModel", w =>
{
w.WriteByteArray(buffer.DangerousMemoryBlock.ToArray());
});
}
}

Host.AssertNonEmpty(Inputs);
@@ -801,48 +805,10 @@ public Tensor GetTensor()
// This is done to reduce memory allocation every time tensor is created.
_denseData = new T[_vBuffer.Length];
_vBuffer.CopyTo(_denseData);
var tensor = CastDataAndReturnAsTensor(_denseData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);
return tensor;
}

private Tensor CastDataAndReturnAsTensor(T[] data)
{
if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(double))
return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
byte[][] bytes = new byte[_vBuffer.Length][];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
}

return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
}

return new Tensor(new NDArray(data, _tfShape));
}

public void BufferTrainingData()
{
_srcgetter(ref _vBuffer);
@@ -853,7 +819,7 @@ public void BufferTrainingData()
public Tensor GetBufferedBatchTensor()
{
_position = 0;
var tensor = CastDataAndReturnAsTensor(_bufferedData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);

_bufferedData = new T[_bufferedDataSize];
return tensor;
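
The new `tf.compat.v1.disable_eager_execution()` call in the constructor is needed because TensorFlow.NET 0.20 targets TensorFlow 2.x, where eager execution is on by default, while this transformer still feeds and fetches named nodes of a pre-built graph through a session. A hedged sketch of that graph-mode pattern against the TensorFlow.NET 0.20 API (the tiny sum graph here is illustrative, not the ML.NET model, and the exact helper signatures may differ slightly):

```csharp
using System;
using NumSharp;
using Tensorflow;
using static Tensorflow.Binding;

internal static class GraphModeSketch
{
    public static void Main()
    {
        // TF 2.x runs eagerly by default; switch back to the TF1-style
        // graph/session model that the transformer relies on.
        tf.compat.v1.disable_eager_execution();

        var graph = new Graph().as_default();
        var x = tf.placeholder(tf.float32, new TensorShape(4), name: "x");
        var total = tf.reduce_sum(x);

        using var session = new Session(graph);
        var result = session.run(total, new FeedItem(x, np.array(new float[] { 1f, 2f, 3f, 4f })));
        Console.WriteLine(result); // 10
    }
}
```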
44 changes: 43 additions & 1 deletion src/Microsoft.ML.TensorFlow/TensorflowUtils.cs
@@ -12,8 +12,10 @@
using Microsoft.ML.Runtime;
using Microsoft.ML.TensorFlow;
using Microsoft.ML.Transforms;
using NumSharp;
using Tensorflow;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

namespace Microsoft.ML.TensorFlow
{
@@ -410,6 +412,46 @@ internal static bool IsTypeSupported(TF_DataType tfoutput)
}
}

internal static Tensor CastDataAndReturnAsTensor<T>(T[] data, TensorShape tfShape)
{
var dims = tfShape.dims.Select(x => (long)x).ToArray();

if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(double))
return new Tensor((double[])(object)data, dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
string[] strings = new string[data.Length];
for (int i = 0; i < strings.Length; i++)
{
strings[i] = data[i].ToString();
}

return new Tensor(strings);
}

return new Tensor(new NDArray(data, tfShape));
}

/// <summary>
/// Use the runner class to easily configure inputs, outputs and targets to be passed to the session runner.
/// </summary>
@@ -491,7 +533,7 @@ public Tensor[] Run()
{
c_api.TF_SessionRun(_session, null, _inputs, _inputValues,
_inputs.Length, _outputs, _outputValues, _outputValues.Length, _operations,
_operations.Length, IntPtr.Zero, _status);
_operations.Length, IntPtr.Zero, _status.Handle);
}
catch (Exception ex)
{
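
The three near-identical `CastDataAndReturnAsTensor` copies in the transformers are consolidated into this single internal helper, and the string case now builds the tensor from a `string[]` directly instead of hand-encoding UTF-8 `byte[][]`. A minimal usage sketch; it assumes the caller can see the internal `TensorFlowUtils` type (inside the Microsoft.ML.TensorFlow assembly or a friend assembly):

```csharp
using System;
using Microsoft.ML.TensorFlow; // internal type; visible only inside the assembly or to friends
using Tensorflow;

internal static class CastHelperSketch
{
    public static void Demo()
    {
        // A flat managed buffer plus the shape the tensor should take: 2 rows x 3 columns.
        var data = new float[] { 1f, 2f, 3f, 4f, 5f, 6f };
        var shape = new TensorShape(2, 3);

        // The helper maps the managed element type to the matching TF_DataType
        // (float -> TF_FLOAT) and returns a tensor with the requested dimensions.
        Tensor tensor = TensorFlowUtils.CastDataAndReturnAsTensor(data, shape);
        Console.WriteLine(tensor.dtype); // TF_FLOAT
    }
}
```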
87 changes: 6 additions & 81 deletions src/Microsoft.ML.Vision/DnnRetrainTransform.cs
@@ -19,6 +19,7 @@
using Tensorflow;
using static Microsoft.ML.TensorFlow.TensorFlowUtils;
using static Tensorflow.Binding;
using Utils = Microsoft.ML.Internal.Utilities.Utils;

[assembly: LoadableClass(DnnRetrainTransformer.Summary, typeof(IDataTransform), typeof(DnnRetrainTransformer),
typeof(DnnRetrainEstimator.Options), typeof(SignatureDataTransform), DnnRetrainTransformer.UserName, DnnRetrainTransformer.ShortName)]
@@ -607,15 +608,15 @@ internal static TensorShape GetTensorShape(TF_Output output, Graph graph, Status
new ObjectDisposedException(nameof(graph));

var cstatus = status == null ? new Status() : status;
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus);
var n = c_api.TF_GraphGetTensorNumDims(graph, output, cstatus.Handle);

cstatus.Check();

if (n == -1)
return new TensorShape(new int[0]);

var dims = new long[n];
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus);
c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, cstatus.Handle);
cstatus.Check();
return new TensorShape(dims.Select(x => (int)x).ToArray());
}
@@ -1040,49 +1041,11 @@ public Tensor GetBufferedBatchTensor()
}
else
{
var tensor = CastDataAndReturnAsTensor(_bufferedData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
_position = 0;
return tensor;
}
}

private Tensor CastDataAndReturnAsTensor(T[] data)
{
if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(float))
return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
byte[][] bytes = new byte[_bufferedData.Length][];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
}

return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
}

return new Tensor(new NDArray(data, _tfShape));
}
}

private class TensorValueGetterVec<T> : ITensorValueGetter
@@ -1126,45 +1089,7 @@ public Tensor GetTensor()
// This is done to reduce memory allocation every time tensor is created.
_denseData = new T[_vBuffer.Length];
_vBuffer.CopyTo(_denseData);
return CastDataAndReturnAsTensor(_denseData);
}

private Tensor CastDataAndReturnAsTensor(T[] data)
{
if (typeof(T) == typeof(sbyte))
return new Tensor((sbyte[])(object)data, _dims, TF_DataType.TF_INT8);
else if (typeof(T) == typeof(long))
return new Tensor((long[])(object)data, _dims, TF_DataType.TF_INT64);
else if (typeof(T) == typeof(Int32))
return new Tensor((Int32[])(object)data, _dims, TF_DataType.TF_INT32);
else if (typeof(T) == typeof(Int16))
return new Tensor((Int16[])(object)data, _dims, TF_DataType.TF_INT16);
else if (typeof(T) == typeof(byte))
return new Tensor((byte[])(object)data, _dims, TF_DataType.TF_UINT8);
else if (typeof(T) == typeof(ulong))
return new Tensor((ulong[])(object)data, _dims, TF_DataType.TF_UINT64);
else if (typeof(T) == typeof(UInt32))
return new Tensor((UInt32[])(object)data, _dims, TF_DataType.TF_UINT32);
else if (typeof(T) == typeof(UInt16))
return new Tensor((UInt16[])(object)data, _dims, TF_DataType.TF_UINT16);
else if (typeof(T) == typeof(bool))
return new Tensor((bool[])(object)data, _dims, TF_DataType.TF_BOOL);
else if (typeof(T) == typeof(float))
return new Tensor((float[])(object)data, _dims, TF_DataType.TF_FLOAT);
else if (typeof(T) == typeof(double))
return new Tensor((double[])(object)data, _dims, TF_DataType.TF_DOUBLE);
else if (typeof(T) == typeof(ReadOnlyMemory<char>))
{
byte[][] bytes = new byte[_vBuffer.Length][];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = Encoding.UTF8.GetBytes(((ReadOnlyMemory<char>)(object)data[i]).ToArray());
}

return new Tensor(bytes, _tfShape.dims.Select(x => (long)x).ToArray());
}

return new Tensor(new NDArray(data, _tfShape));
return TensorFlowUtils.CastDataAndReturnAsTensor(_denseData, _tfShape);
}

public void BufferTrainingData()
@@ -1177,7 +1102,7 @@ public void BufferTrainingData()
public Tensor GetBufferedBatchTensor()
{
_position = 0;
var tensor = CastDataAndReturnAsTensor(_bufferedData);
var tensor = TensorFlowUtils.CastDataAndReturnAsTensor(_bufferedData, _tfShape);
_bufferedData = new T[_bufferedDataSize];
return tensor;
}
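
Both transformers now dispose the native `Status` wrapper deterministically and pass `status.Handle` to the `c_api` calls, matching the updated interop signatures in TensorFlow.NET 0.20. The interleaved before/after lines above can be hard to read, so here is the new `GetTensorShape` shape-query pattern written out on its own, lightly condensed from the diff:

```csharp
using System.Linq;
using Tensorflow;

internal static class ShapeQuerySketch
{
    public static TensorShape GetTensorShape(Graph graph, TF_Output output)
    {
        // Dispose the status wrapper deterministically and hand its native
        // handle to the c_api calls explicitly.
        using var status = new Status();

        var n = c_api.TF_GraphGetTensorNumDims(graph, output, status.Handle);
        status.Check();

        if (n == -1)
            return new TensorShape(new int[0]);

        var dims = new long[n];
        c_api.TF_GraphGetTensorShape(graph, output, dims, dims.Length, status.Handle);
        status.Check();

        return new TensorShape(dims.Select(x => (int)x).ToArray());
    }
}
```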
(The remaining 4 changed files were not loaded in this view.)
