I am done

This commit is contained in:
2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

@@ -0,0 +1,109 @@
using Godot;
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using System.Collections.Generic;
using System.Linq;
namespace GodotONNX
{
/// <include file='docs/ONNXInference.xml' path='docs/members[@name="ONNXInference"]/ONNXInference/*'/>
public partial class ONNXInference : GodotObject
{
private InferenceSession session;
/// <summary>
/// Path to the ONNX model. Use Initialize to change it.
/// </summary>
private string modelPath;
private int batchSize;
private SessionOptions SessionOpt;
/// <summary>
/// Initializes the inference session with the given model path and batch size.
/// </summary>
/// <param name="Path">Path to the ONNX model, expects a path inside resources.</param>
/// <param name="BatchSize">How many observations the model will receive per inference call.</param>
/// <returns>The output size of the model.</returns>
public int Initialize(string Path, int BatchSize)
{
modelPath = Path;
batchSize = BatchSize;
SessionOpt = SessionConfigurator.MakeConfiguredSessionOptions();
session = LoadModel(modelPath);
return session.OutputMetadata["output"].Dimensions[1];
}
/// <include file='docs/ONNXInference.xml' path='docs/members[@name="ONNXInference"]/Run/*'/>
public Godot.Collections.Dictionary<string, Godot.Collections.Array<float>> RunInference(Godot.Collections.Array<float> obs, int state_ins)
{
//Current model: any Godot RL Agents export
//Expects a float tensor named "obs" of shape [batch_size, input_size] and a float tensor named "state_ins" of shape [batch_size]
//Fill the input tensors
//Copy the observations into a flat array that backs the "obs" tensor
var span = new float[obs.Count]; //Godot arrays cannot back a DenseTensor directly, so copy element by element
for (int i = 0; i < obs.Count; i++)
{
span[i] = obs[i];
}
IReadOnlyCollection<NamedOnnxValue> inputs = new List<NamedOnnxValue>
{
NamedOnnxValue.CreateFromTensor("obs", new DenseTensor<float>(span, new int[] { batchSize, obs.Count })),
NamedOnnxValue.CreateFromTensor("state_ins", new DenseTensor<float>(new float[] { state_ins }, new int[] { batchSize }))
};
IReadOnlyCollection<string> outputNames = new List<string> { "output", "state_outs" }; //ONNX is sensitive to these names, as well as the input names
IDisposableReadOnlyCollection<DisposableNamedOnnxValue> results;
//We do not use "using" here so we get a better exception explanation later
try
{
results = session.Run(inputs, outputNames);
}
catch (OnnxRuntimeException e)
{
//This error usually means that the input is not compatible with the model, typically because of a mismatched input shape (size)
GD.Print("Error at inference: ", e);
return null;
}
//Can't convert IEnumerable<float> to Variant, so copy the results into Godot arrays
Godot.Collections.Dictionary<string, Godot.Collections.Array<float>> output = new Godot.Collections.Dictionary<string, Godot.Collections.Array<float>>();
DisposableNamedOnnxValue output1 = results.First();
DisposableNamedOnnxValue output2 = results.Last();
Godot.Collections.Array<float> output1Array = new Godot.Collections.Array<float>();
Godot.Collections.Array<float> output2Array = new Godot.Collections.Array<float>();
foreach (float f in output1.AsEnumerable<float>())
{
output1Array.Add(f);
}
foreach (float f in output2.AsEnumerable<float>())
{
output2Array.Add(f);
}
output.Add(output1.Name, output1Array);
output.Add(output2.Name, output2Array);
//Output is a dictionary of arrays, ex: { "output" : [0.1, 0.2, 0.3, 0.4, ...], "state_outs" : [0.5, ...]}
results.Dispose();
return output;
}
/// <include file='docs/ONNXInference.xml' path='docs/members[@name="ONNXInference"]/Load/*'/>
public InferenceSession LoadModel(string Path)
{
using Godot.FileAccess file = FileAccess.Open(Path, Godot.FileAccess.ModeFlags.Read);
byte[] model = file.GetBuffer((int)file.GetLength());
//The using declaration disposes the file when this method returns
return new InferenceSession(model, SessionOpt); //Load the model
}
public void FreeDisposables()
{
session.Dispose();
SessionOpt.Dispose();
}
}
}
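
For orientation, a minimal usage sketch of the class above as it might be called from a Godot C# node. The node class, model path, batch size, and observation values are hypothetical placeholders, not part of this commit:

using Godot;
using GodotONNX;

public partial class InferenceExample : Node
{
	private ONNXInference inference;

	public override void _Ready()
	{
		inference = new ONNXInference();
		//Hypothetical res:// path and batch size of 1; adjust to the exported model
		int outputSize = inference.Initialize("res://model.onnx", 1);
		GD.Print("Model output size: ", outputSize);

		//Dummy observation vector; its length must match the model's input size
		var obs = new Godot.Collections.Array<float> { 0.0f, 0.0f, 0.0f, 0.0f };
		var result = inference.RunInference(obs, 1);
		if (result != null)
		{
			//Keys are "output" and "state_outs", as returned by RunInference
			GD.Print("Action values returned: ", result["output"].Count);
		}
	}

	public override void _ExitTree()
	{
		inference.FreeDisposables();
	}
}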

@@ -0,0 +1,131 @@
using Godot;
using Microsoft.ML.OnnxRuntime;
namespace GodotONNX
{
/// <include file='docs/SessionConfigurator.xml' path='docs/members[@name="SessionConfigurator"]/SessionConfigurator/*'/>
public static class SessionConfigurator
{
public enum ComputeName
{
CUDA,
ROCm,
DirectML,
CoreML,
CPU
}
/// <include file='docs/SessionConfigurator.xml' path='docs/members[@name="SessionConfigurator"]/GetSessionOptions/*'/>
public static SessionOptions MakeConfiguredSessionOptions()
{
SessionOptions sessionOptions = new();
SetOptions(sessionOptions);
return sessionOptions;
}
private static void SetOptions(SessionOptions sessionOptions)
{
sessionOptions.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_WARNING;
ApplySystemSpecificOptions(sessionOptions);
}
/// <include file='docs/SessionConfigurator.xml' path='docs/members[@name="SessionConfigurator"]/SystemCheck/*'/>
static public void ApplySystemSpecificOptions(SessionOptions sessionOptions)
{
//Most of this function is informational; it exists mainly to track
//implementation progress of the different compute APIs.
//December 2022: CUDA is not working.
string OSName = OS.GetName(); //Get OS Name
//ComputeName ComputeAPI = ComputeCheck(); //Get Compute API
// //TODO: Get CPU architecture
//Linux can use OpenVINO (C#) on x64 and ROCm on x86 (GDNative/C++)
//Windows can use OpenVINO (C#) on x64
//TODO: try TensorRT instead of CUDA
//TODO: Use OpenVINO for Intel Graphics
// Temporarily using CPU on all platforms to avoid errors detected with DML
ComputeName ComputeAPI = ComputeName.CPU;
//match OS and Compute API
GD.Print($"OS: {OSName} Compute API: {ComputeAPI}");
// CPU is set by default without appending necessary
// sessionOptions.AppendExecutionProvider_CPU(0);
/*
switch (OSName)
{
case "Windows": //Can use CUDA, DirectML
if (ComputeAPI is ComputeName.CUDA)
{
//CUDA
//sessionOptions.AppendExecutionProvider_CUDA(0);
//sessionOptions.AppendExecutionProvider_DML(0);
}
else if (ComputeAPI is ComputeName.DirectML)
{
//DirectML
//sessionOptions.AppendExecutionProvider_DML(0);
}
break;
case "X11": //Can use CUDA, ROCm
if (ComputeAPI is ComputeName.CUDA)
{
//CUDA
//sessionOptions.AppendExecutionProvider_CUDA(0);
}
if (ComputeAPI is ComputeName.ROCm)
{
//ROCm, only works on x86
//Research indicates that this has to be compiled as a GDNative plugin
//GD.Print("ROCm not supported yet, using CPU.");
//sessionOptions.AppendExecutionProvider_CPU(0);
}
break;
case "macOS": //Can use CoreML
if (ComputeAPI is ComputeName.CoreML)
{ //CoreML
//TODO: Needs testing
//sessionOptions.AppendExecutionProvider_CoreML(0);
//CoreML on ARM64, out of the box, on x64 needs .tar file from GitHub
}
break;
default:
GD.Print("OS not Supported.");
break;
}
*/
}
/// <include file='docs/SessionConfigurator.xml' path='docs/members[@name="SessionConfigurator"]/ComputeCheck/*'/>
public static ComputeName ComputeCheck()
{
string adapterName = Godot.RenderingServer.GetVideoAdapterName();
//string adapterVendor = Godot.RenderingServer.GetVideoAdapterVendor();
adapterName = adapterName.ToUpper(System.Globalization.CultureInfo.InvariantCulture);
//TODO: handle GPU vendors for macOS
if (adapterName.Contains("INTEL"))
{
return ComputeName.DirectML;
}
if (adapterName.Contains("AMD") || adapterName.Contains("RADEON"))
{
return ComputeName.DirectML;
}
if (adapterName.Contains("NVIDIA"))
{
return ComputeName.CUDA;
}
GD.Print("Graphics Card not recognized."); //Should use CPU
return ComputeName.CPU;
}
}
}
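
As a rough sketch of how the commented-out switch above might be reintroduced once the CUDA/DML issues are resolved: the result of ComputeCheck could drive which provider is appended, falling back to the default CPU provider when the append fails. This helper is hypothetical, not part of the add-on, and assumes the corresponding Microsoft.ML.OnnxRuntime.Gpu or Microsoft.ML.OnnxRuntime.DirectML package is referenced:

using Godot;
using Microsoft.ML.OnnxRuntime;
using GodotONNX;

public static class ProviderSelectionSketch
{
	//Hypothetical helper: append an execution provider based on ComputeCheck,
	//falling back to the default CPU provider if the append fails.
	public static void AppendBestProvider(SessionOptions sessionOptions)
	{
		switch (SessionConfigurator.ComputeCheck())
		{
			case SessionConfigurator.ComputeName.CUDA:
				TryAppend(() => sessionOptions.AppendExecutionProvider_CUDA(0), "CUDA"); //Needs Microsoft.ML.OnnxRuntime.Gpu
				break;
			case SessionConfigurator.ComputeName.DirectML:
				TryAppend(() => sessionOptions.AppendExecutionProvider_DML(0), "DirectML"); //Needs Microsoft.ML.OnnxRuntime.DirectML
				break;
			default:
				break; //CPU is registered by default, nothing to append
		}
	}

	private static void TryAppend(System.Action append, string providerName)
	{
		try
		{
			append();
		}
		catch (System.Exception e)
		{
			GD.Print(providerName, " provider unavailable, using CPU: ", e.Message);
		}
	}
}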

@@ -0,0 +1,31 @@
<docs>
<members name="ONNXInference">
<ONNXInference>
<summary>
The main <c>ONNXInference</c> class that handles the inference process.
</summary>
</ONNXInference>
<Initialize>
<summary>
Initializes the inference session with the given model and batch size.
</summary>
<param name="Path">Path to the ONNX model, expects a path inside resources.</param>
<param name="BatchSize">How many observations will the model recieve.</param>
</Initialize>
<Run>
<summary>
Runs the given input through the model and returns the output.
</summary>
<param name="obs">Dictionary containing all observations.</param>
<param name="state_ins">How many different agents are creating these observations.</param>
<returns>A Dictionary of arrays, containing instructions based on the observations.</returns>
</Run>
<Load>
<summary>
Loads the given model into the inference process, using the best execution provider available.
</summary>
<param name="Path">Path to the ONNX model, expects a path inside resources.</param>
<returns>InferenceSession ready to run.</returns>
</Load>
</members>
</docs>

@@ -0,0 +1,29 @@
<docs>
<members name="SessionConfigurator">
<SessionConfigurator>
<summary>
The main <c>SessionConfigurator</c> class that handles the execution options and providers for the inference process.
</summary>
</SessionConfigurator>
<GetSessionOptions>
<summary>
Creates a SessionOptions with all available execution providers.
</summary>
<returns>SessionOptions with all available execution providers.</returns>
</GetSessionOptions>
<SystemCheck>
<summary>
Appends any execution provider available in the current system.
</summary>
<remarks>
This function is mainly informational; it exists to track the implementation progress of the different compute APIs.
</remarks>
</SystemCheck>
<ComputeCheck>
<summary>
Checks for available GPUs.
</summary>
<returns>A <c>ComputeName</c> value identifying the detected compute platform.</returns>
</ComputeCheck>
</members>
</docs>