.NET in Arize for LLM Observability

I tried to implement simple LLM observability with Arize.ai and .NET using the code below. The spans show up in my console output, but nothing appears on the Arize trace page. What should I do? A Python implementation went smoothly, and after digging through the official Arize docs I couldn't find anything related to .NET, only Python and TypeScript.
Does Arize.ai not support .NET, or am I missing something?
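One way to narrow this down, as a sketch rather than a confirmed fix: separate "are spans being created?" from "is the OTLP export failing silently?". Both hooks below come from the OpenTelemetry .NET SDK itself, not from Arize's docs; the console exporter requires the OpenTelemetry.Exporter.Console NuGet package.

// Sketch: verify span creation independently of the OTLP export.
// Requires the OpenTelemetry.Exporter.Console NuGet package (not Arize-specific).
using OpenTelemetry;
using OpenTelemetry.Trace;

var debugProvider = Sdk.CreateTracerProviderBuilder()
    .AddSource("kovai-llm-observability")
    .AddConsoleExporter() // if spans print here, creation works and the OTLP export is the suspect
    .Build();

// To see why the OTLP export itself might be failing, enable SDK self-diagnostics
// by placing an OTEL_DIAGNOSTICS.json file in the process's working directory:
//   { "LogDirectory": ".", "FileSize": 32768, "LogLevel": "Warning" }
// The SDK then writes its internal errors, including exporter failures, to a log file there.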

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Net.Http;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;
using OpenTelemetry;
using OpenTelemetry.Trace;
using OpenTelemetry.Resources;
using OpenTelemetry.Exporter;

public class Program
{
    private static readonly HttpClient httpClient = new HttpClient();

    public static async Task Main(string[] args)
    {
        // Configuration
        string space_id = "********************";
        string api_key = Environment.GetEnvironmentVariable("ARIZE_API_KEY");
        string openaiApiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY");
        string serviceName = "kovai-llm-observability";
        string projectName = "llm-dotnet-app";
        string endpoint = "https://otlp.arize.com";

        // Fail fast: a missing key would produce malformed OTLP headers
        // and the export would fail silently.
        if (string.IsNullOrEmpty(api_key) || string.IsNullOrEmpty(openaiApiKey))
        {
            Console.WriteLine("Missing ARIZE_API_KEY or OPENAI_API_KEY environment variable.");
            return;
        }

        Console.WriteLine("Starting LLM Observability Demo");
        Console.WriteLine($"Project: {projectName}");
        Console.WriteLine($"Endpoint: {endpoint}\n");

        // Initialize OpenTelemetry with an OTLP/gRPC exporter pointed at Arize
        var tracerProvider = Sdk.CreateTracerProviderBuilder()
            .AddSource(serviceName)
            .SetResourceBuilder(ResourceBuilder.CreateDefault()
                .AddService(projectName)
                .AddTelemetrySdk()
                .AddAttributes(new Dictionary<string, object>
                {
                    ["openinference.project.name"] = projectName,
                    ["model_version"] = "v1"
                }))
            .AddOtlpExporter("trace", opt =>
            {
                opt.Endpoint = new Uri(endpoint);
                opt.Protocol = OtlpExportProtocol.Grpc;
                opt.Headers = $"space_id={space_id},api_key={api_key},te=trailers";
                opt.ExportProcessorType = ExportProcessorType.Batch;
            })
            .Build();

        var tracer = tracerProvider.GetTracer(serviceName);

        // Run workflow
        await RunLLMWorkflow(tracer, openaiApiKey);

        // Flush traces before the process exits
        Console.WriteLine("\nFlushing traces to Arize...");
        var flushResult = tracerProvider.ForceFlush(10000);
        Console.WriteLine($"Flush result: {flushResult}");
        tracerProvider.Dispose();

        Console.WriteLine("\nView traces at: https://app.arize.com\n");
    }

    static async Task RunLLMWorkflow(Tracer tracer, string openaiApiKey)
    {
        // Start the parent workflow span (OpenInference kind: CHAIN)
        using var workflowSpan = tracer.StartActiveSpan("llm_workflow");
        var traceId = Activity.Current?.TraceId.ToString();
        var spanId = Activity.Current?.SpanId.ToString();

        Console.WriteLine("=== Workflow Started ===");
        Console.WriteLine($"Trace ID: {traceId}");
        Console.WriteLine($"Span ID: {spanId}");
        Console.WriteLine("Span Type: CHAIN\n");

        workflowSpan.SetAttribute("openinference.span.kind", "CHAIN");
        workflowSpan.SetAttribute("user.id", "demo_user");

        // Step 1: Preprocessing
        Console.WriteLine("Step 1: Preprocessing");
        using (var prepSpan = tracer.StartActiveSpan("preprocessing"))
        {
            Console.WriteLine($"  Span ID: {Activity.Current?.SpanId}");
            Console.WriteLine("  Input: Who is the 2030 President of UK?");
            prepSpan.SetAttribute("input.value", "Who is the 2030 President of UK?");
            await Task.Delay(50);
            prepSpan.SetStatus(Status.Ok);
            Console.WriteLine("  Status: OK\n");
        }

        // Step 2: OpenAI Call
        Console.WriteLine("Step 2: OpenAI LLM Call");
        await CallOpenAI(tracer, openaiApiKey);

        // Step 3: Postprocessing
        Console.WriteLine("\nStep 3: Postprocessing");
        using (var postSpan = tracer.StartActiveSpan("postprocessing"))
        {
            Console.WriteLine($"  Span ID: {Activity.Current?.SpanId}");
            Console.WriteLine("  Output: Processed response");
            postSpan.SetAttribute("output.value", "Processed response");
            await Task.Delay(50);
            postSpan.SetStatus(Status.Ok);
            Console.WriteLine("  Status: OK\n");
        }

        workflowSpan.SetStatus(Status.Ok);
        Console.WriteLine("=== Workflow Complete ===\n");
    }

    static async Task CallOpenAI(Tracer tracer, string apiKey)
    {
        var prompt = "Who is the 2030 President of UK?";

        // Start the LLM span (OpenInference kind: LLM)
        using var llmSpan = tracer.StartActiveSpan("openai_chat_completion");
        Console.WriteLine($"  Span ID: {Activity.Current?.SpanId}");
        Console.WriteLine("  Span Type: LLM");
        Console.WriteLine("  Model: gpt-4");
        Console.WriteLine("  Provider: openai");
        Console.WriteLine($"  Prompt: {prompt}");

        // Set LLM input attributes per the OpenInference semantic conventions
        llmSpan.SetAttribute("openinference.span.kind", "LLM");
        llmSpan.SetAttribute("llm.model_name", "gpt-4");
        llmSpan.SetAttribute("llm.provider", "openai");
        llmSpan.SetAttribute("llm.input_messages.0.message.role", "user");
        llmSpan.SetAttribute("llm.input_messages.0.message.content", prompt);

        // Prepare request
        var request = new
        {
            model = "gpt-4",
            messages = new[] { new { role = "user", content = prompt } },
            temperature = 0.7,
            max_tokens = 150
        };
        var json = JsonSerializer.Serialize(request);
        var content = new StringContent(json, Encoding.UTF8, "application/json");
        httpClient.DefaultRequestHeaders.Clear();
        httpClient.DefaultRequestHeaders.Add("Authorization", $"Bearer {apiKey}");

        // Call API
        Console.WriteLine("  Calling API...");
        var startTime = DateTime.UtcNow;
        var response = await httpClient.PostAsync("https://api.openai.com/v1/chat/completions", content);
        var latency = (DateTime.UtcNow - startTime).TotalMilliseconds;

        // Parse response (assumes a successful call; error payloads have no "choices")
        var responseBody = await response.Content.ReadAsStringAsync();
        var result = JsonSerializer.Deserialize<JsonElement>(responseBody);
        var completion = result.GetProperty("choices")[0].GetProperty("message").GetProperty("content").GetString();
        var totalTokens = result.GetProperty("usage").GetProperty("total_tokens").GetInt32();
        var promptTokens = result.GetProperty("usage").GetProperty("prompt_tokens").GetInt32();
        var completionTokens = result.GetProperty("usage").GetProperty("completion_tokens").GetInt32();

        // Set output attributes
        llmSpan.SetAttribute("llm.output_messages.0.message.content", completion);
        llmSpan.SetAttribute("llm.token_count.total", totalTokens);
        llmSpan.SetAttribute("llm.token_count.prompt", promptTokens);
        llmSpan.SetAttribute("llm.token_count.completion", completionTokens);
        llmSpan.SetAttribute("llm.response.latency_ms", latency);
        llmSpan.SetStatus(Status.Ok);

        // Print results
        Console.WriteLine($"\n  Response: {completion}");
        Console.WriteLine($"  Latency: {latency:F0}ms");
        Console.WriteLine($"  Total Tokens: {totalTokens}");
        Console.WriteLine($"  Prompt Tokens: {promptTokens}");
        Console.WriteLine($"  Completion Tokens: {completionTokens}");
        Console.WriteLine("  Status: OK");
    }
}
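If the self-diagnostics log shows gRPC connection errors, one variation worth trying is OTLP over HTTP/protobuf instead of gRPC, as a drop-in replacement for the AddOtlpExporter call above. This is a sketch under two assumptions not confirmed by anything here: that Arize's collector accepts OTLP over HTTP at this endpoint, and that /v1/traces, the standard OTLP HTTP path, is the right route. Note that with HttpProtobuf the .NET exporter uses the endpoint verbatim, so the path must be spelled out, and the gRPC-only te=trailers header is dropped.

// Sketch: same exporter switched to HTTP/protobuf to rule out gRPC/HTTP2 issues.
// Assumption: otlp.arize.com accepts OTLP/HTTP at the standard /v1/traces path.
.AddOtlpExporter(opt =>
{
    // With HttpProtobuf the endpoint must include the full path explicitly.
    opt.Endpoint = new Uri("https://otlp.arize.com/v1/traces");
    opt.Protocol = OtlpExportProtocol.HttpProtobuf;
    opt.Headers = $"space_id={space_id},api_key={api_key}"; // te=trailers is gRPC-only
    opt.ExportProcessorType = ExportProcessorType.Batch;
})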