#!/usr/bin/env ruby
# uncloseai. - Ruby Client using Official OpenAI SDK
# A Ruby client for OpenAI-compatible APIs with streaming support

require 'openai'
require 'net/http'
require 'json'

puts "=== uncloseai. Ruby Client (Official OpenAI SDK) ===\n\n"

# Endpoint URLs are supplied via the environment; ENV#[] yields nil
# for any variable that is not set.
model_endpoint_1 = ENV['MODEL_ENDPOINT_1']
model_endpoint_2 = ENV['MODEL_ENDPOINT_2']
tts_endpoint_1 = ENV['TTS_ENDPOINT_1']

# Bail out early when any of the three endpoints is missing.
if [model_endpoint_1, model_endpoint_2, tts_endpoint_1].any?(&:nil?)
  puts "ERROR: No models discovered. Set environment variables:"
  puts "  MODEL_ENDPOINT_1, MODEL_ENDPOINT_2, TTS_ENDPOINT_1"
  exit 1
end

# Query an OpenAI-compatible `/models` endpoint and return the id of the
# first model it advertises.
#
# @param endpoint [String] base URL of the OpenAI-compatible API
# @return [String] id of the first discovered model
# @raise [RuntimeError] when the endpoint returns a non-2xx response,
#   advertises no models, or the payload lacks the expected keys
def discover_first_model(endpoint)
  puts "Discovering models from #{endpoint}..."
  response = Net::HTTP.get_response(URI("#{endpoint}/models"))
  # Fail fast with a clear message instead of letting JSON.parse choke
  # on an HTML error page or an empty body.
  raise "Model discovery failed: HTTP #{response.code} from #{endpoint}" unless response.is_a?(Net::HTTPSuccess)

  # fetch raises KeyError on schema mismatch rather than returning nil.
  models = JSON.parse(response.body).fetch('data')
  raise "No models advertised by #{endpoint}" if models.empty?

  models.first.fetch('id')
end

model_1_id = discover_first_model(model_endpoint_1)
puts "Model 1: #{model_1_id}\n\n"

model_2_id = discover_first_model(model_endpoint_2)
puts "Model 2: #{model_2_id}\n\n"

# Non-streaming chat with Model 1
puts "=== Non-Streaming Chat (Model 1) ==="
client_1 = OpenAI::Client.new(
  api_key: "dummy-key",
  base_url: model_endpoint_1
)

# Single blocking request: the full completion arrives in one response.
fizzbuzz_messages = [
  { role: "user", content: "Give a Python Fizzbuzz solution in one line of code?" }
]
response = client_1.chat.completions.create(
  model: model_1_id,
  messages: fizzbuzz_messages,
  temperature: 0.5,
  max_tokens: 150
)

puts "Response: #{response.choices.first.message.content}\n\n"

# Streaming chat with Model 2
puts "=== Streaming Chat (Model 2) ==="
client_2 = OpenAI::Client.new(
  api_key: "dummy-key",
  base_url: model_endpoint_2
)

print "Response: "
# stream_raw yields server-sent chunks as they arrive; each chunk may
# carry a token delta, which we print immediately for live output.
chunks = client_2.chat.completions.stream_raw(
  model: model_2_id,
  messages: [
    { role: "user", content: "Give a Python Fizzbuzz solution in one line of code?" }
  ],
  temperature: 0.5,
  max_tokens: 150
)
chunks.each do |chunk|
  delta_text = chunk.choices&.first&.delta&.content
  print delta_text unless delta_text.nil?
end
puts "\n\n"

# TTS example
puts "=== TTS Speech Generation ==="
tts_client = OpenAI::Client.new(
  api_key: "dummy-key",
  base_url: tts_endpoint_1
)

begin
  speech = tts_client.audio.speech.create(
    model: "tts-1",
    voice: "alloy",
    input: "I think so therefore, Today is a wonderful day to grow something people love!",
    speed: 0.9
  )

  # The SDK hands back an IO-like object; read out the raw MP3 bytes
  # before writing them to disk in binary mode.
  audio_bytes = speech.read
  File.binwrite("speech.mp3", audio_bytes)
  puts "[OK] Speech file created: speech.mp3 (#{audio_bytes.bytesize} bytes)\n\n"
rescue StandardError => e
  puts "[ERROR] TTS failed: #{e.class} - #{e.message}\n\n"
end

puts "=== Examples Complete ==="
