#!/usr/bin/env perl
# UncloseAI - Perl client for OpenAI-compatible APIs with streaming support
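#
# Endpoints are passed explicitly or discovered from numbered environment
# variables (MODEL_ENDPOINT_1, MODEL_ENDPOINT_2, ... and TTS_ENDPOINT_1, ...),
# each an OpenAI-compatible base URL such as http://host:port/v1.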

use strict;
use warnings;
use LWP::UserAgent;
use JSON;
use HTTP::Request;

package UncloseAI;

# Construct a client. Endpoints default to environment-variable discovery.
sub new {
    my ($class, %args) = @_;

    my $timeout = $args{timeout} // 30;

    my $self = {
        models => [],
        tts_endpoints => [],
        api_key => $args{api_key},
        timeout => $timeout,
        debug => $args{debug} // 0,
        ua => LWP::UserAgent->new(timeout => $timeout),
        json => JSON->new->utf8
    };

    bless $self, $class;

    my $endpoints = $args{endpoints} // $self->_discover_endpoints_from_env('MODEL_ENDPOINT');
    my $tts_endpoints = $args{tts_endpoints} // $self->_discover_endpoints_from_env('TTS_ENDPOINT');

    print "[DEBUG] Initialized with " . scalar(@$endpoints) . " endpoint(s)\n" if $self->{debug};

    $self->_discover_models($endpoints);
    $self->{tts_endpoints} = $tts_endpoints;

    return $self;
}
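
# Usage sketch (the URL is illustrative; any OpenAI-compatible server works):
#   my $client = UncloseAI->new(
#       endpoints => ['http://localhost:8000/v1'],
#       api_key   => $ENV{API_KEY},   # optional; sent as a Bearer token
#       debug     => 1,
#   );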

# Return the arrayref of discovered model records ({id, endpoint, max_tokens}).
sub list_models {
    my ($self) = @_;
    return $self->{models};
}

# Blocking chat completion; returns the decoded JSON response.
sub chat {
    my ($self, $messages, %options) = @_;

    my $model_info = $self->_resolve_model($options{model});

    my $payload = {
        model => $model_info->{id},
        messages => $messages,
        max_tokens => $options{max_tokens} // 100,
        temperature => $options{temperature} // 0.7
    };

    my $response = $self->_http_request($model_info->{endpoint} . '/chat/completions', 'POST', $payload);
    return $self->{json}->decode($response);
}
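
# Example (sketch):
#   my $reply = $client->chat(
#       [ { role => 'user', content => 'Hello!' } ],
#       max_tokens => 50,
#   );
#   print $reply->{choices}[0]{message}{content};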

# Streaming chat completion; invokes $callback with each parsed SSE chunk.
sub chat_stream {
    my ($self, $messages, $callback, %options) = @_;

    my $model_info = $self->_resolve_model($options{model});

    my $payload = {
        model => $model_info->{id},
        messages => $messages,
        max_tokens => $options{max_tokens} // 500,
        temperature => $options{temperature} // 0.7,
        stream => JSON::true
    };

    my $url = $model_info->{endpoint} . '/chat/completions';
    my $request = HTTP::Request->new('POST', $url);
    $request->header('Content-Type' => 'application/json');
    $request->header('Authorization' => "Bearer $self->{api_key}") if $self->{api_key};
    $request->content($self->{json}->encode($payload));

    my $buffer = '';
    my $response = $self->{ua}->request($request, sub {
        my ($chunk, $res) = @_;
        $buffer .= $chunk;

        # Process complete lines; a trailing partial line stays in the buffer
        while ($buffer =~ s/^(.*?)\n//) {
            my $line = $1;
            $line =~ s/\r$//;          # tolerate CRLF-delimited streams
            next if $line =~ /^\s*$/;  # skip keep-alive blank lines

            if ($line =~ /^data:\s*(.*)$/) {
                my $data = $1;
                last if $data eq '[DONE]';  # end-of-stream sentinel
                next if $data =~ /^\s*$/;   # skip empty data fields

                eval {
                    my $parsed = $self->{json}->decode($data);
                    $callback->($parsed);
                };
                if ($@) {
                    print "[DEBUG] Parse error in line: $line\n" if $self->{debug};
                    print "[DEBUG] Error: $@\n" if $self->{debug};
                }
            }
        }
    });

    die 'Streaming request failed: ' . $response->status_line . "\n"
        unless $response->is_success;
}
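
# Example (sketch): print tokens as they arrive. Each callback argument is one
# decoded SSE chunk in the OpenAI streaming shape (choices[0]{delta}{content}).
#   $client->chat_stream($messages, sub {
#       my ($chunk) = @_;
#       print $chunk->{choices}[0]{delta}{content} // '';
#   });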

# Text-to-speech; returns raw audio bytes from the first TTS endpoint.
sub tts {
    my ($self, $text, %options) = @_;

    die 'No TTS endpoints available' unless @{$self->{tts_endpoints}};

    my $payload = {
        model => $options{model} // 'tts-1',
        voice => $options{voice} // 'alloy',
        input => $text
    };

    return $self->_http_request($self->{tts_endpoints}[0] . '/audio/speech', 'POST', $payload);
}
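
# Example (sketch): write the returned audio bytes to disk.
#   my $audio = $client->tts('Hello!', voice => 'alloy');
#   open my $fh, '>', 'hello.mp3' or die $!;
#   binmode $fh; print $fh $audio; close $fh;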

sub _discover_endpoints_from_env {
    my ($self, $prefix) = @_;
    my @endpoints;

    # Collect ${prefix}_1, ${prefix}_2, ... until the first gap
    my $i = 1;
    while (my $endpoint = $ENV{"${prefix}_" . $i++}) {
        push @endpoints, $endpoint;
    }

    return \@endpoints;
}
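
# Shell setup sketch (variable values are illustrative):
#   export MODEL_ENDPOINT_1=http://localhost:8000/v1
#   export MODEL_ENDPOINT_2=http://localhost:8001/v1
#   export TTS_ENDPOINT_1=http://localhost:8880/v1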

# Query each endpoint's /models listing and record what it serves.
sub _discover_models {
    my ($self, $endpoints) = @_;

    foreach my $endpoint (@$endpoints) {
        print "[DEBUG] Discovering from: $endpoint\n" if $self->{debug};

        eval {
            my $response = $self->_http_request("$endpoint/models", 'GET');
            my $data = $self->{json}->decode($response);

            foreach my $model (@{$data->{data}}) {
                push @{$self->{models}}, {
                    id => $model->{id},
                    endpoint => $endpoint,
                    max_tokens => $model->{max_model_len} // 8192
                };

                print "[DEBUG] Discovered: $model->{id}\n" if $self->{debug};
            }
        };
        if ($@) {
            print "[DEBUG] Error: $@\n" if $self->{debug};
        }
    }
}

# Select a model record by id, defaulting to the first discovered model.
sub _resolve_model {
    my ($self, $model) = @_;

    die 'No models available' unless @{$self->{models}};

    return $self->{models}[0] unless $model;

    foreach my $m (@{$self->{models}}) {
        return $m if $m->{id} eq $model;
    }

    die "Model '$model' not found";
}

# Perform a blocking HTTP request; dies on non-2xx, returns the raw body.
sub _http_request {
    my ($self, $url, $method, $payload) = @_;
    $method //= 'GET';

    my $request = HTTP::Request->new($method => $url);

    if ($method eq 'POST' && $payload) {
        $request->header('Content-Type' => 'application/json');
        $request->content($self->{json}->encode($payload));
    }

    $request->header('Authorization' => "Bearer $self->{api_key}") if $self->{api_key};

    my $response = $self->{ua}->request($request);

    die $response->status_line unless $response->is_success;

    return $response->content;
}

# Demo when run as script
package main;

if (!caller) {
    print "=== UncloseAI Perl Client (with Streaming) ===\n\n";

    my $client = UncloseAI->new(debug => 1);

    if (@{$client->list_models()} == 0) {
        print "ERROR: No models discovered. Set environment variables:\n";
        print "  MODEL_ENDPOINT_1, MODEL_ENDPOINT_2, etc.\n";
        exit 1;
    }

    my $models = $client->list_models();
    print "\nDiscovered " . scalar(@$models) . " model(s):\n";
    foreach my $m (@$models) {
        print "  - $m->{id} (max_tokens: $m->{max_tokens})\n";
    }
    print "\n";

    # Non-streaming chat
    print "=== Non-Streaming Chat ===\n";
    my $response = $client->chat([
        { role => 'system', content => 'You are a helpful AI assistant.' },
        { role => 'user', content => 'Explain quantum computing in one sentence.' }
    ]);
    print "Response: " . $response->{choices}[0]{message}{content} . "\n\n";

    # Streaming chat
    print "=== Streaming Chat ===\n";
    my $model_id = @$models > 1 ? $models->[1]{id} : undef;
    print "Model: " . ($model_id // $models->[0]{id}) . "\n";
    print "Response: ";

    $client->chat_stream([
        { role => 'system', content => 'You are a coding assistant.' },
        { role => 'user', content => 'Write a Perl function to check if a number is prime' }
    ], sub {
        my ($chunk) = @_;
        my $content = $chunk->{choices}[0]{delta}{content};
        print $content if $content;
    }, model => $model_id, max_tokens => 200);

    print "\n\n";

    # TTS
    if (@{$client->{tts_endpoints}} > 0) {
        print "=== TTS Speech Generation ===\n";
        eval {
            my $audio_data = $client->tts('Hello from UncloseAI Perl client! This demonstrates streaming support.');
            open(my $fh, '>', 'speech.mp3') or die "Cannot open file: $!";
            binmode($fh);
            print $fh $audio_data;
            close($fh);
            print "[OK] Speech file created: speech.mp3 (" . length($audio_data) . " bytes)\n\n";
        };
        if ($@) {
            print "[ERROR] TTS Error: $@\n\n";
        }
    }

    print "=== Examples Complete ===\n";
}

1;  # return true so this file can also be loaded as a module
