complete method

@override
Future<AIResponse> complete(
  List<AIMessage> messages, {
  int? maxTokens,
  double? temperature,
  List<AITool>? tools,
})

Sends a completion request built from messages and returns the full response once generation finishes, including the text content, token usage with an estimated cost, the model and provider names, request latency, and any tool calls the model requested.
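A minimal usage sketch is shown below. The provider variable stands for any concrete implementation of this interface, and the AIMessage constructor with role and content named parameters is an assumption made for illustration; the fields read from the response match those populated by the implementation.

// `provider` is any concrete implementation exposing complete();
// the AIMessage constructor shown (role/content) is an assumption.
final response = await provider.complete(
  [
    AIMessage(role: 'system', content: 'You are a concise assistant.'),
    AIMessage(role: 'user', content: 'Summarize this changelog entry.'),
  ],
  maxTokens: 256,
  temperature: 0.2,
);

print(response.content);
print('Prompt tokens: ${response.usage.promptTokens}, '
    'completion tokens: ${response.usage.completionTokens}, '
    'latency: ${response.latency.inMilliseconds} ms');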

Implementation

@override
Future<AIResponse> complete(
  List<AIMessage> messages, {
  int? maxTokens,
  double? temperature,
  List<AITool>? tools,
}) async {
  // Time the full request round trip for the latency field on AIResponse.
  final stopwatch = Stopwatch()..start();

  final body =
      _buildRequestBody(messages, maxTokens, temperature, tools: tools);
  final response = await _post('/chat/completions', body);
  stopwatch.stop();

  final json = jsonDecode(response.body) as Map<String, dynamic>;

  // Error responses also carry a JSON body, so decode before checking status.
  if (response.statusCode != 200) {
    throw _parseError(response.statusCode, json);
  }

  final choice = (json['choices'] as List).first as Map<String, dynamic>;
  final message = choice['message'] as Map<String, dynamic>;
  final usage = json['usage'] as Map<String, dynamic>;

  // `tool_calls` is only present when the model asked to invoke a tool;
  // each call's arguments arrive as a JSON-encoded string.
  List<AIToolCall>? toolCalls;
  if (message.containsKey('tool_calls')) {
    final tCalls = message['tool_calls'] as List;
    toolCalls = tCalls
        .map((t) => AIToolCall(
              id: t['id'] as String,
              name: t['function']['name'] as String,
              arguments: jsonDecode(t['function']['arguments'] as String)
                  as Map<String, dynamic>,
            ))
        .toList();
  }

  return AIResponse(
    content: message['content'] as String? ?? '',
    usage: AIUsage(
      promptTokens: usage['prompt_tokens'] as int? ?? 0,
      completionTokens: usage['completion_tokens'] as int? ?? 0,
      estimatedCostUsd: _estimateCost(
        usage['prompt_tokens'] as int? ?? 0,
        usage['completion_tokens'] as int? ?? 0,
      ),
    ),
    model: json['model'] as String? ?? config.model,
    provider: name,
    latency: stopwatch.elapsed,
    finishReason: choice['finish_reason'] as String?,
    toolCalls: toolCalls,
  );
}
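When the model requests a tool invocation, toolCalls on the returned AIResponse is non-null and each AIToolCall carries the already-decoded arguments map. The sketch below shows one way a caller might dispatch them after complete returns; the runTool helper is hypothetical and stands in for whatever executes a tool by name in the calling code.

// Dispatch any tool calls the model requested; `runTool` is a
// hypothetical helper that executes a tool by name with its arguments.
if (response.toolCalls != null) {
  for (final call in response.toolCalls!) {
    final result = await runTool(call.name, call.arguments);
    print('Tool ${call.name} (id ${call.id}) returned: $result');
  }
}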