response_turn namespace
#include <nxtai/response_turn.hpp>
Typedefs
-
using function_call = tools::function_call
using function_tool = tools::function_tool
using llm_request = responses::openai_responses_request
using output_item_result = agent::output_item_result
using response_stream_result = agent::response_stream_result
using stream_event = responses::stream_event
Functions
-
void for_complete_words(std::string& text, bool finish, auto fn)
auto openai_responses_endpoint() → nxt::io::net::endpoint
template <typename Stream> auto read_message_item(Stream& stream, nxt::ui::yard& self) → nxt::task<std::optional<nlohmann::json>>
template <typename Stream> auto read_openai_response_stream(nxt::ui::yard& self, Stream& stream) → nxt::task<response_stream_result>
template <typename Stream> auto read_output_item(Stream& stream, nxt::ui::yard& self, const stream_event& first) → nxt::task<output_item_result>
template <typename Stream> auto read_reasoning_item(Stream& stream, nxt::ui::yard& self) → nxt::task<std::optional<nlohmann::json>>
template <typename Stream> auto read_text_delta_item(Stream& stream, nxt::ui::yard& self, std::string_view delta_event_type, tui::Style style, auto on_delta) → nxt::task<std::optional<nlohmann::json>>
auto request_response_turn(nxt::ui::yard& self, const llm_request& request) → nxt::task<response_stream_result>
template <typename Stream> auto response_event_source(Stream& stream) → auto
auto run_requested_tools(nxt::ui::yard& self, const std::vector<function_tool>& tool_list, const std::vector<function_call>& calls) → nxt::task<std::vector<nlohmann::json>>
auto stream_wrap_width(nxt::ui::yard& self) → std::size_t
template <typename ReadStream> auto with_openai_response_stream(nxt::ui::yard& self, const llm_request& request, ReadStream read_stream) → nxt::task<response_stream_result>