response_turn.hpp file
Namespaces
- namespace nxt
- namespace nxt::ai
- namespace nxt::ai::response_turn
Typedefs
- using function_call = tools::function_call
- using function_tool = tools::function_tool
- using llm_request = responses::openai_responses_request
- using output_item_result = agent::output_item_result
- using response_stream_result = agent::response_stream_result
- using stream_event = responses::stream_event
Functions
- void for_complete_words(std::string& text, bool finish, auto fn)
- auto openai_responses_endpoint() → nxt::io::net::endpoint
- template <typename Stream> auto read_message_item(Stream& stream, nxt::ui::yard& self) → nxt::task<std::optional<nlohmann::json>>
- template <typename Stream> auto read_openai_response_stream(nxt::ui::yard& self, Stream& stream) → nxt::task<response_stream_result>
- template <typename Stream> auto read_output_item(Stream& stream, nxt::ui::yard& self, const stream_event& first) → nxt::task<output_item_result>
- template <typename Stream> auto read_reasoning_item(Stream& stream, nxt::ui::yard& self) → nxt::task<std::optional<nlohmann::json>>
- template <typename Stream> auto read_text_delta_item(Stream& stream, nxt::ui::yard& self, std::string_view delta_event_type, tui::Style style, auto on_delta) → nxt::task<std::optional<nlohmann::json>>
- auto request_response_turn(nxt::ui::yard& self, const llm_request& request) → nxt::task<response_stream_result>
- template <typename Stream> auto response_event_source(Stream& stream) → auto
- auto run_requested_tools(nxt::ui::yard& self, const std::vector<function_tool>& tool_list, const std::vector<function_call>& calls) → nxt::task<std::vector<nlohmann::json>>
- auto stream_wrap_width(nxt::ui::yard& self) → std::size_t
- template <typename ReadStream> auto with_openai_response_stream(nxt::ui::yard& self, const llm_request& request, ReadStream read_stream) → nxt::task<response_stream_result>