diff --git a/CMakeLists.txt b/CMakeLists.txt index 1551d0a7..51725539 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -168,6 +168,18 @@ if (ENABLE_VOICE) target_link_libraries(abaddon ${CMAKE_DL_LIBS}) + # FFmpeg for video support + find_package(PkgConfig QUIET) + pkg_check_modules(FFMPEG QUIET libavcodec libavformat libavdevice libswscale libavutil) + if (FFMPEG_FOUND) + target_compile_definitions(abaddon PRIVATE WITH_VIDEO) + target_include_directories(abaddon PUBLIC ${FFMPEG_INCLUDE_DIRS}) + target_link_libraries(abaddon ${FFMPEG_LIBRARIES}) + target_link_directories(abaddon PUBLIC ${FFMPEG_LIBRARY_DIRS}) + else() + message(WARNING "FFmpeg not found - video calling and screen sharing will be disabled") + endif() + if (ENABLE_RNNOISE) target_compile_definitions(abaddon PRIVATE WITH_RNNOISE) @@ -227,4 +239,3 @@ install(TARGETS abaddon RUNTIME) install(DIRECTORY res/css DESTINATION ${ABADDON_RESOURCE_DIR}) install(DIRECTORY res/fonts DESTINATION ${ABADDON_RESOURCE_DIR}) install(DIRECTORY res/res DESTINATION ${ABADDON_RESOURCE_DIR}) - diff --git a/res/emojis.db b/res/emojis.db new file mode 100644 index 00000000..e69de29b diff --git a/src/abaddon.cpp b/src/abaddon.cpp index 337bd5d5..31a1a637 100644 --- a/src/abaddon.cpp +++ b/src/abaddon.cpp @@ -15,6 +15,7 @@ #include "dialogs/friendpicker.hpp" #include "dialogs/verificationgate.hpp" #include "dialogs/textinput.hpp" +#include "dialogs/call.hpp" #include "windows/guildsettingswindow.hpp" #include "windows/profilewindow.hpp" #include "windows/pinnedwindow.hpp" @@ -85,6 +86,7 @@ Abaddon::Abaddon() spdlog::get("voice")->debug("{} SSRC: {}", m.UserID, m.SSRC); m_audio.AddSSRC(m.SSRC); }); + m_discord.signal_call_create().connect(sigc::mem_fun(*this, &Abaddon::ActionCallCreate)); #endif m_discord.signal_channel_accessibility_changed().connect([this](Snowflake id, bool accessible) { @@ -320,6 +322,10 @@ int Abaddon::StartGTK() { m_main_window->GetChatWindow()->signal_action_reaction_add().connect(sigc::mem_fun(*this, 
&Abaddon::ActionReactionAdd)); m_main_window->GetChatWindow()->signal_action_reaction_remove().connect(sigc::mem_fun(*this, &Abaddon::ActionReactionRemove)); +#ifdef WITH_VOICE + m_main_window->GetChatWindow()->signal_action_start_call().connect(sigc::mem_fun(*this, &Abaddon::ActionStartCall)); +#endif + ActionReloadCSS(); AttachCSSMonitor(); @@ -494,8 +500,17 @@ void Abaddon::OnVoiceConnected() { void Abaddon::OnVoiceDisconnected() { m_audio.StopCaptureDevice(); m_audio.RemoveAllSSRCs(); + + // Don't close the voice window if we're still in a voice channel + // This handles the case where we reconnect (e.g., for screen share mode change) + // The window should only close when we actually leave the channel if (m_voice_window != nullptr) { - m_voice_window->close(); + const auto channel_id = m_discord.GetVoiceChannelID(); + if (!channel_id.IsValid() || static_cast(channel_id) == 0) { + // We're not in a channel anymore, close the window + m_voice_window->close(); + } + // Otherwise, we're still in a channel, just reconnecting - keep the window open } } @@ -863,6 +878,7 @@ void Abaddon::ActionSetToken() { m_discord.UpdateToken(m_discord_token); m_main_window->UpdateComponents(); GetSettings().DiscordToken = m_discord_token; + m_settings.Save(); // Save immediately so token persists } m_main_window->UpdateMenus(); } @@ -1092,6 +1108,35 @@ void Abaddon::ActionJoinVoiceChannel(Snowflake channel_id) { void Abaddon::ActionDisconnectVoice() { m_discord.DisconnectFromVoice(); } + +void Abaddon::ActionCallCreate(CallCreateData data) { + const auto channel = m_discord.GetChannel(data.ChannelID); + if (!channel.has_value()) return; + + bool is_group_call = (channel->Type == ChannelType::GROUP_DM); + + CallDialog dlg(*m_main_window, data.ChannelID, is_group_call); + const auto response = dlg.run(); + + if (response == Gtk::RESPONSE_OK && dlg.GetAccepted()) { + if (is_group_call) { + m_discord.JoinCall(data.ChannelID); + } else { + m_discord.AcceptCall(data.ChannelID); + } + } 
else { + m_discord.RejectCall(data.ChannelID); + } +} + +void Abaddon::ActionStartCall(Snowflake channel_id) { + const auto channel = m_discord.GetChannel(channel_id); + if (!channel.has_value()) return; + + if (channel->Type == ChannelType::DM || channel->Type == ChannelType::GROUP_DM) { + m_discord.StartCall(channel_id); + } +} #endif std::optional Abaddon::ShowTextPrompt(const Glib::ustring &prompt, const Glib::ustring &title, const Glib::ustring &placeholder, Gtk::Window *window) { @@ -1190,6 +1235,9 @@ int main(int argc, char **argv) { auto log_voice = spdlog::stdout_color_mt("voice"); auto log_discord = spdlog::stdout_color_mt("discord"); auto log_ra = spdlog::stdout_color_mt("remote-auth"); +#ifdef WITH_VIDEO + auto log_video = spdlog::stdout_color_mt("video"); +#endif Gtk::Main::init_gtkmm_internals(); // why??? return Abaddon::Get().StartGTK(); diff --git a/src/abaddon.hpp b/src/abaddon.hpp index 6093523f..a389e0a8 100644 --- a/src/abaddon.hpp +++ b/src/abaddon.hpp @@ -62,6 +62,8 @@ class Abaddon { #ifdef WITH_VOICE void ActionJoinVoiceChannel(Snowflake channel_id); void ActionDisconnectVoice(); + void ActionCallCreate(CallCreateData data); + void ActionStartCall(Snowflake channel_id); #endif std::optional ShowTextPrompt(const Glib::ustring &prompt, const Glib::ustring &title, const Glib::ustring &placeholder = "", Gtk::Window *window = nullptr); diff --git a/src/components/channellist/channellisttree.cpp b/src/components/channellist/channellisttree.cpp index e824933c..ad83cf11 100644 --- a/src/components/channellist/channellisttree.cpp +++ b/src/components/channellist/channellisttree.cpp @@ -35,7 +35,7 @@ ChannelListTree::ChannelListTree() , m_menu_dm_copy_id("_Copy ID", true) , m_menu_dm_close("") // changes depending on if group or not #ifdef WITH_VOICE - , m_menu_dm_join_voice("Join _Voice", true) + , m_menu_dm_join_voice("Start _Call", true) , m_menu_dm_disconnect_voice("_Disconnect Voice", true) #endif , m_menu_thread_copy_id("_Copy ID", true) @@ 
-943,10 +943,12 @@ Gtk::TreeModel::iterator ChannelListTree::AddGuild(const GuildData &guild, const } std::map> threads; - for (const auto &tmp : *guild.Threads) { - const auto thread = discord.GetChannel(tmp.ID); - if (thread.has_value()) - threads[*thread->ParentID].push_back(*thread); + if (guild.Threads.has_value()) { + for (const auto &tmp : *guild.Threads) { + const auto thread = discord.GetChannel(tmp.ID); + if (thread.has_value()) + threads[*thread->ParentID].push_back(*thread); + } } const auto add_threads = [&](const ChannelData &channel, const Gtk::TreeRow &row) { row[m_columns.m_expanded] = true; diff --git a/src/components/chatwindow.cpp b/src/components/chatwindow.cpp index 6a44d84c..4f1706c3 100644 --- a/src/components/chatwindow.cpp +++ b/src/components/chatwindow.cpp @@ -47,6 +47,22 @@ ChatWindow::ChatWindow() { m_topic_text.set_halign(Gtk::ALIGN_START); m_topic_text.show(); +#ifdef WITH_VOICE + m_call_button.set_label("Start Call"); + m_call_button.set_tooltip_text("Start a voice call"); + m_call_button.set_halign(Gtk::ALIGN_START); + m_call_button.set_margin_start(5); + m_call_button.set_margin_end(5); + m_call_button.set_margin_top(2); + m_call_button.set_margin_bottom(2); + m_call_button.signal_clicked().connect([this]() { + if (m_active_channel.IsValid()) { + m_signal_action_start_call.emit(m_active_channel); + } + }); + m_call_button.hide(); +#endif + m_input->set_valign(Gtk::ALIGN_END); m_input->signal_submit().connect(sigc::mem_fun(*this, &ChatWindow::OnInputSubmit)); @@ -104,6 +120,9 @@ ChatWindow::ChatWindow() { m_tab_switcher->show(); #endif m_main->add(m_topic); +#ifdef WITH_VOICE + m_main->add(m_call_button); +#endif m_main->add(*m_chat); m_main->add(m_completer); m_main->add(*m_input); @@ -142,6 +161,16 @@ void ChatWindow::SetActiveChannel(Snowflake id) { if (m_is_replying) StopReplying(); if (m_is_editing) StopEditing(); +#ifdef WITH_VOICE + const auto &discord = Abaddon::Get().GetDiscordClient(); + const auto channel = 
discord.GetChannel(id); + if (channel.has_value() && (channel->Type == ChannelType::DM || channel->Type == ChannelType::GROUP_DM)) { + m_call_button.show(); + } else { + m_call_button.hide(); + } +#endif + #ifdef WITH_LIBHANDY m_tab_switcher->ReplaceActiveTab(id); #endif @@ -396,3 +425,9 @@ ChatWindow::type_signal_action_reaction_add ChatWindow::signal_action_reaction_a ChatWindow::type_signal_action_reaction_remove ChatWindow::signal_action_reaction_remove() { return m_signal_action_reaction_remove; } + +#ifdef WITH_VOICE +ChatWindow::type_signal_action_start_call ChatWindow::signal_action_start_call() { + return m_signal_action_start_call; +} +#endif diff --git a/src/components/chatwindow.hpp b/src/components/chatwindow.hpp index f7493cd4..092b469e 100644 --- a/src/components/chatwindow.hpp +++ b/src/components/chatwindow.hpp @@ -79,6 +79,10 @@ class ChatWindow { Gtk::EventBox m_topic; // todo probably make everything else go on the stack Gtk::Label m_topic_text; +#ifdef WITH_VOICE + Gtk::Button m_call_button; +#endif + ChatList *m_chat; ChatInput *m_input; @@ -110,6 +114,11 @@ class ChatWindow { type_signal_action_reaction_add signal_action_reaction_add(); type_signal_action_reaction_remove signal_action_reaction_remove(); +#ifdef WITH_VOICE + using type_signal_action_start_call = sigc::signal; + type_signal_action_start_call signal_action_start_call(); +#endif + private: type_signal_action_message_edit m_signal_action_message_edit; type_signal_action_chat_submit m_signal_action_chat_submit; @@ -118,4 +127,8 @@ class ChatWindow { type_signal_action_insert_mention m_signal_action_insert_mention; type_signal_action_reaction_add m_signal_action_reaction_add; type_signal_action_reaction_remove m_signal_action_reaction_remove; + +#ifdef WITH_VOICE + type_signal_action_start_call m_signal_action_start_call; +#endif }; diff --git a/src/dialogs/call.cpp b/src/dialogs/call.cpp new file mode 100644 index 00000000..a4a5ad7e --- /dev/null +++ b/src/dialogs/call.cpp @@ -0,0 
+1,110 @@ +#include "call.hpp" +#include "abaddon.hpp" + +CallDialog::CallDialog(Gtk::Window &parent, Snowflake channel_id, bool is_group_call) + : Gtk::Dialog(is_group_call ? "Incoming Group Call" : "Incoming Call", parent, true) + , m_channel_id(channel_id) + , m_is_group_call(is_group_call) + , m_main_layout(Gtk::ORIENTATION_VERTICAL) + , m_info_layout(Gtk::ORIENTATION_HORIZONTAL) + , m_button_box(Gtk::ORIENTATION_HORIZONTAL) + , m_accept_button(is_group_call ? "Join" : "Accept") + , m_reject_button("Reject") { + set_default_size(350, 150); + get_style_context()->add_class("app-window"); + get_style_context()->add_class("app-popup"); + + const auto &discord = Abaddon::Get().GetDiscordClient(); + const auto channel = discord.GetChannel(channel_id); + + if (!channel.has_value()) { + m_title_label.set_text("Unknown Call"); + m_info_label.set_text("Channel information unavailable"); + } else if (m_is_group_call) { + m_title_label.set_markup("Group Call"); + m_info_label.set_text(channel->GetRecipientsDisplay()); + m_avatar.property_pixbuf() = Abaddon::Get().GetImageManager().GetPlaceholder(64); + } else { + const auto recipients = channel->GetDMRecipients(); + if (recipients.empty()) { + m_title_label.set_text("Unknown Call"); + m_info_label.set_text("User information unavailable"); + m_avatar.property_pixbuf() = Abaddon::Get().GetImageManager().GetPlaceholder(64); + } else { + const auto &user = recipients[0]; + m_title_label.set_markup("" + Glib::Markup::escape_text(user.GetDisplayName()) + ""); + m_info_label.set_text("is calling you"); + + auto &img = Abaddon::Get().GetImageManager(); + m_avatar.property_pixbuf() = img.GetPlaceholder(64); + + if (user.HasAnimatedAvatar() && Abaddon::Get().GetSettings().ShowAnimations) { + auto cb = [this](const Glib::RefPtr &pb) { + m_avatar.property_pixbuf_animation() = pb; + }; + img.LoadAnimationFromURL(user.GetAvatarURL("gif", "64"), 64, 64, sigc::track_obj(cb, *this)); + } else { + auto cb = [this](const Glib::RefPtr &pb) { 
+ m_avatar.property_pixbuf() = pb->scale_simple(64, 64, Gdk::INTERP_BILINEAR); + }; + img.LoadFromURL(user.GetAvatarURL("png", "64"), sigc::track_obj(cb, *this)); + } + } + } + + m_avatar.set_margin_end(10); + m_avatar.set_halign(Gtk::ALIGN_START); + m_avatar.set_valign(Gtk::ALIGN_CENTER); + + m_title_label.set_halign(Gtk::ALIGN_START); + m_title_label.set_valign(Gtk::ALIGN_CENTER); + m_info_label.set_halign(Gtk::ALIGN_START); + m_info_label.set_valign(Gtk::ALIGN_CENTER); + m_info_label.set_single_line_mode(true); + m_info_label.set_ellipsize(Pango::ELLIPSIZE_END); + + m_accept_button.signal_clicked().connect(sigc::mem_fun(*this, &CallDialog::OnAccept)); + m_reject_button.signal_clicked().connect(sigc::mem_fun(*this, &CallDialog::OnReject)); + + m_button_box.pack_start(m_accept_button, Gtk::PACK_SHRINK); + m_button_box.pack_start(m_reject_button, Gtk::PACK_SHRINK); + m_button_box.set_layout(Gtk::BUTTONBOX_END); + m_button_box.set_margin_top(10); + + Gtk::Box *title_box = Gtk::manage(new Gtk::Box(Gtk::ORIENTATION_VERTICAL)); + title_box->add(m_title_label); + title_box->add(m_info_label); + title_box->set_halign(Gtk::ALIGN_START); + title_box->set_valign(Gtk::ALIGN_CENTER); + + m_info_layout.add(m_avatar); + m_info_layout.add(*title_box); + m_info_layout.set_margin_bottom(10); + + m_main_layout.add(m_info_layout); + m_main_layout.add(m_button_box); + + get_content_area()->add(m_main_layout); + + signal_response().connect([this](int response_id) { + if (response_id != Gtk::RESPONSE_OK) { + m_accepted = false; + } + }); + + show_all_children(); +} + +bool CallDialog::GetAccepted() const { + return m_accepted; +} + +void CallDialog::OnAccept() { + m_accepted = true; + response(Gtk::RESPONSE_OK); +} + +void CallDialog::OnReject() { + m_accepted = false; + response(Gtk::RESPONSE_CANCEL); +} diff --git a/src/dialogs/call.hpp b/src/dialogs/call.hpp new file mode 100644 index 00000000..f02cc1ec --- /dev/null +++ b/src/dialogs/call.hpp @@ -0,0 +1,35 @@ +#pragma once + 
+#include +#include +#include +#include +#include +#include +#include "discord/snowflake.hpp" +#include "discord/objects.hpp" +#include "discord/channel.hpp" + +class CallDialog : public Gtk::Dialog { +public: + CallDialog(Gtk::Window &parent, Snowflake channel_id, bool is_group_call); + + bool GetAccepted() const; + +protected: + void OnAccept(); + void OnReject(); + + bool m_accepted = false; + Snowflake m_channel_id; + bool m_is_group_call; + + Gtk::Box m_main_layout; + Gtk::Box m_info_layout; + Gtk::Image m_avatar; + Gtk::Label m_title_label; + Gtk::Label m_info_label; + Gtk::ButtonBox m_button_box; + Gtk::Button m_accept_button; + Gtk::Button m_reject_button; +}; diff --git a/src/dialogs/screensharedialog.cpp b/src/dialogs/screensharedialog.cpp new file mode 100644 index 00000000..810fc9fb --- /dev/null +++ b/src/dialogs/screensharedialog.cpp @@ -0,0 +1,245 @@ +#ifdef WITH_VIDEO + +#include "screensharedialog.hpp" +#include +#include +#include + +#ifdef _WIN32 +#include +#include +#endif + +#ifdef __APPLE__ +#include +#include +#endif + +ScreenShareDialog::ScreenShareDialog(Gtk::Window& parent) + : Gtk::Dialog("Compartir Pantalla", parent, true) + , m_screens_box(Gtk::ORIENTATION_VERTICAL) { + set_default_size(400, 300); + get_style_context()->add_class("app-window"); + get_style_context()->add_class("app-popup"); + + // Screens tab + m_screens_box.set_spacing(10); + m_screens_box.set_margin_start(10); + m_screens_box.set_margin_end(10); + m_screens_box.set_margin_top(10); + m_screens_box.set_margin_bottom(10); + m_notebook.append_page(m_screens_box, "Pantallas"); + + // Windows tab (placeholder for the future) + // m_windows_box.set_spacing(10); + // m_notebook.append_page(m_windows_box, "Ventanas"); + + load_monitors(); + + add_button("Cancelar", Gtk::RESPONSE_CANCEL); + + get_content_area()->pack_start(m_notebook, true, true, 0); + + show_all_children(); +} + +void ScreenShareDialog::load_monitors() { +#ifdef __linux__ + auto display = 
Gdk::Display::get_default(); + if (!display) { + if (auto logger = spdlog::get("ui")) { + logger->error("Failed to get default display"); + } + return; + } + + int n_monitors = display->get_n_monitors(); + if (auto logger = spdlog::get("ui")) { + logger->info("Found {} monitors", n_monitors); + } + + for (int i = 0; i < n_monitors; i++) { + auto monitor = display->get_monitor(i); + if (!monitor) continue; + + Gdk::Rectangle geo; + monitor->get_geometry(geo); + + ScreenSource src; + src.name = "Monitor " + std::to_string(i + 1); + src.x = geo.get_x(); + src.y = geo.get_y(); + src.width = geo.get_width(); + src.height = geo.get_height(); + src.is_window = false; + + // Crear un botón por monitor + auto btn = Gtk::manage(new Gtk::Button()); + std::string label_text = src.name + "\n" + + std::to_string(src.width) + "x" + std::to_string(src.height) + + " @ (" + std::to_string(src.x) + ", " + std::to_string(src.y) + ")"; + btn->set_label(label_text); + btn->set_halign(Gtk::ALIGN_FILL); + btn->set_margin_bottom(5); + + btn->signal_clicked().connect([this, src]() { + on_source_clicked(src); + }); + + m_screens_box.pack_start(*btn, false, false, 0); + } + +#elif defined(_WIN32) + struct MonitorInfo { + int x, y, width, height; + std::string name; + }; + std::vector monitors; + + // Static callback function for EnumDisplayMonitors (must be static or global) + static BOOL CALLBACK MonitorEnumProc(HMONITOR hMonitor, HDC hdcMonitor, LPRECT lprcMonitor, LPARAM dwData) { + auto* monitors_vec = reinterpret_cast*>(dwData); + MonitorInfo info; + info.x = lprcMonitor->left; + info.y = lprcMonitor->top; + info.width = lprcMonitor->right - lprcMonitor->left; + info.height = lprcMonitor->bottom - lprcMonitor->top; + + MONITORINFOEX monitorInfo; + monitorInfo.cbSize = sizeof(MONITORINFOEX); + if (GetMonitorInfo(hMonitor, &monitorInfo)) { + if (sizeof(TCHAR) == 2) { + // Wide build: convert UTF-16 to UTF-8 + int utf8_len = WideCharToMultiByte(CP_UTF8, 0, monitorInfo.szDevice, -1, nullptr, 
0, nullptr, nullptr); + if (utf8_len > 0) { + std::vector utf8_buffer(utf8_len); + int result = WideCharToMultiByte(CP_UTF8, 0, monitorInfo.szDevice, -1, utf8_buffer.data(), utf8_len, nullptr, nullptr); + if (result > 0) { + info.name = std::string(utf8_buffer.data()); + } else { + info.name = "Monitor " + std::to_string(monitors_vec->size() + 1); + } + } else { + info.name = "Monitor " + std::to_string(monitors_vec->size() + 1); + } + } else { + // ANSI build: direct assignment + info.name = std::string(monitorInfo.szDevice); + } + } else { + info.name = "Monitor " + std::to_string(monitors_vec->size() + 1); + } + + monitors_vec->push_back(info); + return TRUE; + } + + EnumDisplayMonitors(NULL, NULL, MonitorEnumProc, reinterpret_cast(&monitors)); + + if (auto logger = spdlog::get("ui")) { + logger->info("Found {} monitors on Windows", monitors.size()); + } + + for (size_t i = 0; i < monitors.size(); i++) { + const auto& mon = monitors[i]; + + ScreenSource src; + src.name = "Monitor " + std::to_string(i + 1); + src.x = mon.x; + src.y = mon.y; + src.width = mon.width; + src.height = mon.height; + src.is_window = false; + + auto btn = Gtk::manage(new Gtk::Button()); + std::string label_text = src.name + "\n" + + std::to_string(src.width) + "x" + std::to_string(src.height) + + " @ (" + std::to_string(src.x) + ", " + std::to_string(src.y) + ")"; + btn->set_label(label_text); + btn->set_halign(Gtk::ALIGN_FILL); + btn->set_margin_bottom(5); + + btn->signal_clicked().connect([this, src]() { + on_source_clicked(src); + }); + + m_screens_box.pack_start(*btn, false, false, 0); + } + +#elif defined(__APPLE__) + uint32_t display_count = 0; + CGError err = CGGetActiveDisplayList(0, NULL, &display_count); + if (err != kCGErrorSuccess) { + if (auto logger = spdlog::get("ui")) { + logger->error("Failed to get display count: {}", err); + } + return; + } + + if (display_count == 0) { + if (auto logger = spdlog::get("ui")) { + logger->warn("No displays found"); + } + return; + } + + 
std::vector displays(display_count); + err = CGGetActiveDisplayList(display_count, displays.data(), &display_count); + if (err != kCGErrorSuccess) { + if (auto logger = spdlog::get("ui")) { + logger->error("Failed to get display list: {}", err); + } + return; + } + + if (auto logger = spdlog::get("ui")) { + logger->info("Found {} displays on macOS", display_count); + } + + for (uint32_t i = 0; i < display_count; i++) { + CGRect bounds = CGDisplayBounds(displays[i]); + + ScreenSource src; + src.name = "Pantalla " + std::to_string(i + 1); + src.x = static_cast(bounds.origin.x); + src.y = static_cast(bounds.origin.y); + src.width = static_cast(bounds.size.width); + src.height = static_cast(bounds.size.height); + src.is_window = false; + + auto btn = Gtk::manage(new Gtk::Button()); + std::string label_text = src.name + "\n" + + std::to_string(src.width) + "x" + std::to_string(src.height) + + " @ (" + std::to_string(src.x) + ", " + std::to_string(src.y) + ")"; + btn->set_label(label_text); + btn->set_halign(Gtk::ALIGN_FILL); + btn->set_margin_bottom(5); + + btn->signal_clicked().connect([this, src]() { + on_source_clicked(src); + }); + + m_screens_box.pack_start(*btn, false, false, 0); + } + +#else + if (auto logger = spdlog::get("ui")) { + logger->warn("Platform not supported for monitor detection"); + } +#endif +} + +void ScreenShareDialog::on_source_clicked(ScreenSource source) { + m_selected = source; + if (auto logger = spdlog::get("ui")) { + logger->info("Selected screen source: {} ({}x{} @ {},{})", + source.name, source.width, source.height, source.x, source.y); + } + response(Gtk::RESPONSE_OK); +} + +std::optional ScreenShareDialog::get_selected_source() const { + return m_selected; +} + +#endif diff --git a/src/dialogs/screensharedialog.hpp b/src/dialogs/screensharedialog.hpp new file mode 100644 index 00000000..adce73de --- /dev/null +++ b/src/dialogs/screensharedialog.hpp @@ -0,0 +1,38 @@ +#pragma once +#ifdef WITH_VIDEO + +#include +#include +#include 
+#include +#include +#include +#include +#include + +struct ScreenSource { + std::string name; + int x, y, width, height; // Geometry to pass to FFmpeg + bool is_window; // true = window, false = whole monitor + unsigned long xid; // X11 window ID (if applicable, for the future) + + ScreenSource() : x(0), y(0), width(0), height(0), is_window(false), xid(0) {} +}; + +class ScreenShareDialog : public Gtk::Dialog { +public: + ScreenShareDialog(Gtk::Window& parent); + std::optional get_selected_source() const; + +private: + Gtk::Notebook m_notebook; + Gtk::Box m_screens_box; + // Gtk::Box m_windows_box; // Implement later with libwnck + + std::optional m_selected; + + void load_monitors(); + void on_source_clicked(ScreenSource source); +}; + +#endif diff --git a/src/discord/discord.cpp b/src/discord/discord.cpp index 34600ea3..c01f5d0a 100644 --- a/src/discord/discord.cpp +++ b/src/discord/discord.cpp @@ -6,9 +6,64 @@ #include #include "abaddon.hpp" +#ifdef WITH_VIDEO +#include "video/capture.hpp" +#include "discord/rtppacketizer.hpp" +#include +#include +#endif using namespace std::string_literals; +#ifdef WITH_VIDEO +namespace { +using NalSpan = std::pair; + +size_t FindAnnexBStartCode(const uint8_t *data, size_t len, size_t offset) { + for (size_t i = offset; i + 3 < len; ++i) { + if (data[i] != 0 || data[i + 1] != 0) continue; + if (data[i + 2] == 1) return i; + if (data[i + 2] == 0 && data[i + 3] == 1) return i; + } + return len; +} + +void ExtractH264NalUnits(const uint8_t *data, size_t len, std::vector &out) { + out.clear(); + if (len == 0) return; + + const size_t first_sc = FindAnnexBStartCode(data, len, 0); + if (first_sc != len) { + size_t sc = first_sc; + while (sc != len) { + const size_t sc_len = (data[sc + 2] == 1) ? 3 : 4; + const size_t nal_start = sc + sc_len; + const size_t next_sc = FindAnnexBStartCode(data, len, nal_start); + const size_t nal_end = (next_sc == len) ? 
len : next_sc; + if (nal_end > nal_start) { + out.emplace_back(data + nal_start, nal_end - nal_start); + } + sc = next_sc; + } + return; + } + + // AVCC length-prefixed NAL units (assume 4-byte lengths). + size_t offset = 0; + while (offset + 4 <= len) { + const uint32_t nal_len = (static_cast(data[offset]) << 24) | + (static_cast(data[offset + 1]) << 16) | + (static_cast(data[offset + 2]) << 8) | + (static_cast(data[offset + 3]) << 0); + offset += 4; + if (nal_len == 0 || offset + nal_len > len) break; + out.emplace_back(data + offset, nal_len); + offset += nal_len; + } +} +} // namespace +#endif + DiscordClient::DiscordClient(bool mem_store) : m_decompress_buf(InflateChunkSize) , m_store(mem_store) @@ -38,6 +93,24 @@ DiscordClient::DiscordClient(bool mem_store) m_signal_voice_client_state_update.emit(state); }; m_voice.signal_state_update().connect(sigc::track_obj(signal_state_update_cb, m_signal_voice_client_state_update)); + +#ifdef WITH_VIDEO + const auto voice_video_state_cb = [this](DiscordVoiceClient::State state) { + if (state == DiscordVoiceClient::State::Connected) { + spdlog::get("discord")->debug("Voice connected, checking for pending video start"); + StartPendingVideo(); + } + }; + m_voice.signal_state_update().connect(sigc::track_obj(voice_video_state_cb, m_signal_voice_client_state_update)); + + // CRITICAL: Connect video state changed signal to update main gateway + // This sends Opcode 4 (Voice State Update) with self_video=true/false + // Without this, Discord ignores all video packets sent via UDP + const auto video_state_changed_cb = [this](bool active) { + SendVoiceStateUpdate(); + }; + m_voice.signal_video_state_changed().connect(sigc::track_obj(video_state_changed_cb, m_signal_voice_client_state_update)); +#endif #endif LoadEventMap(); @@ -1385,6 +1458,36 @@ void DiscordClient::SetVoiceDeafened(bool is_deaf) { m_deaf_requested = is_deaf; SendVoiceStateUpdate(); } + +void DiscordClient::AcceptCall(Snowflake channel_id) { + CallConnectMessage 
msg; + msg.ChannelID = channel_id; + msg.Ringing = true; + m_websocket.Send(msg); + ConnectToVoice(channel_id); +} + +void DiscordClient::RejectCall(Snowflake channel_id) { + CallConnectMessage msg; + msg.ChannelID = channel_id; + msg.Ringing = false; + m_websocket.Send(msg); +} + +void DiscordClient::JoinCall(Snowflake channel_id) { + CallConnectMessage msg; + msg.ChannelID = channel_id; + msg.Ringing = true; + m_websocket.Send(msg); + ConnectToVoice(channel_id); +} + +void DiscordClient::StartCall(Snowflake channel_id) { + // To start a new call, we just connect to voice. + // Discord will automatically create CALL_CREATE when someone connects to voice in a DM/Group DM. + // CallConnectMessage with Ringing=true is only used when accepting/joining an EXISTING call. + ConnectToVoice(channel_id); +} #endif std::optional> DiscordClient::GetVoiceState(Snowflake user_id) const { @@ -1755,6 +1858,15 @@ void DiscordClient::HandleGatewayMessage(std::string str) { case GatewayEvent::CALL_CREATE: { HandleGatewayCallCreate(m); } break; + case GatewayEvent::STREAM_CREATE: { + HandleGatewayStreamCreate(m); + } break; + case GatewayEvent::STREAM_SERVER_UPDATE: { + HandleGatewayStreamServerUpdate(m); + } break; + case GatewayEvent::STREAM_UPDATE: { + HandleGatewayStreamUpdate(m); + } break; #endif } } break; @@ -2487,6 +2599,304 @@ void DiscordClient::HandleGatewayCallCreate(const GatewayMessage &msg) { for (const auto &state : data.VoiceStates) { CheckVoiceState(state); } + + m_signal_call_create.emit(data); +} + +void DiscordClient::HandleGatewayStreamCreate(const GatewayMessage &msg) { + const auto log = spdlog::get("discord"); + if (log && log->should_log(spdlog::level::debug)) { + log->debug("STREAM_CREATE received: {}", msg.Data.dump()); + } + + const auto &data = msg.Data; + if (!data.is_object()) return; + + if (const auto it = data.find("stream_key"); it != data.end() && it->is_string()) { + m_voice.SetStreamKey(it->get_ref()); + } +} + +void 
DiscordClient::HandleGatewayStreamServerUpdate(const GatewayMessage &msg) { + const auto log = spdlog::get("discord"); + if (log && log->should_log(spdlog::level::debug)) { + log->debug("STREAM_SERVER_UPDATE received: {}", msg.Data.dump()); + } + + const auto &data = msg.Data; + if (!data.is_object()) return; + + if (const auto it = data.find("token"); it != data.end() && it->is_string()) { + m_voice.SetStreamToken(it->get_ref()); + } + + if (const auto it = data.find("endpoint"); it != data.end() && it->is_string()) { + m_voice.SetStreamEndpoint(it->get_ref()); + } +} + +void DiscordClient::HandleGatewayStreamUpdate(const GatewayMessage &msg) { + const auto log = spdlog::get("discord"); + if (log && log->should_log(spdlog::level::debug)) { + log->debug("STREAM_UPDATE received: {}", msg.Data.dump()); + } + + const auto &data = msg.Data; + if (!data.is_object()) return; + + if (const auto it = data.find("paused"); it != data.end() && it->is_boolean()) { + m_voice.SetStreamPaused(it->get()); + } +} + +#ifdef WITH_VIDEO +void DiscordClient::StartPendingVideo() { + if (m_pending_video_start == PendingVideoStart::None) { + spdlog::get("discord")->debug("StartPendingVideo: no pending video start"); + return; + } + if (!m_voice.IsConnected()) { + spdlog::get("discord")->debug("StartPendingVideo: voice not connected yet"); + return; + } + + spdlog::get("discord")->info("StartPendingVideo: starting {} (is_screenshare={})", + m_pending_video_start == PendingVideoStart::ScreenShare ? "screen share" : "camera", + m_voice.IsScreenShareMode()); + + if (!m_voice.m_video_capture) { + m_voice.m_video_capture = std::make_unique(); + } + + if (m_voice.m_video_packet_connection.connected()) { + m_voice.m_video_packet_connection.disconnect(); + } + + m_voice.m_video_sequence.store(0, std::memory_order_relaxed); + + // Must be connected before activating video (Opcode 12). 
+ m_voice.SetVideoStatus(true); + + m_voice.m_video_packet_connection = m_voice.m_video_capture->signal_packet().connect([this](const std::vector &packet_data, uint32_t timestamp_pts) { + // CRITICAL: Convert encoder PTS (in codec time_base units) to RTP timestamp (90000 Hz) + // Encoder uses time_base = 1/30, so timestamp_pts is in 1/30 second units + // RTP video uses 90000 Hz clock, so we multiply by 90000/30 = 3000 + // But timestamp_pts is already incremented by 3000 per frame, so it should be correct + // However, we need to ensure it's in 90000 Hz units, not codec time_base units + const uint32_t rtp_timestamp = timestamp_pts; // timestamp_pts is already in 90000 Hz units (3000 per frame) + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"B\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Lambda received packet\",\"data\":{\"packet_size\":" << packet_data.size() << ",\"rtp_timestamp\":" << rtp_timestamp << ",\"is_connected\":" << (m_voice.IsConnected() ? "true" : "false") << ",\"is_paused\":" << (m_voice.IsStreamPaused() ? 
"true" : "false") << ",\"video_ssrc\":" << m_voice.m_video_ssrc << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + if (!m_voice.IsConnected()) { + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"B\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Rejected: not connected\",\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + return; + } + if (m_voice.IsStreamPaused()) { + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"B\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Rejected: stream paused\",\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + return; + } + if (m_voice.m_video_ssrc == 0) { + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"B\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Rejected: video_ssrc is 0\",\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + return; + } + + std::vector nals; + nals.reserve(8); + ExtractH264NalUnits(packet_data.data(), packet_data.size(), nals); + if (nals.empty()) { + nals.emplace_back(packet_data.data(), packet_data.size()); + } + + uint16_t seq = m_voice.m_video_sequence.load(std::memory_order_relaxed); + size_t total_rtp_packets = 0; + for (size_t i = 0; i < 
nals.size(); ++i) { + const auto &[nal_data, nal_len] = nals[i]; + if (nal_len == 0) continue; + + auto rtp_packets = RTPPacketizer::PacketizeH264( + nal_data, nal_len, + m_voice.m_video_ssrc, + static_cast(seq + 1), + rtp_timestamp, + 105 + ); + + if (rtp_packets.empty()) continue; + + const bool last_nal = (i + 1) == nals.size(); + rtp_packets.back().Marker = last_nal; + + for (auto &rtp_packet : rtp_packets) { + rtp_packet.Data[1] = static_cast((rtp_packet.Marker ? 0x80 : 0x00) | 105); + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"D\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Sending RTP packet\",\"data\":{\"rtp_size\":" << rtp_packet.Data.size() << ",\"sequence\":" << (seq + 1) << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + m_voice.m_udp.SendEncryptedRTP(rtp_packet.Data.data(), rtp_packet.Data.size()); + total_rtp_packets++; + } + + seq = static_cast(seq + rtp_packets.size()); + } + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"C,D\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Processed packet\",\"data\":{\"nals_count\":" << nals.size() << ",\"total_rtp_packets\":" << total_rtp_packets << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + m_voice.m_video_sequence.store(seq, std::memory_order_relaxed); + }); + + bool started = false; + if (m_pending_video_start == PendingVideoStart::ScreenShare) { + spdlog::get("discord")->debug("Starting screen capture with geometry: {}x{} @ {},{}", + m_screen_share_width, 
m_screen_share_height, + m_screen_share_x, m_screen_share_y); + started = m_voice.m_video_capture->StartScreenCapture(m_screen_share_x, m_screen_share_y, + m_screen_share_width, m_screen_share_height); + } else if (m_pending_video_start == PendingVideoStart::Camera) { + spdlog::get("discord")->debug("Starting camera capture..."); + started = m_voice.m_video_capture->StartCameraCapture(); + } + + if (started) { + m_voice.m_video_capture->ForceKeyframe(); + spdlog::get("discord")->info("Video capture started"); + } else { + spdlog::get("discord")->error("Failed to start video capture"); + m_voice.SetVideoStatus(false); + } + + m_pending_video_start = PendingVideoStart::None; +} +#endif + +void DiscordClient::StartScreenShare(Snowflake guild_id, Snowflake channel_id, + int x, int y, int width, int height) { +#ifdef WITH_VIDEO + spdlog::get("discord")->info("StartScreenShare called (guild_id: {}, channel_id: {}, geometry: {}x{} @ {},{})", + guild_id, channel_id, width, height, x, y); + + // Store geometry parameters for later use in StartPendingVideo + m_screen_share_x = x; + m_screen_share_y = y; + m_screen_share_width = width; + m_screen_share_height = height; + if (!m_voice.IsConnected()) { + spdlog::get("discord")->warn("Cannot start screen share: not connected to voice"); + return; + } + + // IMPORTANT: Differentiate between DM and Guild + // If guild_id is 0 or invalid, it's a private call ("call") + // If there's a guild_id, it's a guild ("guild") + const bool is_dm = !guild_id.IsValid() || static_cast(guild_id) == 0; + + if (!is_dm) { + nlohmann::json payload; + payload["op"] = static_cast(GatewayOp::StreamCreate); + payload["d"] = nlohmann::json::object(); + payload["d"]["type"] = "guild"; + payload["d"]["guild_id"] = guild_id; + payload["d"]["channel_id"] = channel_id; + payload["d"]["preferred_region"] = nullptr; + + m_websocket.Send(payload.dump()); + spdlog::get("discord")->debug("Sent STREAM_CREATE opcode 18 for guild"); + } + + // CRITICAL: Set screen 
share mode BEFORE checking or reconnecting + // This ensures Identify() will send the correct type + m_voice.SetScreenShareMode(true); + m_pending_video_start = PendingVideoStart::ScreenShare; + + spdlog::get("discord")->debug("Set screen share mode, current identified_as_screenshare: {}", + m_voice.IsIdentifiedAsScreenShare()); + + // DM: reconnect immediately to re-identify with `type: "screen"`. + // Guild: STREAM_CREATE is still required so others can watch. + if (!m_voice.IsIdentifiedAsScreenShare()) { + spdlog::get("discord")->info("Reconnecting voice to send Opcode 0 with type=screen"); + m_voice.Reconnect(); + return; + } + + spdlog::get("discord")->debug("Already identified as screen share, starting video immediately"); + StartPendingVideo(); +#endif +} + +void DiscordClient::StopScreenShare() { +#ifdef WITH_VIDEO + m_pending_video_start = PendingVideoStart::None; + // Reset geometry + m_screen_share_x = 0; + m_screen_share_y = 0; + m_screen_share_width = 0; + m_screen_share_height = 0; + if (m_voice.m_video_packet_connection.connected()) { + m_voice.m_video_packet_connection.disconnect(); + } + if (m_voice.m_video_capture) { + m_voice.m_video_capture->Stop(); + } + m_voice.SetVideoStatus(false); + // Reset screen share mode when stopping + m_voice.SetScreenShareMode(false); +#endif +} + +void DiscordClient::StartCamera() { +#ifdef WITH_VIDEO + spdlog::get("discord")->info("StartCamera called"); + if (!m_voice.IsConnected()) { + spdlog::get("discord")->warn("Cannot start camera: not connected to voice"); + return; + } + + m_voice.SetScreenShareMode(false); + m_pending_video_start = PendingVideoStart::Camera; + + if (m_voice.IsIdentifiedAsScreenShare()) { + m_voice.Reconnect(); + return; + } + + StartPendingVideo(); +#endif +} + +void DiscordClient::StopCamera() { +#ifdef WITH_VIDEO + m_pending_video_start = PendingVideoStart::None; + if (m_voice.m_video_packet_connection.connected()) { + m_voice.m_video_packet_connection.disconnect(); + } + if 
(m_voice.m_video_capture) { + m_voice.m_video_capture->Stop(); + } + m_voice.SetVideoStatus(false); +#endif } #endif @@ -2987,17 +3397,19 @@ void DiscordClient::HandleReadyReadState(const ReadyEventData &data) { for (const auto &guild : data.Guilds) { if (!guild.JoinedAt.has_value()) continue; // doubt this can happen but whatever const auto joined_at = Snowflake::FromISO8601(*guild.JoinedAt); - for (const auto &channel : *guild.Channels) { - if (channel.LastMessageID.has_value()) { - // unread messages from before you joined dont count as unread - if (*channel.LastMessageID < joined_at) continue; - if (std::find_if(data.ReadState.Entries.begin(), data.ReadState.Entries.end(), [id = channel.ID](const ReadStateEntry &e) { - return e.ID == id; - }) == data.ReadState.Entries.end()) { - // cant be unread if u cant even see the channel - // better to check here since HasChannelPermission hits the store - if (HasChannelPermission(GetUserData().ID, channel.ID, Permission::VIEW_CHANNEL)) - m_unread[channel.ID] = 0; + if (guild.Channels.has_value()) { + for (const auto &channel : *guild.Channels) { + if (channel.LastMessageID.has_value()) { + // unread messages from before you joined dont count as unread + if (*channel.LastMessageID < joined_at) continue; + if (std::find_if(data.ReadState.Entries.begin(), data.ReadState.Entries.end(), [id = channel.ID](const ReadStateEntry &e) { + return e.ID == id; + }) == data.ReadState.Entries.end()) { + // cant be unread if u cant even see the channel + // better to check here since HasChannelPermission hits the store + if (HasChannelPermission(GetUserData().ID, channel.ID, Permission::VIEW_CHANNEL)) + m_unread[channel.ID] = 0; + } } } } @@ -3009,12 +3421,16 @@ void DiscordClient::HandleReadyGuildSettings(const ReadyEventData &data) { std::unordered_map> category_children; for (const auto &guild : data.Guilds) { - for (const auto &channel : *guild.Channels) - if (channel.ParentID.has_value() && !channel.IsThread()) - 
category_children[*channel.ParentID].push_back(channel.ID); - for (const auto &thread : *guild.Threads) - if (thread.ThreadMember.has_value() && thread.ThreadMember->IsMuted.has_value() && *thread.ThreadMember->IsMuted) - m_muted_channels.insert(thread.ID); + if (guild.Channels.has_value()) { + for (const auto &channel : *guild.Channels) + if (channel.ParentID.has_value() && !channel.IsThread()) + category_children[*channel.ParentID].push_back(channel.ID); + } + if (guild.Threads.has_value()) { + for (const auto &thread : *guild.Threads) + if (thread.ThreadMember.has_value() && thread.ThreadMember->IsMuted.has_value() && *thread.ThreadMember->IsMuted) + m_muted_channels.insert(thread.ID); + } } const auto now = Snowflake::FromNow(); @@ -3058,9 +3474,20 @@ void DiscordClient::SendVoiceStateUpdate() { msg.SelfMute = m_mute_requested; msg.SelfDeaf = m_deaf_requested; - msg.SelfVideo = false; + // CRITICAL: Set self_video to true when video/camera is active + // This tells Discord's main gateway that we're transmitting video + // Without this, Discord ignores all video packets sent via UDP + msg.SelfVideo = m_voice.IsVideoActive(); + + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"E\",\"location\":\"discord.cpp:" << __LINE__ << "\",\"message\":\"Sending Voice State Update (Opcode 4)\",\"data\":{\"self_video\":" << (msg.SelfVideo ? "true" : "false") << ",\"self_mute\":" << (msg.SelfMute ? "true" : "false") << ",\"self_deaf\":" << (msg.SelfDeaf ? "true" : "false") << ",\"channel_id\":" << (msg.ChannelID.has_value() ? 
std::to_string(static_cast(*msg.ChannelID)) : "null") << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion m_websocket.Send(msg); + spdlog::get("discord")->debug("Sent Voice State Update (Opcode 4): self_video={}", msg.SelfVideo); } void DiscordClient::OnVoiceConnected() { @@ -3153,6 +3580,11 @@ void DiscordClient::LoadEventMap() { m_event_map["STAGE_INSTANCE_CREATE"] = GatewayEvent::STAGE_INSTANCE_CREATE; m_event_map["STAGE_INSTANCE_UPDATE"] = GatewayEvent::STAGE_INSTANCE_UPDATE; m_event_map["STAGE_INSTANCE_DELETE"] = GatewayEvent::STAGE_INSTANCE_DELETE; +#ifdef WITH_VIDEO + m_event_map["STREAM_CREATE"] = GatewayEvent::STREAM_CREATE; + m_event_map["STREAM_SERVER_UPDATE"] = GatewayEvent::STREAM_SERVER_UPDATE; + m_event_map["STREAM_UPDATE"] = GatewayEvent::STREAM_UPDATE; +#endif } DiscordClient::type_signal_gateway_ready DiscordClient::signal_gateway_ready() { @@ -3407,6 +3839,10 @@ DiscordClient::type_signal_voice_client_state_update DiscordClient::signal_voice DiscordClient::type_signal_voice_channel_changed DiscordClient::signal_voice_channel_changed() { return m_signal_voice_channel_changed; } + +DiscordClient::type_signal_call_create DiscordClient::signal_call_create() { + return m_signal_call_create; +} #endif DiscordClient::type_signal_voice_user_disconnect DiscordClient::signal_voice_user_disconnect() { @@ -3423,4 +3859,4 @@ DiscordClient::type_signal_voice_state_set DiscordClient::signal_voice_state_set DiscordClient::type_signal_voice_speaker_state_changed DiscordClient::signal_voice_speaker_state_changed() { return m_signal_voice_speaker_state_changed; -} \ No newline at end of file +} diff --git a/src/discord/discord.hpp b/src/discord/discord.hpp index e1c9c402..3f1ec0c8 100644 --- a/src/discord/discord.hpp +++ b/src/discord/discord.hpp @@ -215,6 +215,17 @@ class DiscordClient { void SetVoiceMuted(bool is_mute); void SetVoiceDeafened(bool is_deaf); + + void 
AcceptCall(Snowflake channel_id); + void RejectCall(Snowflake channel_id); + void JoinCall(Snowflake channel_id); + void StartCall(Snowflake channel_id); // Start a call in DM/Group DM + + void StartScreenShare(Snowflake guild_id, Snowflake channel_id, + int x = 0, int y = 0, int width = 0, int height = 0); + void StopScreenShare(); + void StartCamera(); + void StopCamera(); #endif [[nodiscard]] std::optional> GetVoiceState(Snowflake user_id) const; @@ -314,6 +325,9 @@ class DiscordClient { #ifdef WITH_VOICE void HandleGatewayVoiceServerUpdate(const GatewayMessage &msg); void HandleGatewayCallCreate(const GatewayMessage &msg); + void HandleGatewayStreamCreate(const GatewayMessage &msg); + void HandleGatewayStreamServerUpdate(const GatewayMessage &msg); + void HandleGatewayStreamUpdate(const GatewayMessage &msg); #endif void HandleGatewayVoiceStateUpdate(const GatewayMessage &msg); @@ -387,6 +401,22 @@ class DiscordClient { #ifdef WITH_VOICE DiscordVoiceClient m_voice; +#ifdef WITH_VIDEO + enum class PendingVideoStart { + None, + ScreenShare, + Camera, + }; + + PendingVideoStart m_pending_video_start = PendingVideoStart::None; + // Geometry parameters for screen share (stored when StartScreenShare is called) + int m_screen_share_x = 0; + int m_screen_share_y = 0; + int m_screen_share_width = 0; + int m_screen_share_height = 0; + void StartPendingVideo(); +#endif + bool m_mute_requested = false; bool m_deaf_requested = false; @@ -489,6 +519,7 @@ class DiscordClient { using type_signal_voice_requested_disconnect = sigc::signal; using type_signal_voice_client_state_update = sigc::signal; using type_signal_voice_channel_changed = sigc::signal; + using type_signal_call_create = sigc::signal; #endif using type_signal_voice_user_disconnect = sigc::signal; @@ -562,6 +593,7 @@ class DiscordClient { type_signal_voice_requested_disconnect signal_voice_requested_disconnect(); type_signal_voice_client_state_update signal_voice_client_state_update(); 
type_signal_voice_channel_changed signal_voice_channel_changed(); + type_signal_call_create signal_call_create(); #endif type_signal_voice_user_disconnect signal_voice_user_disconnect(); @@ -636,6 +668,7 @@ class DiscordClient { type_signal_voice_requested_disconnect m_signal_voice_requested_disconnect; type_signal_voice_client_state_update m_signal_voice_client_state_update; type_signal_voice_channel_changed m_signal_voice_channel_changed; + type_signal_call_create m_signal_call_create; #endif type_signal_voice_user_disconnect m_signal_voice_user_disconnect; diff --git a/src/discord/objects.cpp b/src/discord/objects.cpp index 0742dca3..b06a13c5 100644 --- a/src/discord/objects.cpp +++ b/src/discord/objects.cpp @@ -703,6 +703,13 @@ void from_json(const nlohmann::json &j, CallCreateData &m) { JS_ON("voice_states", m.VoiceStates); } +void to_json(nlohmann::json &j, const CallConnectMessage &m) { + j["op"] = GatewayOp::CallConnect; + j["d"] = nlohmann::json::object(); + j["d"]["channel_id"] = m.ChannelID; + j["d"]["ringing"] = m.Ringing; +} + void to_json(nlohmann::json &j, const ModifyCurrentUserVoiceStateObject &m) { JS_IF("channel_id", m.ChannelID); JS_IF("suppress", m.Suppress); diff --git a/src/discord/objects.hpp b/src/discord/objects.hpp index 7135f991..f346156d 100644 --- a/src/discord/objects.hpp +++ b/src/discord/objects.hpp @@ -114,6 +114,9 @@ enum class GatewayEvent : int { STAGE_INSTANCE_CREATE, STAGE_INSTANCE_UPDATE, STAGE_INSTANCE_DELETE, + STREAM_CREATE, + STREAM_SERVER_UPDATE, + STREAM_UPDATE, }; enum class GatewayCloseCode : uint16_t { @@ -961,6 +964,13 @@ struct CallCreateData { friend void from_json(const nlohmann::json &j, CallCreateData &m); }; +struct CallConnectMessage { + Snowflake ChannelID; + bool Ringing; // true to accept, false to reject + + friend void to_json(nlohmann::json &j, const CallConnectMessage &m); +}; + struct ModifyCurrentUserVoiceStateObject { std::optional ChannelID; std::optional Suppress; diff --git 
a/src/discord/rtppacketizer.cpp b/src/discord/rtppacketizer.cpp new file mode 100644 index 00000000..8d3389a1 --- /dev/null +++ b/src/discord/rtppacketizer.cpp @@ -0,0 +1,102 @@ +#ifdef WITH_VOICE + +#include "rtppacketizer.hpp" +#include + +std::vector RTPPacketizer::PacketizeH264( + const uint8_t *data, + size_t len, + uint32_t ssrc, + uint16_t sequence, + uint32_t timestamp, + uint8_t payload_type) { + + std::vector packets; + + if (len == 0) return packets; + + // Extract NAL unit type (first 5 bits of first byte) + uint8_t nal_type = data[0] & 0x1F; + uint8_t f_nri = data[0] & 0xE0; // F and NRI bits + + if (len <= MAX_PAYLOAD_SIZE) { + // Single NAL Unit Packet (RFC 6184 section 5.6) + RTPPacket packet; + packet.Marker = true; + packet.Data.resize(12 + len); + + // RTP Header + packet.Data[0] = 0x80; // Version 2, no padding, no extension, no CSRC + packet.Data[1] = payload_type & 0x7F; // Payload type + packet.Data[2] = (sequence >> 8) & 0xFF; + packet.Data[3] = sequence & 0xFF; + packet.Data[4] = (timestamp >> 24) & 0xFF; + packet.Data[5] = (timestamp >> 16) & 0xFF; + packet.Data[6] = (timestamp >> 8) & 0xFF; + packet.Data[7] = timestamp & 0xFF; + packet.Data[8] = (ssrc >> 24) & 0xFF; + packet.Data[9] = (ssrc >> 16) & 0xFF; + packet.Data[10] = (ssrc >> 8) & 0xFF; + packet.Data[11] = ssrc & 0xFF; + + // Copy NAL unit + std::memcpy(packet.Data.data() + 12, data, len); + + packets.push_back(packet); + } else { + // FU-A Fragmentation (RFC 6184 section 5.8) + size_t offset = 1; // Skip NAL header + size_t remaining = len - 1; + bool first = true; + + while (remaining > 0) { + RTPPacket packet; + size_t fragment_size = remaining > (MAX_PAYLOAD_SIZE - 2) ? (MAX_PAYLOAD_SIZE - 2) : remaining; + bool last = (remaining <= MAX_PAYLOAD_SIZE - 2); + + packet.Marker = last; + packet.Data.resize(12 + 2 + fragment_size); + + // RTP Header + packet.Data[0] = 0x80; + packet.Data[1] = (payload_type & 0x7F) | (last ? 
0x80 : 0x00); // Marker bit + packet.Data[2] = (sequence >> 8) & 0xFF; + packet.Data[3] = sequence & 0xFF; + packet.Data[4] = (timestamp >> 24) & 0xFF; + packet.Data[5] = (timestamp >> 16) & 0xFF; + packet.Data[6] = (timestamp >> 8) & 0xFF; + packet.Data[7] = timestamp & 0xFF; + packet.Data[8] = (ssrc >> 24) & 0xFF; + packet.Data[9] = (ssrc >> 16) & 0xFF; + packet.Data[10] = (ssrc >> 8) & 0xFF; + packet.Data[11] = ssrc & 0xFF; + + // FU Indicator (F + NRI from original NAL) + packet.Data[12] = f_nri | 28; // FU-A type + + // FU Header + packet.Data[13] = nal_type; + if (first) { + packet.Data[13] |= 0x80; // S bit + first = false; + } + if (last) { + packet.Data[13] |= 0x40; // E bit + } + + // Copy fragment + std::memcpy(packet.Data.data() + 14, data + offset, fragment_size); + + packets.push_back(packet); + + offset += fragment_size; + remaining -= fragment_size; + sequence++; + } + } + + return packets; +} + +#endif + diff --git a/src/discord/rtppacketizer.hpp b/src/discord/rtppacketizer.hpp new file mode 100644 index 00000000..29d45809 --- /dev/null +++ b/src/discord/rtppacketizer.hpp @@ -0,0 +1,30 @@ +#pragma once +#ifdef WITH_VOICE + +#include +#include + +class RTPPacketizer { +public: + struct RTPPacket { + std::vector Data; + bool Marker; + }; + + // Packetize H.264 NAL units according to RFC 6184 + // Returns vector of RTP packets + static std::vector PacketizeH264( + const uint8_t *data, + size_t len, + uint32_t ssrc, + uint16_t sequence, + uint32_t timestamp, + uint8_t payload_type = 105 // H.264 payload type + ); + +private: + static constexpr size_t MAX_PAYLOAD_SIZE = 1100; // Conservative MTU limit +}; + +#endif + diff --git a/src/discord/voiceclient.cpp b/src/discord/voiceclient.cpp index f8505239..bd4f463a 100644 --- a/src/discord/voiceclient.cpp +++ b/src/discord/voiceclient.cpp @@ -52,7 +52,7 @@ void UDPSocket::SendEncrypted(const uint8_t *data, size_t len) { const uint32_t timestamp = Abaddon::Get().GetAudio().GetRTPTimestamp(); - std::vector 
rtp(12 + len + crypto_aead_xchacha20poly1305_ietf_ABYTES + sizeof(uint32_t), 0); + std::vector rtp(12 + len + crypto_aead_xchacha20poly1305_ietf_ABYTES + sizeof(uint64_t), 0); rtp[0] = 0x80; // ver 2 rtp[1] = 0x78; // payload type 0x78 rtp[2] = (m_sequence >> 8) & 0xFF; @@ -67,9 +67,8 @@ void UDPSocket::SendEncrypted(const uint8_t *data, size_t len) { rtp[11] = (m_ssrc >> 0) & 0xFF; std::array nonce_bytes = {}; - static uint32_t nonce = 0; - nonce++; - std::memcpy(nonce_bytes.data(), &nonce, sizeof(uint32_t)); + const uint64_t nonce = m_nonce_counter.fetch_add(1, std::memory_order_relaxed); + std::memcpy(nonce_bytes.data(), &nonce, sizeof(uint64_t)); unsigned long long ciphertext_len; crypto_aead_xchacha20poly1305_ietf_encrypt( @@ -80,8 +79,8 @@ void UDPSocket::SendEncrypted(const uint8_t *data, size_t len) { nonce_bytes.data(), m_secret_key.data()); - rtp.resize(12 + ciphertext_len + 4); - std::memcpy(rtp.data() + rtp.size() - sizeof(uint32_t), &nonce, sizeof(uint32_t)); + rtp.resize(12 + ciphertext_len + sizeof(uint64_t)); + std::memcpy(rtp.data() + rtp.size() - sizeof(uint64_t), &nonce, sizeof(uint64_t)); Send(rtp.data(), rtp.size()); } @@ -537,6 +536,15 @@ DiscordVoiceClient::type_signal_state_update DiscordVoiceClient::signal_state_up return m_signal_state_update; } +void DiscordVoiceClient::SetVideoStatus(bool active) { + m_video_active.store(active); + m_signal_video_state_changed.emit(active); +} + +bool DiscordVoiceClient::IsVideoActive() const noexcept { + return m_video_active.load(); +} + void from_json(const nlohmann::json &j, VoiceGatewayMessage &m) { JS_D("op", m.Opcode); m.Data = j.at("d"); diff --git a/src/discord/voiceclient.hpp b/src/discord/voiceclient.hpp index aa1014cf..314a09de 100644 --- a/src/discord/voiceclient.hpp +++ b/src/discord/voiceclient.hpp @@ -9,10 +9,15 @@ #include #include #include +#include #include #include #include #include +#ifdef WITH_VIDEO +#include "video/capture.hpp" +#include "discord/rtppacketizer.hpp" +#endif // 
clang-format on enum class VoiceGatewayCloseCode : uint16_t { @@ -87,6 +92,7 @@ struct VoiceIdentifyMessage { std::string SessionID; std::string Token; bool Video; + bool IsScreenShare = false; // todo streams i guess? friend void to_json(nlohmann::json &j, const VoiceIdentifyMessage &m); @@ -119,6 +125,7 @@ struct VoiceSelectProtocolMessage { uint16_t Port; std::string Mode; std::string Protocol; + std::optional Codecs; friend void to_json(nlohmann::json &j, const VoiceSelectProtocolMessage &m); }; @@ -133,6 +140,31 @@ struct VoiceSessionDescriptionData { friend void from_json(const nlohmann::json &j, VoiceSessionDescriptionData &m); }; +struct VoiceVideoSourceUpdateMessage { + uint32_t AudioSSRC; + uint32_t VideoSSRC; + uint32_t RTXSSRC; + struct Stream { + std::string Type; + std::string RID; + uint32_t SSRC; + bool Active; + int Quality; + uint32_t RTXSSRC; + int MaxBitrate; + int MaxFramerate; + struct Resolution { + int Width; + int Height; + } MaxResolution; + + friend void to_json(nlohmann::json &j, const Stream &s); + }; + std::vector Streams; + + friend void to_json(nlohmann::json &j, const VoiceVideoSourceUpdateMessage &m); +}; + enum class VoiceSpeakingType { Microphone = 1 << 0, Soundshare = 1 << 1, @@ -164,10 +196,14 @@ class UDPSocket { void Run(); void SetSecretKey(std::array key); void SetSSRC(uint32_t ssrc); - void SendEncrypted(const uint8_t *data, size_t len); - void SendEncrypted(const std::vector &data); + void SetEncryptionMode(const std::string &mode); + void SendEncrypted(const uint8_t *data, size_t len, uint32_t ssrc = 0); + void SendEncrypted(const std::vector &data, uint32_t ssrc = 0); + void SendEncryptedRTP(const uint8_t *rtp_packet, size_t len); + void SendEncryptedRTP(const std::vector &rtp_packet); void Send(const uint8_t *data, size_t len); std::vector Receive(); + void SetReceiveTimeout(int timeout_ms); void Stop(); private: @@ -186,8 +222,11 @@ class UDPSocket { std::array m_secret_key; uint32_t m_ssrc; + std::string 
m_encryption_mode; + std::atomic m_packet_counter{0}; - uint16_t m_sequence = 0; + std::atomic m_sequence{0}; + std::atomic m_nonce_counter{0}; public: using type_signal_data = sigc::signal>; @@ -197,7 +236,11 @@ class UDPSocket { type_signal_data m_signal_data; }; +class DiscordClient; // Forward declaration + class DiscordVoiceClient { + friend class DiscordClient; + public: DiscordVoiceClient(); ~DiscordVoiceClient(); @@ -211,11 +254,24 @@ class DiscordVoiceClient { void SetServerID(Snowflake id); void SetUserID(Snowflake id); + void SetStreamKey(std::string_view key); + void SetStreamToken(std::string_view token); + void SetStreamEndpoint(std::string_view endpoint); + void SetStreamPaused(bool paused) noexcept; + [[nodiscard]] bool IsStreamPaused() const noexcept; + void SetScreenShareMode(bool is_screenshare) noexcept; + [[nodiscard]] bool IsScreenShareMode() const noexcept; + [[nodiscard]] bool IsIdentifiedAsScreenShare() const noexcept; + void Reconnect(); + // todo serialize void SetUserVolume(Snowflake id, float volume); [[nodiscard]] float GetUserVolume(Snowflake id) const; [[nodiscard]] std::optional GetSSRCOfUser(Snowflake id) const; + void SetVideoStatus(bool active); + [[nodiscard]] bool IsVideoActive() const noexcept; + // Is a websocket and udp connection fully established [[nodiscard]] bool IsConnected() const noexcept; [[nodiscard]] bool IsConnecting() const noexcept; @@ -255,6 +311,12 @@ class DiscordVoiceClient { std::string m_session_id; std::string m_endpoint; std::string m_token; + std::string m_stream_key; + std::string m_stream_token; + std::string m_stream_endpoint; + std::atomic m_stream_paused = false; + std::atomic m_is_screenshare = false; + std::atomic m_identified_as_screenshare = false; Snowflake m_server_id; Snowflake m_channel_id; Snowflake m_user_id; @@ -268,6 +330,11 @@ class DiscordVoiceClient { uint16_t m_port; uint32_t m_ssrc; + uint32_t m_video_ssrc = 0; + uint32_t m_rtx_ssrc = 0; + std::atomic m_video_active{false}; + 
std::string m_encryption_mode; + int m_heartbeat_msec; Waiter m_heartbeat_waiter; std::thread m_heartbeat_thread; @@ -286,6 +353,13 @@ class DiscordVoiceClient { std::array m_opus_buffer; +#ifdef WITH_VIDEO + std::unique_ptr m_video_capture; + sigc::connection m_video_packet_connection; + std::atomic m_video_timestamp{0}; + std::atomic m_video_sequence{0}; +#endif + std::shared_ptr m_log; std::atomic m_state; @@ -294,15 +368,24 @@ class DiscordVoiceClient { using type_signal_disconnected = sigc::signal; using type_signal_speaking = sigc::signal; using type_signal_state_update = sigc::signal; + using type_signal_video_state_changed = sigc::signal; + using type_signal_pli_received = sigc::signal; + using type_signal_fir_received = sigc::signal; type_signal_connected m_signal_connected; type_signal_disconnected m_signal_disconnected; type_signal_speaking m_signal_speaking; type_signal_state_update m_signal_state_update; + type_signal_video_state_changed m_signal_video_state_changed; + type_signal_pli_received m_signal_pli_received; + type_signal_fir_received m_signal_fir_received; public: type_signal_connected signal_connected(); type_signal_disconnected signal_disconnected(); type_signal_speaking signal_speaking(); type_signal_state_update signal_state_update(); + type_signal_video_state_changed signal_video_state_changed(); + type_signal_pli_received signal_pli_received(); + type_signal_fir_received signal_fir_received(); }; #endif diff --git a/src/settings.cpp b/src/settings.cpp index e9b6fc76..888d5e83 100644 --- a/src/settings.cpp +++ b/src/settings.cpp @@ -41,23 +41,34 @@ void SettingsManager::HandleReadToken() { if (!m_settings.UseKeychain) return; - // Move to keychain if present in .ini - std::string token = m_settings.DiscordToken; + // Keep whatever was read from the config file as a fallback. If the keychain + // backend isn't available (e.g. Secret Service not running), we must not + // clobber the token in memory. 
+ const std::string token_in_config = m_settings.DiscordToken; - if (!token.empty()) { + // Move to keychain if present in .ini + if (!token_in_config.empty()) { keychain::Error error {}; - keychain::setPassword(KeychainPackage, KeychainService, KeychainUser, token, error); + keychain::setPassword(KeychainPackage, KeychainService, KeychainUser, token_in_config, error); if (error) { spdlog::get("ui")->error("Keychain error setting token: {}", error.message); + return; } else { m_file.remove_key("discord", "token"); + return; } } keychain::Error error {}; - m_settings.DiscordToken = keychain::getPassword(KeychainPackage, KeychainService, KeychainUser, error); + const auto token = keychain::getPassword(KeychainPackage, KeychainService, KeychainUser, error); if (error && error.type != keychain::ErrorType::NotFound) { spdlog::get("ui")->error("Keychain error reading token: {} ({})", error.message, error.code); + return; + } + if (!error) { + m_settings.DiscordToken = token; + } else { + m_settings.DiscordToken = token_in_config; } #endif } @@ -70,10 +81,19 @@ void SettingsManager::HandleWriteToken() { keychain::setPassword(KeychainPackage, KeychainService, KeychainUser, m_settings.DiscordToken, error); if (error) { spdlog::get("ui")->error("Keychain error setting token: {}", error.message); + // If keychain fails, fall back to saving in config file + m_file.set_string("discord", "token", m_settings.DiscordToken); + return; } + // If keychain succeeds, remove token from config file for security + if (m_file.has_key("discord", "token")) { + m_file.remove_key("discord", "token"); + } + return; } #endif - // else it will get enumerated over as part of definitions + // If keychain is disabled, save token in config file + m_file.set_string("discord", "token", m_settings.DiscordToken); } void SettingsManager::DefineSettings() { @@ -171,11 +191,15 @@ SettingsManager::Settings &SettingsManager::GetSettings() { return m_settings; } -void SettingsManager::Close() { +void 
SettingsManager::Save() { if (m_ok) { for (auto &[k, setting] : m_definitions) { switch (setting.Type) { case SettingDefinition::TypeString: + // Skip token here - it's handled by HandleWriteToken() + if (setting.Section == "discord" && setting.Name == "token") { + break; + } if (m_settings.*(setting.Ptr.String) != m_read_settings.*(setting.Ptr.String)) { m_file.set_string(setting.Section, setting.Name, m_settings.*(setting.Ptr.String)); } @@ -208,4 +232,8 @@ void SettingsManager::Close() { spdlog::get("ui")->error("Failed to save settings Keyfile: {}", e.what().c_str()); } } -} \ No newline at end of file +} + +void SettingsManager::Close() { + Save(); +} diff --git a/src/settings.hpp b/src/settings.hpp index 5805452b..9779ff46 100644 --- a/src/settings.hpp +++ b/src/settings.hpp @@ -60,6 +60,7 @@ class SettingsManager { SettingsManager(const std::string &filename); void Close(); + void Save(); // Save settings immediately without closing [[nodiscard]] bool IsValid() const; Settings &GetSettings(); diff --git a/src/video/capture.cpp b/src/video/capture.cpp new file mode 100644 index 00000000..36926ec5 --- /dev/null +++ b/src/video/capture.cpp @@ -0,0 +1,518 @@ +#ifdef WITH_VIDEO + +#include "capture.hpp" +#include +#include +#include +#include +#include + +extern "C" { +#include +#include +#include +#include +#include +#include +} + +VideoCapture::VideoCapture() { + avdevice_register_all(); +} + +VideoCapture::~VideoCapture() { + Stop(); + Cleanup(); +} + +bool VideoCapture::StartCameraCapture(const std::string &device) { + if (m_running.load()) { + Stop(); + } + + m_is_screen = false; + m_device = device.empty() ? 
GetDefaultCameraDevice() : device; + + if (!InitializeEncoder()) { + return false; + } + + m_running = true; + m_capture_thread = std::thread(&VideoCapture::CaptureThread, this); + return true; +} + +bool VideoCapture::StartScreenCapture(int x, int y, int width, int height) { + if (m_running.load()) { + Stop(); + } + + m_is_screen = true; + m_device = GetDefaultScreenDevice(); + + // Store geometry if provided (all values > 0) + if (width > 0 && height > 0) { + m_screen_x = x; + m_screen_y = y; + m_screen_width = width; + m_screen_height = height; + } else { + // Reset to defaults (automatic detection) + m_screen_x = 0; + m_screen_y = 0; + m_screen_width = 0; + m_screen_height = 0; + } + + if (!InitializeEncoder()) { + return false; + } + + m_running = true; + m_capture_thread = std::thread(&VideoCapture::CaptureThread, this); + return true; +} + +void VideoCapture::Stop() { + if (!m_running.load()) return; + + m_running = false; + if (m_capture_thread.joinable()) { + m_capture_thread.join(); + } + + Cleanup(); +} + +void VideoCapture::ForceKeyframe() { + m_force_keyframe = true; +} + +VideoCapture::type_signal_packet VideoCapture::signal_packet() { + return m_signal_packet; +} + +std::string VideoCapture::GetDefaultCameraDevice() { +#ifdef _WIN32 + return "video=Integrated Camera"; // Default Windows camera +#elif __APPLE__ + return "0"; // avfoundation device index +#else + return "/dev/video0"; // v4l2 device +#endif +} + +std::string VideoCapture::GetDefaultScreenDevice() { +#ifdef _WIN32 + return "desktop"; // gdigrab +#elif __APPLE__ + return "1"; // avfoundation screen index +#else + // Get DISPLAY environment variable, default to :0.0 + const char *display = std::getenv("DISPLAY"); + if (display && strlen(display) > 0) { + return std::string(display); + } + return ":0.0"; // x11grab default +#endif +} + +bool VideoCapture::InitializeEncoder() { + Cleanup(); + + AVFormatContext *format_ctx = nullptr; + AVCodecContext *codec_ctx = nullptr; + AVFrame *frame = 
nullptr; + AVPacket *packet = nullptr; + SwsContext *sws_ctx = nullptr; + + // Open input device + const AVInputFormat *input_format = nullptr; + AVDictionary *options = nullptr; + + if (m_is_screen) { +#ifdef _WIN32 + input_format = av_find_input_format("gdigrab"); + av_dict_set(&options, "framerate", "30", 0); + + // Use geometry if provided + if (m_screen_width > 0 && m_screen_height > 0) { + av_dict_set(&options, "offset_x", std::to_string(m_screen_x).c_str(), 0); + av_dict_set(&options, "offset_y", std::to_string(m_screen_y).c_str(), 0); + std::string video_size = std::to_string(m_screen_width) + "x" + std::to_string(m_screen_height); + av_dict_set(&options, "video_size", video_size.c_str(), 0); + } else { + av_dict_set(&options, "offset_x", "0", 0); + av_dict_set(&options, "offset_y", "0", 0); + } +#elif __APPLE__ + input_format = av_find_input_format("avfoundation"); + // Format: "screen:audio" or just screen index + // Note: avfoundation uses screen indices, geometry handling may need adjustment + // For now, use default behavior if geometry not provided +#else + input_format = av_find_input_format("x11grab"); + av_dict_set(&options, "framerate", "30", 0); + + // Use geometry if provided + if (m_screen_width > 0 && m_screen_height > 0) { + // Set video_size with provided dimensions + std::string video_size = std::to_string(m_screen_width) + "x" + std::to_string(m_screen_height); + av_dict_set(&options, "video_size", video_size.c_str(), 0); + + // Build input URL with offset: ":display.screen+X,Y" + // Extract display part from m_device (e.g., ":0.0" or "hostname:0.0") + std::string display_part = m_device; + if (display_part.find(':') == std::string::npos) { + display_part = ":" + display_part; + } + // Ensure screen number is present + if (display_part.find('.') == std::string::npos) { + display_part = display_part + ".0"; + } + // Append offset + m_device = display_part + "+" + std::to_string(m_screen_x) + "," + std::to_string(m_screen_y); + } else { + // 
Default behavior: use automatic detection + av_dict_set(&options, "video_size", "1280x720", 0); + // Ensure device format is correct for x11grab + // Format should be "hostname:display.screen" or ":display.screen" + // If device doesn't have ':', prepend it + if (m_device.find(':') == std::string::npos) { + m_device = ":" + m_device; + } + // If device doesn't have screen number, add .0 + if (m_device.find('.') == std::string::npos && m_device.back() != '0') { + m_device = m_device + ".0"; + } + } + + // Try to use XAUTHORITY if available + const char *xauth = std::getenv("XAUTHORITY"); + if (xauth) { + av_dict_set(&options, "xauth", xauth, 0); + } +#endif + } else { +#ifdef _WIN32 + input_format = av_find_input_format("dshow"); +#elif __APPLE__ + input_format = av_find_input_format("avfoundation"); +#else + input_format = av_find_input_format("v4l2"); + av_dict_set(&options, "video_size", "1280x720", 0); + av_dict_set(&options, "framerate", "30", 0); +#endif + } + + int ret = avformat_open_input(&format_ctx, m_device.c_str(), input_format, &options); + if (ret < 0) { + char errbuf[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE); + spdlog::get("video")->error("Failed to open input device: {} (error: {})", m_device, errbuf); + if (m_is_screen) { + spdlog::get("video")->warn("Screen capture requires X11 authorization. 
Try running: xhost +local:"); + spdlog::get("video")->warn("Or ensure XAUTHORITY environment variable is set correctly"); + } + av_dict_free(&options); + return false; + } + av_dict_free(&options); + + if (avformat_find_stream_info(format_ctx, nullptr) < 0) { + spdlog::get("video")->error("Failed to find stream info"); + avformat_close_input(&format_ctx); + return false; + } + + int video_stream_idx = -1; + for (unsigned int i = 0; i < format_ctx->nb_streams; i++) { + if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + video_stream_idx = i; + break; + } + } + + if (video_stream_idx == -1) { + spdlog::get("video")->error("No video stream found"); + avformat_close_input(&format_ctx); + return false; + } + + AVCodecParameters *codecpar = format_ctx->streams[video_stream_idx]->codecpar; + + // Find H.264 encoder + const AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_H264); + if (!encoder) { + spdlog::get("video")->error("H.264 encoder not found"); + avformat_close_input(&format_ctx); + return false; + } + + codec_ctx = avcodec_alloc_context3(encoder); + if (!codec_ctx) { + spdlog::get("video")->error("Failed to allocate codec context"); + avformat_close_input(&format_ctx); + return false; + } + + // Configure encoder + codec_ctx->width = 1280; + codec_ctx->height = 720; + codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P; + codec_ctx->time_base = {1, 30}; + codec_ctx->framerate = {30, 1}; + // Force more frequent keyframes for screen share to ensure visibility + // Even if screen is static, we need regular keyframes so Discord can decode + // For screen share, use smaller GOP to ensure viewers can join quickly + codec_ctx->gop_size = m_is_screen ? 30 : 60; // Screen: 1 second, Camera: 2 seconds + codec_ctx->keyint_min = m_is_screen ? 
30 : 60; // Minimum keyframe interval + codec_ctx->max_b_frames = 0; // CRITICAL: No B-frames for zero latency + codec_ctx->bit_rate = 2000000; // 2 Mbps + + // Set encoder options for zero latency streaming + av_opt_set(codec_ctx->priv_data, "preset", "ultrafast", 0); + av_opt_set(codec_ctx->priv_data, "tune", "zerolatency", 0); + av_opt_set(codec_ctx->priv_data, "repeat_headers", "1", 0); + av_opt_set(codec_ctx->priv_data, "annexb", "1", 0); + + // CRITICAL: x264 parameters for zero latency + // keyint: Maximum keyframe interval (same as gop_size) + // min-keyint: Minimum keyframe interval (same as keyint_min) + // scenecut=0: Disable scene change detection (reduces lag) + std::string x264_params = "keyint=" + std::to_string(codec_ctx->gop_size) + + ":min-keyint=" + std::to_string(codec_ctx->keyint_min) + + ":scenecut=0"; + av_opt_set(codec_ctx->priv_data, "x264-params", x264_params.c_str(), 0); + + if (avcodec_open2(codec_ctx, encoder, nullptr) < 0) { + spdlog::get("video")->error("Failed to open encoder"); + avcodec_free_context(&codec_ctx); + avformat_close_input(&format_ctx); + return false; + } + + // Allocate frame + frame = av_frame_alloc(); + if (!frame) { + spdlog::get("video")->error("Failed to allocate frame"); + avcodec_free_context(&codec_ctx); + avformat_close_input(&format_ctx); + return false; + } + + frame->format = codec_ctx->pix_fmt; + frame->width = codec_ctx->width; + frame->height = codec_ctx->height; + if (av_frame_get_buffer(frame, 0) < 0) { + spdlog::get("video")->error("Failed to allocate frame buffer"); + av_frame_free(&frame); + avcodec_free_context(&codec_ctx); + avformat_close_input(&format_ctx); + return false; + } + + // Allocate packet + packet = av_packet_alloc(); + if (!packet) { + spdlog::get("video")->error("Failed to allocate packet"); + av_frame_free(&frame); + avcodec_free_context(&codec_ctx); + avformat_close_input(&format_ctx); + return false; + } + + // Initialize swscale context for format conversion + AVCodecContext 
*decoder_ctx = avcodec_alloc_context3(avcodec_find_decoder(codecpar->codec_id)); + if (decoder_ctx) { + avcodec_parameters_to_context(decoder_ctx, codecpar); + avcodec_open2(decoder_ctx, avcodec_find_decoder(codecpar->codec_id), nullptr); + sws_ctx = sws_getContext( + decoder_ctx->width, decoder_ctx->height, decoder_ctx->pix_fmt, + codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, + SWS_BILINEAR, nullptr, nullptr, nullptr); + avcodec_free_context(&decoder_ctx); + } + + // Store pointers + m_format_ctx = format_ctx; + m_codec_ctx = codec_ctx; + m_frame = frame; + m_packet = packet; + m_sws_ctx = sws_ctx; + + spdlog::get("video")->info("Video capture initialized: {}x{}", codec_ctx->width, codec_ctx->height); + return true; +} + +void VideoCapture::Cleanup() { + std::lock_guard lock(m_encoder_mutex); + + if (m_sws_ctx) { + sws_freeContext(m_sws_ctx); + m_sws_ctx = nullptr; + } + + if (m_packet) { + av_packet_free(&m_packet); + m_packet = nullptr; + } + + if (m_frame) { + av_frame_free(&m_frame); + m_frame = nullptr; + } + + if (m_codec_ctx) { + avcodec_free_context(&m_codec_ctx); + m_codec_ctx = nullptr; + } + + if (m_format_ctx) { + avformat_close_input(&m_format_ctx); + m_format_ctx = nullptr; + } +} + +void VideoCapture::CaptureThread() { + AVFormatContext *format_ctx = static_cast(m_format_ctx); + AVCodecContext *codec_ctx = static_cast(m_codec_ctx); + AVFrame *frame = static_cast(m_frame); + AVPacket *packet = static_cast(m_packet); + SwsContext *sws_ctx = static_cast(m_sws_ctx); + + int video_stream_idx = -1; + for (unsigned int i = 0; i < format_ctx->nb_streams; i++) { + if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + video_stream_idx = i; + break; + } + } + + AVCodecContext *decoder_ctx = avcodec_alloc_context3(avcodec_find_decoder(format_ctx->streams[video_stream_idx]->codecpar->codec_id)); + avcodec_parameters_to_context(decoder_ctx, format_ctx->streams[video_stream_idx]->codecpar); + avcodec_open2(decoder_ctx, 
avcodec_find_decoder(format_ctx->streams[video_stream_idx]->codecpar->codec_id), nullptr); + + AVFrame *decoded_frame = av_frame_alloc(); + AVPacket *input_packet = av_packet_alloc(); + + uint32_t timestamp = 0; + const uint32_t timestamp_increment = 3000; // 90000 / 30 = 3000 + uint32_t frame_count = 0; // Counter for forcing keyframes + + while (m_running.load()) { + // Read frame from device + int read_result = av_read_frame(format_ctx, input_packet); + if (read_result < 0) { + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"A\",\"location\":\"capture.cpp:" << __LINE__ << "\",\"message\":\"av_read_frame failed\",\"data\":{\"error\":" << read_result << ",\"is_screen\":" << (m_is_screen ? "true" : "false") << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + break; + } + + if (input_packet->stream_index == video_stream_idx) { + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"A\",\"location\":\"capture.cpp:" << __LINE__ << "\",\"message\":\"Received input packet\",\"data\":{\"input_size\":" << input_packet->size << ",\"stream_index\":" << input_packet->stream_index << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + // Decode frame + int send_result = avcodec_send_packet(decoder_ctx, input_packet); + if (send_result == 0) { + int receive_result = avcodec_receive_frame(decoder_ctx, decoded_frame); + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << 
"{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"A\",\"location\":\"capture.cpp:" << __LINE__ << "\",\"message\":\"Decoder receive result\",\"data\":{\"receive_result\":" << receive_result << ",\"frame_width\":" << (receive_result == 0 ? decoded_frame->width : 0) << ",\"frame_height\":" << (receive_result == 0 ? decoded_frame->height : 0) << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + while (receive_result == 0) { + // Convert to YUV420P + if (sws_ctx) { + sws_scale(sws_ctx, + decoded_frame->data, decoded_frame->linesize, 0, decoded_frame->height, + frame->data, frame->linesize); + } + + // Force keyframe if requested + { + std::lock_guard lock(m_encoder_mutex); + if (m_force_keyframe.load()) { + frame->pict_type = AV_PICTURE_TYPE_I; + m_force_keyframe = false; + frame_count = 0; // Reset counter after forced keyframe + } else { + // For screen share, force keyframe every 15 frames (0.5 seconds) to ensure visibility + // This prevents empty P-frames when screen is static and allows viewers to join quickly + // For camera, use every 30 frames (1 second) to balance quality and latency + const uint32_t keyframe_interval = m_is_screen ? 
15 : 30; + if (frame_count % keyframe_interval == 0) { + frame->pict_type = AV_PICTURE_TYPE_I; + } else { + frame->pict_type = AV_PICTURE_TYPE_NONE; + } + } + } + + // CRITICAL: Set PTS in codec time_base units (1/30) + // The encoder will convert this internally, but we need to track it + // for RTP timestamp calculation (which uses 90000 Hz clock) + frame->pts = timestamp; + timestamp += timestamp_increment; + frame_count++; + + // Encode frame + int encode_send_result = avcodec_send_frame(codec_ctx, frame); + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"A\",\"location\":\"capture.cpp:" << __LINE__ << "\",\"message\":\"Encoder send frame\",\"data\":{\"encode_send_result\":" << encode_send_result << ",\"frame_pts\":" << frame->pts << ",\"pict_type\":" << static_cast(frame->pict_type) << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + if (encode_send_result == 0) { + while (avcodec_receive_packet(codec_ctx, packet) == 0) { + // Emit signal with encoded packet + std::vector packet_data(packet->data, packet->data + packet->size); + + // CRITICAL: Convert packet PTS from codec time_base to RTP timestamp (90000 Hz) + // Codec time_base is 1/30, RTP video clock is 90000 Hz + // We use av_rescale_q to convert from codec time_base to RTP clock + const uint32_t rtp_timestamp = static_cast( + av_rescale_q(packet->pts, codec_ctx->time_base, AVRational{1, 90000}) + ); + + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"A\",\"location\":\"capture.cpp:" << __LINE__ << "\",\"message\":\"VideoCapture emitting packet\",\"data\":{\"packet_size\":" << packet_data.size() << 
",\"packet_pts\":" << packet->pts << ",\"rtp_timestamp\":" << rtp_timestamp << ",\"is_screen\":" << (m_is_screen ? "true" : "false") << ",\"flags\":" << packet->flags << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + m_signal_packet.emit(packet_data, rtp_timestamp); + av_packet_unref(packet); + } + } + receive_result = avcodec_receive_frame(decoder_ctx, decoded_frame); + } + } else { + // #region agent log + { + std::ofstream log_file("/home/klepto/programacion/abaddon/.cursor/debug.log", std::ios::app); + log_file << "{\"sessionId\":\"debug-session\",\"runId\":\"run1\",\"hypothesisId\":\"A\",\"location\":\"capture.cpp:" << __LINE__ << "\",\"message\":\"Decoder send failed\",\"data\":{\"send_result\":" << send_result << "},\"timestamp\":" << std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() << "}\n"; + } + // #endregion + } + } + + av_packet_unref(input_packet); + } + + av_frame_free(&decoded_frame); + av_packet_free(&input_packet); + avcodec_free_context(&decoder_ctx); +} + +#endif diff --git a/src/video/capture.hpp b/src/video/capture.hpp new file mode 100644 index 00000000..2b56fd9a --- /dev/null +++ b/src/video/capture.hpp @@ -0,0 +1,67 @@ +#pragma once +#ifdef WITH_VIDEO + +#include +#include +#include +#include +#include + +class VideoCapture { +public: + VideoCapture(); + ~VideoCapture(); + + // Start capturing from camera + bool StartCameraCapture(const std::string &device = ""); + + // Start capturing from screen + // If geometry parameters are provided (all > 0), use them for FFmpeg + // Otherwise, use default/automatic detection + bool StartScreenCapture(int x = 0, int y = 0, int width = 0, int height = 0); + + // Stop capturing + void Stop(); + + // Force a keyframe (I-frame) on next frame + void ForceKeyframe(); + + // Check if capturing + bool IsCapturing() const { return m_running.load(); } + + // Signal emitted when 
encoded packet is ready + // Parameters: encoded packet data, timestamp (90000 Hz clock) + using type_signal_packet = sigc::signal, uint32_t>; + type_signal_packet signal_packet(); + +private: + void CaptureThread(); + bool InitializeEncoder(); + void Cleanup(); + std::string GetDefaultCameraDevice(); + std::string GetDefaultScreenDevice(); + + std::atomic m_running{false}; + std::atomic m_force_keyframe{false}; + std::thread m_capture_thread; + std::mutex m_encoder_mutex; + + struct AVFormatContext *m_format_ctx = nullptr; + struct AVCodecContext *m_codec_ctx = nullptr; + struct AVFrame *m_frame = nullptr; + struct AVPacket *m_packet = nullptr; + struct SwsContext *m_sws_ctx = nullptr; + + bool m_is_screen = false; + std::string m_device; + + // Screen geometry for FFmpeg + int m_screen_x = 0; + int m_screen_y = 0; + int m_screen_width = 0; + int m_screen_height = 0; + + type_signal_packet m_signal_packet; +}; + +#endif diff --git a/src/windows/voice/voicewindow.cpp b/src/windows/voice/voicewindow.cpp index a9e9682e..802cdfd7 100644 --- a/src/windows/voice/voicewindow.cpp +++ b/src/windows/voice/voicewindow.cpp @@ -8,9 +8,13 @@ #include "abaddon.hpp" #include "audio/manager.hpp" #include "components/lazyimage.hpp" +#include "dialogs/screensharedialog.hpp" #include "voicewindowaudiencelistentry.hpp" #include "voicewindowspeakerlistentry.hpp" #include "windows/voicesettingswindow.hpp" +#include +#include +#include // clang-format on @@ -23,6 +27,10 @@ VoiceWindow::VoiceWindow(Snowflake channel_id) , m_mix_mono("Mix Mono") , m_stage_command("Request to Speak") , m_disconnect("Disconnect") +#ifdef WITH_VIDEO + , m_camera_button("Start Camera") + , m_screen_share_button("Share Screen") +#endif , m_stage_invite_lbl("You've been invited to speak") , m_stage_accept("Accept") , m_stage_decline("Decline") @@ -54,6 +62,11 @@ VoiceWindow::VoiceWindow(Snowflake channel_id) m_mute.signal_toggled().connect(sigc::mem_fun(*this, &VoiceWindow::OnMuteChanged)); 
    m_deafen.signal_toggled().connect(sigc::mem_fun(*this, &VoiceWindow::OnDeafenChanged));
+#ifdef WITH_VIDEO
+    m_camera_button.signal_clicked().connect(sigc::mem_fun(*this, &VoiceWindow::OnCameraClicked));
+    m_screen_share_button.signal_clicked().connect(sigc::mem_fun(*this, &VoiceWindow::OnScreenShareClicked));
+#endif
+
     m_scroll.set_policy(Gtk::POLICY_NEVER, Gtk::POLICY_AUTOMATIC);
     m_scroll.set_hexpand(true);
     m_scroll.set_vexpand(true);
@@ -245,6 +258,10 @@ VoiceWindow::VoiceWindow(Snowflake channel_id)
     m_controls.add(m_mix_mono);
     m_buttons.set_halign(Gtk::ALIGN_CENTER);
     if (m_is_stage) m_buttons.pack_start(m_stage_command, false, true);
+#ifdef WITH_VIDEO
+    m_buttons.pack_start(m_camera_button, false, true);
+    m_buttons.pack_start(m_screen_share_button, false, true);
+#endif
     m_buttons.pack_start(m_disconnect, false, true);
     m_stage_invite_box.pack_start(m_stage_invite_lbl, false, true);
     m_stage_invite_box.pack_start(m_stage_invite_btns);
@@ -313,6 +330,59 @@ void VoiceWindow::OnDeafenChanged() {
     m_signal_deafen.emit(m_deafen.get_active());
 }
+#ifdef WITH_VIDEO
+void VoiceWindow::OnCameraClicked() { /* toggles camera streaming via DiscordClient and flips the button label */
+    auto &discord = Abaddon::Get().GetDiscordClient();
+    spdlog::get("ui")->info("Camera button clicked, current state: {}", m_camera_active);
+    if (!m_camera_active) {
+        discord.StartCamera();
+        m_camera_button.set_label("Stop Camera");
+        m_camera_active = true;
+    } else {
+        discord.StopCamera();
+        m_camera_button.set_label("Start Camera");
+        m_camera_active = false;
+    }
+}
+
+void VoiceWindow::OnScreenShareClicked() { /* starts/stops screen share; when starting, asks the user for a source rectangle first */
+    auto &discord = Abaddon::Get().GetDiscordClient();
+    spdlog::get("ui")->info("Screen share button clicked, current state: {}", m_screen_share_active);
+    const auto channel = discord.GetChannel(m_channel_id);
+    if (!channel.has_value()) {
+        spdlog::get("ui")->warn("Channel not found for screen share");
+        return;
+    }
+
+    if (!m_screen_share_active) {
+        // Show dialog to select screen source
+        ScreenShareDialog dialog(*this);
+        int res = dialog.run();
+
+        if (res == Gtk::RESPONSE_OK) {
+            auto source = dialog.get_selected_source();
+            if (source) { /* source carries the chosen capture rectangle (x, y, width, height) */
+                Snowflake guild_id = 0; /* 0 when the channel has no guild (e.g. DM call) */
+                if (channel->GuildID.has_value()) {
+                    guild_id = *channel->GuildID;
+                }
+                // Pass geometry to StartScreenShare
+                discord.StartScreenShare(guild_id, m_channel_id,
+                                         source->x, source->y,
+                                         source->width, source->height);
+                m_screen_share_button.set_label("Stop Sharing");
+                m_screen_share_active = true;
+            }
+        }
+        // If user cancelled, do nothing
+    } else {
+        discord.StopScreenShare();
+        m_screen_share_button.set_label("Share Screen");
+        m_screen_share_active = false;
+    }
+}
+#endif
+
 void VoiceWindow::TryDeleteRow(Snowflake id) {
     if (auto it = m_rows.find(id); it != m_rows.end()) {
         delete it->second;
diff --git a/src/windows/voice/voicewindow.hpp b/src/windows/voice/voicewindow.hpp
index 05033d9b..767cfc71 100644
--- a/src/windows/voice/voicewindow.hpp
+++ b/src/windows/voice/voicewindow.hpp
@@ -38,6 +38,10 @@ class VoiceWindow : public Gtk::Window {
     void OnMuteChanged();
     void OnDeafenChanged();
+#ifdef WITH_VIDEO
+    void OnCameraClicked(); /* camera toggle button handler */
+    void OnScreenShareClicked(); /* screen-share toggle button handler */
+#endif
     void TryDeleteRow(Snowflake id);
     bool UpdateVoiceMeters();
@@ -70,6 +74,12 @@ class VoiceWindow : public Gtk::Window {
     Gtk::HBox m_buttons;
     Gtk::Button m_disconnect;
     Gtk::Button m_stage_command;
+#ifdef WITH_VIDEO
+    Gtk::Button m_camera_button;
+    Gtk::Button m_screen_share_button;
+    bool m_camera_active = false; /* UI-side toggle state, mirrors button label */
+    bool m_screen_share_active = false; /* UI-side toggle state, mirrors button label */
+#endif
     Gtk::VBox m_stage_invite_box;
     Gtk::Label m_stage_invite_lbl;