diff --git a/samples/bluetooth/peripheral_identity/prj.conf b/samples/bluetooth/peripheral_identity/prj.conf
index 8bd97851e368..d7847c0f7a32 100644
--- a/samples/bluetooth/peripheral_identity/prj.conf
+++ b/samples/bluetooth/peripheral_identity/prj.conf
@@ -6,8 +6,8 @@ CONFIG_BT_PRIVACY=y
 CONFIG_BT_DEVICE_NAME="Zephyr Peripheral"
 CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS=n
 
-CONFIG_BT_MAX_CONN=62
-CONFIG_BT_ID_MAX=62
+CONFIG_BT_MAX_CONN=61
+CONFIG_BT_ID_MAX=61
 
 # CONFIG_BT_SMP=y
 # CONFIG_BT_MAX_PAIRED=62
diff --git a/subsys/bluetooth/host/Kconfig b/subsys/bluetooth/host/Kconfig
index e4a381e9b05a..228a4ce58302 100644
--- a/subsys/bluetooth/host/Kconfig
+++ b/subsys/bluetooth/host/Kconfig
@@ -133,6 +133,26 @@ config BT_DRIVER_RX_HIGH_PRIO
 	int
 	default 6
 
+config BT_TX_PROCESSOR_THREAD
+	# This thread is used to send pending HCI commands, ACL and ISO
+	# data to the Controller.
+	bool
+	# This option is enabled by default on all platforms except the
+	# nRF51 series, due to the limited RAM on nRF51 devices.
+	default y if !SOC_SERIES_NRF51X
+
+if BT_TX_PROCESSOR_THREAD
+
+config BT_TX_PROCESSOR_THREAD_PRIO
+	int
+	default SYSTEM_WORKQUEUE_PRIORITY
+
+config BT_TX_PROCESSOR_STACK_SIZE
+	int
+	default 1024
+
+endif
+
 config BT_CONN_TX_NOTIFY_WQ
 	bool "Use a separate workqueue for connection TX notify processing [EXPERIMENTAL]"
 	depends on BT_CONN_TX
diff --git a/subsys/bluetooth/host/att.c b/subsys/bluetooth/host/att.c
index 644952f55257..1d6ded311174 100644
--- a/subsys/bluetooth/host/att.c
+++ b/subsys/bluetooth/host/att.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -248,10 +249,37 @@ const char *bt_att_err_to_str(uint8_t att_err)
 }
 #endif /* CONFIG_BT_ATT_ERR_TO_STR */
 
-static void att_tx_destroy(struct net_buf *buf)
+static void att_tx_destroy_work_handler(struct k_work *work);
+static K_WORK_DEFINE(att_tx_destroy_work, att_tx_destroy_work_handler);
+static struct k_spinlock tx_destroy_queue_lock;
+static sys_slist_t tx_destroy_queue = SYS_SLIST_STATIC_INIT(&tx_destroy_queue);
+
+static void att_tx_destroy_work_handler(struct k_work *work)
 {
-	struct bt_att_tx_meta_data *p_meta = att_get_tx_meta_data(buf);
+	struct net_buf *buf;
+	struct bt_att_tx_meta_data *p_meta;
 	struct bt_att_tx_meta_data meta;
+	sys_snode_t *buf_node;
+	k_spinlock_key_t key;
+	bool resubmit;
+
+	key = k_spin_lock(&tx_destroy_queue_lock);
+	buf_node = sys_slist_get(&tx_destroy_queue);
+	/* If there are more items in the queue, they were likely
+	 * queued before this handler started running and coalesced
+	 * into a single work submission, so we need to resubmit
+	 * this work item.
+	 */
+	resubmit = !sys_slist_is_empty(&tx_destroy_queue);
+	k_spin_unlock(&tx_destroy_queue_lock, key);
+
+	/* Spurious wakeups can occur with some thread interleavings. */
+	if (buf_node == NULL) {
+		return;
+	}
+
+	buf = CONTAINER_OF(buf_node, struct net_buf, node);
+	p_meta = att_get_tx_meta_data(buf);
 
 	LOG_DBG("%p", buf);
 
@@ -266,8 +294,8 @@ static void att_tx_destroy(struct net_buf *buf)
 	 */
 	memset(p_meta, 0x00, sizeof(*p_meta));
 
-	/* After this point, p_meta doesn't belong to us.
-	 * The user data will be memset to 0 on allocation.
+	/* After this point, p_meta doesn't belong to us. The user data will
+	 * be memset to 0 on allocation.
 	 */
 	net_buf_destroy(buf);
 
@@ -278,6 +306,51 @@ static void att_tx_destroy(struct net_buf *buf)
 	if (meta.opcode != 0) {
 		att_on_sent_cb(&meta);
 	}
+
+	/* We resubmit this work instead of looping to allow other work on
+	 * the work queue to run.
+	 */
+	if (resubmit) {
+		int err = k_work_submit_to_queue(NULL, work);
+
+		if (err < 0) {
+			LOG_ERR("Failed to re-submit %s: %d", __func__, err);
+			k_oops();
+		}
+	}
+}
+
+static void att_tx_destroy(struct net_buf *buf)
+{
+	int err;
+	k_spinlock_key_t key;
+
+	/* We need to invoke att_on_sent_cb, which may block. We
+	 * don't want to block in a net buf destroy callback, so we
+	 * defer to a sensible workqueue.
+	 *
+	 * bt_workq cannot be used because it currently forms a
+	 * deadlock with att_pool: bt_workq -> bt_att_recv ->
+	 * send_err_rsp waits for att_pool.
+	 *
+	 * We use the system work queue to preserve the earlier
+	 * behavior: the tx_processor used to run on the system work
+	 * queue, and it could end up here via tx_processor ->
+	 * bt_hci_send -> net_buf_unref.
+	 *
+	 * A possible alternative is tx_notify_workqueue_get(), since
+	 * that workqueue processes similar "completion" events.
+	 */
+	key = k_spin_lock(&tx_destroy_queue_lock);
+	sys_slist_append(&tx_destroy_queue, &buf->node);
+	k_spin_unlock(&tx_destroy_queue_lock, key);
+
+	err = k_work_submit(&att_tx_destroy_work);
+	if (err < 0) {
+		LOG_ERR("Failed to submit att_tx_destroy_work: %d", err);
+		k_oops();
+	}
+	/* Continues in att_tx_destroy_work_handler() */
 }
 
 NET_BUF_POOL_DEFINE(att_pool, CONFIG_BT_ATT_TX_COUNT,
diff --git a/subsys/bluetooth/host/hci_core.c b/subsys/bluetooth/host/hci_core.c
index 66ba610bee2b..2d79870e3a0f 100644
--- a/subsys/bluetooth/host/hci_core.c
+++ b/subsys/bluetooth/host/hci_core.c
@@ -472,10 +472,11 @@ int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
 
 	/* TODO: disallow sending sync commands from syswq altogether */
 
-	/* Since the commands are now processed in the syswq, we cannot suspend
-	 * and wait. We have to send the command from the current context.
+	/* If the commands are processed in the syswq and we are on the
+	 * syswq, then we cannot suspend and wait. We have to send the
+	 * command from the current context.
 	 */
-	if (k_current_get() == &k_sys_work_q.thread) {
+	if (!IS_ENABLED(CONFIG_BT_TX_PROCESSOR_THREAD) && k_current_get() == &k_sys_work_q.thread) {
 		/* drain the command queue until we get to send the command of interest. */
 		struct net_buf *cmd = NULL;
 
@@ -5098,24 +5099,99 @@ static bool process_pending_cmd(k_timeout_t timeout)
 static void tx_processor(struct k_work *item)
 {
 	LOG_DBG("TX process start");
+
+	/* Historically, the code in process_pending_cmd() and
+	 * bt_conn_tx_processor() was invoked only from cooperative
+	 * threads. For now, we assume their implementations rely on
+	 * this, and we make the current thread effectively
+	 * cooperative by locking the scheduler.
+	 */
+	k_sched_lock();
+
 	if (process_pending_cmd(K_NO_WAIT)) {
 		/* If we processed a command, let the scheduler run before
 		 * processing another command (or data).
 		 */
 		bt_tx_irq_raise();
-		return;
+		goto exit;
 	}
 
 	/* Hand over control to conn to process pending data */
 	if (IS_ENABLED(CONFIG_BT_CONN_TX)) {
 		bt_conn_tx_processor();
 	}
+
+exit:
+	k_sched_unlock();
 }
 
+/**
+ * This work item shall never be cancelled.
+ */
 static K_WORK_DEFINE(tx_work, tx_processor);
 
+#if defined(CONFIG_BT_TX_PROCESSOR_THREAD)
+static K_THREAD_STACK_DEFINE(bt_tx_processor_stack, CONFIG_BT_TX_PROCESSOR_STACK_SIZE);
+
+/**
+ * This work queue shall never be stopped, drained or plugged.
+ */
+static struct k_work_q bt_tx_processor_workq;
+
+static int bt_tx_processor_init(void)
+{
+	struct k_work_queue_config cfg = {};
+
+	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
+		cfg.name = "bt_tx_processor";
+	}
+
+	k_work_queue_start(&bt_tx_processor_workq, bt_tx_processor_stack,
+			   K_THREAD_STACK_SIZEOF(bt_tx_processor_stack),
+			   CONFIG_BT_TX_PROCESSOR_THREAD_PRIO, &cfg);
+
+	return 0;
+}
+
+/* Priority 999 is the last to run in POST_KERNEL. We don't actually
+ * care when it runs, as long as it is before APPLICATION, when
+ * `bt_enable()` can be called. Running it last lets more urgent
+ * initializations competing for CPU time complete first.
+ */
+SYS_INIT(bt_tx_processor_init, POST_KERNEL, 999);
+#endif /* CONFIG_BT_TX_PROCESSOR_THREAD */
+
+/**
+ * This function shall not be called before init level APPLICATION.
+ */
 void bt_tx_irq_raise(void)
 {
+	int __maybe_unused err;
 	LOG_DBG("kick TX");
+#if defined(CONFIG_BT_TX_PROCESSOR_THREAD)
+	err = k_work_submit_to_queue(&bt_tx_processor_workq, &tx_work);
+	__ASSERT(err >= 0, "%d", err);
+	/* Assertions:
+	 *
+	 * -EBUSY shall not occur because `bt_tx_processor_workq` shall
+	 * never be draining or plugged, and `tx_work` shall never be
+	 * cancelled.
+	 *
+	 * -EINVAL is not possible because taking the address of a
+	 * variable cannot result in a null pointer.
+	 *
+	 * -ENODEV shall not occur because `bt_tx_processor_workq` shall
+	 * never be stopped, is started before init level APPLICATION,
+	 * and this function shall not be called before init level
+	 * APPLICATION.
+	 *
+	 * The above is an exhaustive list of the API errors.
+	 *
+	 * Defensive coding: if any error occurs while asserts are
+	 * disabled, the program will recover as long as a later call
+	 * to bt_tx_irq_raise() succeeds. No cleanup is needed.
+	 */
+#else
 	k_work_submit(&tx_work);
+#endif
 }
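
Note on the att.c change: destruction is deferred from the net_buf destroy
callback to the system work queue through a spinlock-protected slist plus a
self-resubmitting work item. Below is a minimal, self-contained sketch of
that handoff pattern; all names (struct my_item, pending_list, pending_lock,
drain_work, process_item, defer_item) are hypothetical and only illustrate
the pattern, not the actual ATT code.

#include <zephyr/kernel.h>
#include <zephyr/sys/slist.h>

struct my_item {
	sys_snode_t node;
	int payload;
};

static struct k_spinlock pending_lock;
static sys_slist_t pending_list = SYS_SLIST_STATIC_INIT(&pending_list);

static void process_item(struct my_item *item)
{
	/* Stand-in for work that may block, like att_on_sent_cb(). */
	printk("processed item %d\n", item->payload);
}

static void drain_work_handler(struct k_work *work)
{
	struct my_item *item;
	sys_snode_t *node;
	k_spinlock_key_t key;
	bool resubmit;

	key = k_spin_lock(&pending_lock);
	node = sys_slist_get(&pending_list);
	/* Producers that raced with this handler may have coalesced
	 * into a single work submission; resubmit so nothing is lost.
	 */
	resubmit = !sys_slist_is_empty(&pending_list);
	k_spin_unlock(&pending_lock, key);

	if (node == NULL) {
		return; /* Spurious wakeup: already drained. */
	}

	item = CONTAINER_OF(node, struct my_item, node);
	process_item(item);

	if (resubmit) {
		/* A NULL queue resubmits to the queue this work item
		 * last ran on.
		 */
		(void)k_work_submit_to_queue(NULL, work);
	}
}

static K_WORK_DEFINE(drain_work, drain_work_handler);

void defer_item(struct my_item *item)
{
	k_spinlock_key_t key = k_spin_lock(&pending_lock);

	sys_slist_append(&pending_list, &item->node);
	k_spin_unlock(&pending_lock, key);
	(void)k_work_submit(&drain_work);
}

Popping one node per invocation and resubmitting, instead of draining in a
loop, is what the patch comment means by letting other work on the queue
run: the handler never monopolizes the workqueue thread.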
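Note on the hci_core.c change: with CONFIG_BT_TX_PROCESSOR_THREAD enabled,
tx_work is submitted to a dedicated workqueue started once at boot. A
minimal sketch of the same bring-up, assuming hypothetical names (my_workq,
my_workq_stack, MY_WORKQ_PRIO):

#include <zephyr/init.h>
#include <zephyr/kernel.h>

#define MY_WORKQ_STACK_SIZE 1024
/* Negative priorities are cooperative, non-negative are preemptible. */
#define MY_WORKQ_PRIO K_PRIO_PREEMPT(0)

static K_THREAD_STACK_DEFINE(my_workq_stack, MY_WORKQ_STACK_SIZE);
static struct k_work_q my_workq;

static int my_workq_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "my_workq",
	};

	k_work_queue_start(&my_workq, my_workq_stack,
			   K_THREAD_STACK_SIZEOF(my_workq_stack),
			   MY_WORKQ_PRIO, &cfg);
	return 0;
}

/* 999 is the last priority level within POST_KERNEL, so more urgent
 * initializations get the CPU first, yet the queue still exists before
 * application code can run.
 */
SYS_INIT(my_workq_init, POST_KERNEL, 999);

Never stopping, draining or plugging the queue, and never cancelling the
work item, is what makes the assertion block in bt_tx_irq_raise() able to
rule out every documented error return of k_work_submit_to_queue().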
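Note on tx_processor(): the k_sched_lock()/k_sched_unlock() pair makes the
handler behave as if it ran in a cooperative thread, which is what
process_pending_cmd() and bt_conn_tx_processor() historically assumed. A
small sketch of the guard, with hypothetical names (guarded_handler,
guarded_work, kick_guarded_work):

#include <zephyr/kernel.h>

static void guarded_handler(struct k_work *work)
{
	ARG_UNUSED(work);

	/* While the scheduler is locked, the current thread cannot be
	 * preempted by other threads (interrupts still run), so the
	 * section below behaves as if it ran in a cooperative thread.
	 */
	k_sched_lock();

	/* ... code that assumes it is not preempted ... */

	k_sched_unlock();
}

static K_WORK_DEFINE(guarded_work, guarded_handler);

void kick_guarded_work(void)
{
	k_work_submit(&guarded_work);
}

If the guarded code voluntarily blocks, other threads may still run,
exactly as for a cooperative thread; the lock only rules out preemption.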