diff --git a/cc2520.h b/cc2520.h
index 304ba86..122342a 100644
--- a/cc2520.h
+++ b/cc2520.h
@@ -29,8 +29,8 @@
 #define CC2520_GPIO_5 -1
 #define CC2520_RESET 17

-#define CC2520_DEBUG_0 21
-#define CC2520_DEBUG_1 18
+#define CC2520_DEBUG_0 4
+#define CC2520_DEBUG_1 3

 // Logical mapping of CC2520 GPIO pins to
 // functions, we're going to keep these static
diff --git a/csma.c b/csma.c
index 425ce80..b0d59b3 100644
--- a/csma.c
+++ b/csma.c
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include <linux/workqueue.h>

 #include "csma.h"
 #include "cc2520.h"
@@ -22,6 +23,9 @@
 static u8 cur_tx_len;
 static spinlock_t state_sl;

+static struct workqueue_struct *wq;
+static struct work_struct work;
+
 enum cc2520_csma_state_enum {
     CC2520_CSMA_IDLE,
     CC2520_CSMA_TX,
@@ -30,12 +34,15 @@
 static int csma_state;

+static unsigned long flags;
+
 static int cc2520_csma_tx(u8 * buf, u8 len);
 static void cc2520_csma_tx_done(u8 status);
 static void cc2520_csma_rx_done(u8 *buf, u8 len);
 static enum hrtimer_restart cc2520_csma_timer_cb(struct hrtimer *timer);
 static void cc2520_csma_start_timer(int us_period);
 static int cc2520_csma_get_backoff(int min, int max);
+static void cc2520_csma_wq(struct work_struct *work);

 int cc2520_csma_init()
 {
@@ -55,6 +62,11 @@
         goto error;
     }

+    wq = alloc_workqueue("csma_wq", WQ_HIGHPRI, 128);
+    if (!wq) {
+        goto error;
+    }
+
     hrtimer_init(&backoff_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
     backoff_timer.function = &cc2520_csma_timer_cb;

@@ -66,6 +78,10 @@
         cur_tx_buf = NULL;
     }

+    if (wq) {
+        destroy_workqueue(wq);
+    }
+
     return -EFAULT;
 }

@@ -76,6 +92,10 @@
         cur_tx_buf = NULL;
     }

+    if (wq) {
+        destroy_workqueue(wq);
+    }
+
     hrtimer_cancel(&backoff_timer);
 }

@@ -101,56 +121,67 @@ static enum hrtimer_restart cc2520_csma_timer_cb(struct hrtimer *timer)
     ktime_t kt;
     int new_backoff;

-    //printk(KERN_INFO "[cc2520] - csma timer fired. \n");
     if (cc2520_radio_is_clear()) {
-        //printk(KERN_INFO "[cc2520] - channel clear, sending.\n");
-        csma_bottom->tx(cur_tx_buf, cur_tx_len);
+        // NOTE: We can absolutely not send from
+        // interrupt context, there's a few places
+        // where we spin lock and assume we can be
+        // preempted. If we're running in atomic mode
+        // that promise is broken. We use a work queue.
+
+        // The workqueue adds about 30uS of latency.
+        INIT_WORK(&work, cc2520_csma_wq);
+        queue_work(wq, &work);
         return HRTIMER_NORESTART;
     }
     else {
-        spin_lock(&state_sl);
+        spin_lock_irqsave(&state_sl, flags);
         if (csma_state == CC2520_CSMA_TX) {
             csma_state = CC2520_CSMA_CONG;
-            spin_unlock(&state_sl);
+            spin_unlock_irqrestore(&state_sl, flags);

             new_backoff = cc2520_csma_get_backoff(backoff_min, backoff_max_cong);
             INFO((KERN_INFO "[cc2520] - channel still busy, waiting %d uS\n", new_backoff));
-            kt=ktime_set(0,1000 * new_backoff);
+            kt = ktime_set(0,1000 * new_backoff);
             hrtimer_forward_now(&backoff_timer, kt);
             return HRTIMER_RESTART;
         }
         else {
             csma_state = CC2520_CSMA_IDLE;
-            spin_unlock(&state_sl);
+            spin_unlock_irqrestore(&state_sl, flags);

-            INFO((KERN_INFO "[cc2520] - csma/ca: channel busy. aborting tx\n"));
             csma_top->tx_done(-CC2520_TX_BUSY);
             return HRTIMER_NORESTART;
         }
     }
 }

+static void cc2520_csma_wq(struct work_struct *work)
+{
+    csma_bottom->tx(cur_tx_buf, cur_tx_len);
+}
+
 static int cc2520_csma_tx(u8 * buf, u8 len)
 {
     int backoff;

-    spin_lock(&state_sl);
+    spin_lock_irqsave(&state_sl, flags);
     if (csma_state == CC2520_CSMA_IDLE) {
         csma_state = CC2520_CSMA_TX;
-        spin_unlock(&state_sl);
+        spin_unlock_irqrestore(&state_sl, flags);

         memcpy(cur_tx_buf, buf, len);
         cur_tx_len = len;

         backoff = cc2520_csma_get_backoff(backoff_min, backoff_max_init);
-        //printk(KERN_INFO "[cc2520] - waiting %d uS to send.\n", backoff);
+        DBG((KERN_INFO "[cc2520] - waiting %d uS to send.\n", backoff));
         cc2520_csma_start_timer(backoff);
     }
     else {
-        spin_unlock(&state_sl);
+        spin_unlock_irqrestore(&state_sl, flags);
+        DBG((KERN_INFO "[cc2520] - csma layer busy.\n"));
         csma_top->tx_done(-CC2520_TX_BUSY);
     }
@@ -159,10 +190,10 @@
 static void cc2520_csma_tx_done(u8 status)
 {
-    spin_lock(&state_sl);
+    spin_lock_irqsave(&state_sl, flags);
     csma_state = CC2520_CSMA_IDLE;
-    spin_unlock(&state_sl);
-    //printk(KERN_INFO "[cc2520] - tx done and successful.\n");
+    spin_unlock_irqrestore(&state_sl, flags);
+
     csma_top->tx_done(status);
 }
diff --git a/interface.c b/interface.c
index ec76489..afcfa9f 100644
--- a/interface.c
+++ b/interface.c
@@ -106,15 +106,11 @@ static ssize_t interface_write(
     // the form of a semaphore.
     interface_bottom->tx(tx_buf_c, pkt_len);
     down(&tx_done_sem);
-    //if (result) {
-    //    return -ERESTARTSYS;
-    //}

     // Step 4: Finally return and allow other callers to write
     // packets.
     DBG((KERN_INFO "[cc2520] - wrote %d bytes.\n", pkt_len));
     up(&tx_sem);
-
     return tx_result ? tx_result : pkt_len;

 error:
diff --git a/lpl.c b/lpl.c
index e376323..4529916 100644
--- a/lpl.c
+++ b/lpl.c
@@ -27,6 +27,8 @@
 static u8 cur_tx_len;
 static spinlock_t state_sl;

+static unsigned long flags;
+
 enum cc2520_lpl_state_enum {
     CC2520_LPL_IDLE,
     CC2520_LPL_TX,
@@ -80,10 +82,10 @@ void cc2520_lpl_free()
 static int cc2520_lpl_tx(u8 * buf, u8 len)
 {
     if (lpl_enabled) {
-        spin_lock(&state_sl);
+        spin_lock_irqsave(&state_sl, flags);
         if (lpl_state == CC2520_LPL_IDLE) {
             lpl_state = CC2520_LPL_TX;
-            spin_unlock(&state_sl);
+            spin_unlock_irqrestore(&state_sl, flags);

             memcpy(cur_tx_buf, buf, len);
             cur_tx_len = len;
@@ -92,7 +94,8 @@ static int cc2520_lpl_tx(u8 * buf, u8 len)
             cc2520_lpl_start_timer();
         }
         else {
-            spin_unlock(&state_sl);
+            spin_unlock_irqrestore(&state_sl, flags);
+            INFO(("[cc2520] - lpl tx busy.\n"));
             lpl_top->tx_done(-CC2520_TX_BUSY);
         }
@@ -106,22 +109,22 @@ static int cc2520_lpl_tx(u8 * buf, u8 len)
 static void cc2520_lpl_tx_done(u8 status)
 {
     if (lpl_enabled) {
-        spin_lock(&state_sl);
+        spin_lock_irqsave(&state_sl, flags);
         if (cc2520_packet_requires_ack_wait(cur_tx_buf)) {
             if (status == CC2520_TX_SUCCESS) {
                 lpl_state = CC2520_LPL_IDLE;
-                spin_unlock(&state_sl);
+                spin_unlock_irqrestore(&state_sl, flags);
                 hrtimer_cancel(&lpl_timer);
                 lpl_top->tx_done(status);
             }
             else if (lpl_state == CC2520_LPL_TIMER_EXPIRED) {
                 lpl_state = CC2520_LPL_IDLE;
-                spin_unlock(&state_sl);
+                spin_unlock_irqrestore(&state_sl, flags);
                 lpl_top->tx_done(-CC2520_TX_FAILED);
             }
             else {
-                spin_unlock(&state_sl);
+                spin_unlock_irqrestore(&state_sl, flags);
                 //printk(KERN_INFO "[cc2520] - lpl retransmit.\n");
                 lpl_bottom->tx(cur_tx_buf, cur_tx_len);
             }
@@ -129,11 +132,11 @@ static void cc2520_lpl_tx_done(u8 status)
         else {
             if (lpl_state == CC2520_LPL_TIMER_EXPIRED) {
                 lpl_state = CC2520_LPL_IDLE;
-                spin_unlock(&state_sl);
+                spin_unlock_irqrestore(&state_sl, flags);
                 lpl_top->tx_done(CC2520_TX_SUCCESS);
             }
             else {
-                spin_unlock(&state_sl);
+                spin_unlock_irqrestore(&state_sl, flags);
                 lpl_bottom->tx(cur_tx_buf, cur_tx_len);
             }
         }
@@ -161,13 +164,13 @@ static void cc2520_lpl_start_timer()
 static enum hrtimer_restart cc2520_lpl_timer_cb(struct hrtimer *timer)
 {
-    spin_lock(&state_sl);
+    spin_lock_irqsave(&state_sl, flags);
     if (lpl_state == CC2520_LPL_TX) {
         lpl_state = CC2520_LPL_TIMER_EXPIRED;
-        spin_unlock(&state_sl);
+        spin_unlock_irqrestore(&state_sl, flags);
     }
     else {
-        spin_unlock(&state_sl);
+        spin_unlock_irqrestore(&state_sl, flags);
         INFO((KERN_INFO "[cc2520] - lpl timer in improbable state.\n"));
     }
diff --git a/platform.c b/platform.c
index e82a38a..309d8a1 100644
--- a/platform.c
+++ b/platform.c
@@ -151,7 +151,7 @@ static irqreturn_t cc2520_sfd_handler(int irq, void *dev_id)
     nanos = timespec_to_ns(&ts);
     gpio_val = gpio_get_value(CC2520_SFD);

-    DBG((KERN_INFO "[cc2520] - sfd interrupt occurred at %lld, %d\n", (long long int)nanos, gpio_val));
+    //DBG((KERN_INFO "[cc2520] - sfd interrupt occurred at %lld, %d\n", (long long int)nanos, gpio_val));

     cc2520_radio_sfd_occurred(nanos, gpio_val);
     return IRQ_HANDLED;
@@ -207,7 +207,6 @@ int cc2520_plat_gpio_init()
         goto fail;

     gpio_set_value(CC2520_DEBUG_0, 0);
-    gpio_set_value(CC2520_DEBUG_1, 0);

     // Setup FIFOP Interrupt
     irq = gpio_to_irq(CC2520_FIFOP);
diff --git a/radio.c b/radio.c
index 7af5336..a5105d6 100644
--- a/radio.c
+++ b/radio.c
@@ -54,6 +54,9 @@
 static spinlock_t rx_buf_sl;

 static int radio_state;
+static unsigned long flags;
+static unsigned long flags1;
+
 enum cc2520_radio_state_enum {
     CC2520_RADIO_STATE_IDLE,
     CC2520_RADIO_STATE_TX,
@@ -91,53 +94,53 @@
 struct cc2520_interface *radio_top;

 void cc2520_radio_lock(int state)
 {
-    spin_lock(&radio_sl);
+    spin_lock_irqsave(&radio_sl, flags1);
     while (radio_state != CC2520_RADIO_STATE_IDLE) {
-        spin_unlock(&radio_sl);
-        spin_lock(&radio_sl);
+        spin_unlock_irqrestore(&radio_sl, flags1);
+        spin_lock_irqsave(&radio_sl, flags1);
     }
     radio_state = state;
-    spin_unlock(&radio_sl);
+    spin_unlock_irqrestore(&radio_sl, flags1);
 }

 void cc2520_radio_unlock(void)
 {
-    spin_lock(&radio_sl);
+    spin_lock_irqsave(&radio_sl, flags1);
     radio_state = CC2520_RADIO_STATE_IDLE;
-    spin_unlock(&radio_sl);
+    spin_unlock_irqrestore(&radio_sl, flags1);
 }

 int cc2520_radio_tx_unlock_spi(void)
 {
-    spin_lock(&radio_sl);
+    spin_lock_irqsave(&radio_sl, flags1);
     if (radio_state == CC2520_RADIO_STATE_TX) {
         radio_state = CC2520_RADIO_STATE_TX_SPI_DONE;
-        spin_unlock(&radio_sl);
+        spin_unlock_irqrestore(&radio_sl, flags1);
         return 0;
     }
     else if (radio_state == CC2520_RADIO_STATE_TX_SFD_DONE) {
         radio_state = CC2520_RADIO_STATE_TX_2_RX;
-        spin_unlock(&radio_sl);
+        spin_unlock_irqrestore(&radio_sl, flags1);
         return 1;
     }
-    spin_unlock(&radio_sl);
+    spin_unlock_irqrestore(&radio_sl, flags1);
     return 0;
 }

 int cc2520_radio_tx_unlock_sfd(void)
 {
-    spin_lock(&radio_sl);
+    spin_lock_irqsave(&radio_sl, flags1);
     if (radio_state == CC2520_RADIO_STATE_TX) {
         radio_state = CC2520_RADIO_STATE_TX_SFD_DONE;
-        spin_unlock(&radio_sl);
+        spin_unlock_irqrestore(&radio_sl, flags1);
         return 0;
     }
     else if (radio_state == CC2520_RADIO_STATE_TX_SPI_DONE) {
         radio_state = CC2520_RADIO_STATE_TX_2_RX;
-        spin_unlock(&radio_sl);
+        spin_unlock_irqrestore(&radio_sl, flags1);
         return 1;
     }
-    spin_unlock(&radio_sl);
+    spin_unlock_irqrestore(&radio_sl, flags1);
     return 0;
 }
@@ -389,14 +392,14 @@ void cc2520_radio_sfd_occurred(u64 nano_timestamp, u8 is_high)
 void cc2520_radio_fifop_occurred()
 {
-    spin_lock(&pending_rx_sl);
+    spin_lock_irqsave(&pending_rx_sl, flags);;

-    if (pending_rx > 0) {
-        spin_unlock(&pending_rx_sl);
+    if (pending_rx) {
+        spin_unlock_irqrestore(&pending_rx_sl, flags);;
     }
     else {
         pending_rx = true;
-        spin_unlock(&pending_rx_sl);
+        spin_unlock_irqrestore(&pending_rx_sl, flags);;
         cc2520_radio_beginRx();
     }
 }
@@ -642,27 +645,26 @@ static void cc2520_radio_flushRx()
     INFO((KERN_INFO "[cc2520] - oversized packet received. clearing.\n"));

-    tsfer1.tx_buf = tx_buf;
-    tsfer1.rx_buf = rx_buf;
-    tsfer1.len = 0;
-    tsfer1.cs_change = 1;
-    tx_buf[tsfer1.len++] = CC2520_CMD_SFLUSHRX;
+    //tsfer1.tx_buf = tx_buf;
+    //tsfer1.rx_buf = rx_buf;
+    rx_tsfer.len = 0;
+    rx_tsfer.cs_change = 1;
+    rx_out_buf[rx_tsfer.len++] = CC2520_CMD_SFLUSHRX;

-    spi_message_init(&msg);
-    msg.complete = cc2520_radio_completeFlushRx;
-    msg.context = NULL;
+    spi_message_init(&rx_msg);
+    rx_msg.complete = cc2520_radio_completeFlushRx;
+    rx_msg.context = NULL;

-    spi_message_add_tail(&tsfer1, &msg);
+    spi_message_add_tail(&rx_tsfer, &rx_msg);

-    status = spi_async(state.spi_device, &msg);
+    status = spi_async(state.spi_device, &rx_msg);
 }

 static void cc2520_radio_completeFlushRx(void *arg)
 {
-
-    spin_lock(&pending_rx_sl);
+    spin_lock_irqsave(&pending_rx_sl, flags);
     pending_rx = false;
-    spin_unlock(&pending_rx_sl);
+    spin_unlock_irqrestore(&pending_rx_sl, flags);
 }

 static void cc2520_radio_finishRx(void *arg)
@@ -687,16 +689,20 @@
     // Pass length of entire buffer to
     // upper layers.
     radio_top->rx_done(rx_buf_r, len + 1);
-    spin_unlock(&rx_buf_sl);

     // Allow for subsequent FIFOP
-    spin_lock(&pending_rx_sl);
+    spin_lock_irqsave(&pending_rx_sl, flags);
     pending_rx = false;
-    spin_unlock(&pending_rx_sl);
+    spin_unlock_irqrestore(&pending_rx_sl, flags);

     DBG((KERN_INFO "[cc2520] - Read %d bytes from radio.\n", len));
 }

+void cc2520_radio_release_rx()
+{
+    spin_unlock(&rx_buf_sl);
+}
+
 //////////////////////////////
 // Helper Routines
 /////////////////////////////
diff --git a/radio.h b/radio.h
index 3817a53..05a70f2 100644
--- a/radio.h
+++ b/radio.h
@@ -18,6 +18,7 @@
 void cc2520_radio_set_channel(int channel);
 void cc2520_radio_set_address(u16 short_addr, u64 extended_addr, u16 pan_id);
 void cc2520_radio_set_txpower(u8 power);
+void cc2520_radio_release_rx(void);
 bool cc2520_radio_is_clear(void);

 // Radio Interrupt Callbacks
diff --git a/sack.c b/sack.c
index 59eb1ac..71b18b2 100644
--- a/sack.c
+++ b/sack.c
@@ -7,6 +7,8 @@
 #include "cc2520.h"
 #include "packet.h"

+#include "radio.h"
+
 struct cc2520_interface *sack_top;
 struct cc2520_interface *sack_bottom;

@@ -33,11 +35,17 @@
 static void cc2520_sack_start_timer(void);

 static u8 *ack_buf;
 static u8 *cur_tx_buf;
+
+static u8 *cur_rx_buf;
+static u8 cur_rx_buf_len;
+
 static struct hrtimer timeout_timer;
 static int ack_timeout; //in microseconds
 static int sack_state;
 static spinlock_t sack_sl;
+static unsigned long flags;
+
 enum cc2520_sack_state_enum {
     CC2520_SACK_IDLE,
     CC2520_SACK_TX, // Waiting for a tx to complete
@@ -61,6 +69,11 @@
         goto error;
     }

+    cur_rx_buf = kmalloc(PKT_BUFF_SIZE, GFP_KERNEL);
+    if (!cur_rx_buf) {
+        goto error;
+    }
+
     hrtimer_init(&timeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
     timeout_timer.function = &cc2520_sack_timer_cb;

@@ -112,43 +125,42 @@
 static int cc2520_sack_tx(u8 * buf, u8 len)
 {
-    spin_lock(&sack_sl);
-
+    spin_lock_irqsave(&sack_sl, flags);
+
     if (sack_state != CC2520_SACK_IDLE) {
         INFO((KERN_INFO "[cc2520] - Ut oh! Tx spinlocking.\n"));
     }

     while (sack_state != CC2520_SACK_IDLE) {
-        spin_unlock(&sack_sl);
-        spin_lock(&sack_sl);
+        spin_unlock_irqrestore(&sack_sl, flags);
+        spin_lock_irqsave(&sack_sl, flags);
     }

     sack_state = CC2520_SACK_TX;
-    spin_unlock(&sack_sl);
+    spin_unlock_irqrestore(&sack_sl, flags);

     memcpy(cur_tx_buf, buf, len);
-
     return sack_bottom->tx(cur_tx_buf, len);
 }

 static void cc2520_sack_tx_done(u8 status)
 {
-    spin_lock(&sack_sl);
+    spin_lock_irqsave(&sack_sl, flags);
     if (sack_state == CC2520_SACK_TX) {
         if (cc2520_packet_requires_ack_wait(cur_tx_buf)) {
             DBG((KERN_INFO "[cc2520] - Entering TX wait state.\n"));
             sack_state = CC2520_SACK_TX_WAIT;
             cc2520_sack_start_timer();
-            spin_unlock(&sack_sl);
+            spin_unlock_irqrestore(&sack_sl, flags);
         }
         else {
             sack_state = CC2520_SACK_IDLE;
-            spin_unlock(&sack_sl);
+            spin_unlock_irqrestore(&sack_sl, flags);
             sack_top->tx_done(status);
         }
     }
     else if (sack_state == CC2520_SACK_TX_ACK) {
         sack_state = CC2520_SACK_IDLE;
-        spin_unlock(&sack_sl);
+        spin_unlock_irqrestore(&sack_sl, flags);
     }
     else {
         ERR((KERN_ALERT "[cc2520] - ERROR: tx_done state engine in impossible state.\n"));
@@ -159,56 +171,60 @@ static void cc2520_sack_rx_done(u8 *buf, u8 len)
 {
     // if this packet we just received requires
     // an ACK, trasmit it.
-    spin_lock(&sack_sl);
+    memcpy(cur_rx_buf, buf, len);
+    cur_rx_buf_len = len;
+    cc2520_radio_release_rx();
+
+    spin_lock_irqsave(&sack_sl, flags);

-    if (cc2520_packet_is_ack(buf)) {
+    if (cc2520_packet_is_ack(cur_rx_buf)) {
         if (sack_state == CC2520_SACK_TX_WAIT &&
-            cc2520_packet_is_ack_to(buf, cur_tx_buf)) {
+            cc2520_packet_is_ack_to(cur_rx_buf, cur_tx_buf)) {
             sack_state = CC2520_SACK_IDLE;
-            spin_unlock(&sack_sl);
+            spin_unlock_irqrestore(&sack_sl, flags);
             hrtimer_cancel(&timeout_timer);
             sack_top->tx_done(CC2520_TX_SUCCESS);
         }
         else {
-            spin_unlock(&sack_sl);
+            spin_unlock_irqrestore(&sack_sl, flags);
             INFO((KERN_INFO "[cc2520] - stray ack received.\n"));
         }
     }
     else {
-        if (cc2520_packet_requires_ack_reply(buf)) {
+        if (cc2520_packet_requires_ack_reply(cur_rx_buf)) {
             if (sack_state == CC2520_SACK_IDLE) {
-                cc2520_packet_create_ack(buf, ack_buf);
+                cc2520_packet_create_ack(cur_rx_buf, ack_buf);
                 sack_state = CC2520_SACK_TX_ACK;
-                spin_unlock(&sack_sl);
+                spin_unlock_irqrestore(&sack_sl, flags);
                 sack_bottom->tx(ack_buf, IEEE154_ACK_FRAME_LENGTH + 1);
-                sack_top->rx_done(buf, len);
+                sack_top->rx_done(cur_rx_buf, cur_rx_buf_len);
             }
             else {
-                spin_unlock(&sack_sl);
+                spin_unlock_irqrestore(&sack_sl, flags);
                 INFO((KERN_INFO "[cc2520] - ACK skipped, soft-ack layer busy. %d \n", sack_state));
             }
         }
         else {
-            spin_unlock(&sack_sl);
-            sack_top->rx_done(buf, len);
+            spin_unlock_irqrestore(&sack_sl, flags);
+            sack_top->rx_done(cur_rx_buf, cur_rx_buf_len);
         }
     }
 }

 static enum hrtimer_restart cc2520_sack_timer_cb(struct hrtimer *timer)
 {
-    spin_lock(&sack_sl);
+    spin_lock_irqsave(&sack_sl, flags);
     if (sack_state == CC2520_SACK_TX_WAIT) {
         DBG((KERN_INFO "[cc2520] - tx ack timeout exceeded.\n"));
         sack_state = CC2520_SACK_IDLE;
-        spin_unlock(&sack_sl);
+        spin_unlock_irqrestore(&sack_sl, flags);
         sack_top->tx_done(-CC2520_TX_ACK_TIMEOUT);
     }
     else {
-        spin_unlock(&sack_sl);
+        spin_unlock_irqrestore(&sack_sl, flags);
     }

     return HRTIMER_NORESTART;