clean up dcd lpc11u

This commit is contained in:
hathach 2018-12-03 12:31:03 +07:00
parent 57f7c18d77
commit f28a15a886
1 changed file with 79 additions and 213 deletions


@@ -40,9 +40,6 @@
#if TUSB_OPT_DEVICE_ENABLED && (CFG_TUSB_MCU == OPT_MCU_LPC11UXX || CFG_TUSB_MCU == OPT_MCU_LPC13XX)
// NOTE: despite being very similar to the lpc13uxx controller, the lpc11u controller cannot queue a transfer larger than
// the endpoint's max packet size and needs a software DMA helper
#include "chip.h"
#include "device/dcd.h"
#include "dcd_lpc11_13_15.h"
@@ -57,6 +54,8 @@
// only SRAM1 & USB RAM can be used for transfer
#define SRAM_REGION 0x20000000
// NOTE: despite being very similar to the lpc13uxx controller, the lpc11u controller cannot queue a transfer larger than
// the endpoint's max packet size and needs a software DMA helper
enum {
DCD_11U_13U_MAX_BYTE_PER_TD = (CFG_TUSB_MCU == OPT_MCU_LPC11UXX ? 64 : 1023)
};
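The NOTE above is the reason this driver needs per-pass splitting: on LPC11Uxx a buffer descriptor can move at most 64 bytes (1023 on LPC13xx), so any larger request has to be re-queued chunk by chunk from the interrupt handler. A minimal stand-alone sketch of that arithmetic, using a hypothetical MAX_BYTES_PER_TD stand-in for the LPC11U value:

#include <stdio.h>

// Hypothetical stand-in for DCD_11U_13U_MAX_BYTE_PER_TD: 64 bytes on LPC11U, 1023 on LPC13xx.
#define MAX_BYTES_PER_TD 64u

// How many times the driver has to (re)queue a buffer descriptor to move
// 'total' bytes -- the "soft DMA helper" work done from the ISR.
static unsigned passes_needed(unsigned total)
{
  return (total == 0) ? 1u : (total + MAX_BYTES_PER_TD - 1u) / MAX_BYTES_PER_TD;
}

int main(void)
{
  printf("  64 bytes -> %u pass(es)\n", passes_needed(64));   // 1
  printf(" 200 bytes -> %u pass(es)\n", passes_needed(200));  // 4: 64+64+64+8
  printf("1024 bytes -> %u pass(es)\n", passes_needed(1024)); // 16
  return 0;
}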
@@ -95,35 +94,22 @@ typedef struct ATTR_PACKED
TU_VERIFY_STATIC( sizeof(ep_cmd_sts_t) == 4, "size is not correct" );
// NOTE: data is transferred as soon as the dcd gets a request via dcd_pipe(_queue)_xfer, using double buffering.
// If there is another dcd_edpt_xfer request, the new request will be saved and executed when the first is done.
// next_td stores the 2nd request's information
// current_td keeps track of the number of remaining & transferred bytes of the current request.
// queued_bytes_in_buff keeps track of the number of bytes queued to each buffer (in case of a short packet)
typedef struct {
ep_cmd_sts_t qhd[EP_COUNT][2]; ///< 256 byte aligned, 2 for double buffer
uint16_t expected_len[EP_COUNT];
// starts at offset 80; its size should not exceed 48 bytes (so the setup packet stays aligned at 128)
struct {
uint16_t buff_addr_offset;
uint16_t total_bytes;
}next_td[EP_COUNT];
uint32_t current_ioc; ///< interrupt on complete mask for current TD
uint32_t next_ioc; ///< interrupt on complete mask for next TD
// must start from 128
ATTR_ALIGNED(64) uint8_t setup_packet[8];
typedef struct
{
// 256-byte aligned, 2 entries for double buffering (double buffering not used)
// Each cmd_sts can only transfer up to 1023 bytes per pass
//
ep_cmd_sts_t ep[EP_COUNT][2];
struct {
uint16_t remaining_bytes; ///< bytes of the current transfer not yet queued
uint16_t xferred_total; ///< xferred bytes of the current transfer
uint16_t xferred_bytes; ///< xferred bytes of the current transfer
uint16_t queued_bytes_in_buff[2]; ///< expected bytes that are queued for each buffer
uint16_t nbytes; // bytes queued in this pass; used to compute how many bytes were actually transferred
}current_td[EP_COUNT];
ATTR_ALIGNED(64) uint8_t setup_packet[8];
}dcd_data_t;
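For illustration, a reduced, hypothetical mirror of the layout constraints called out in the comments above: 80 bytes of command/status entries (assuming EP_COUNT is 10, as the 80-byte figure implies), the SETUP buffer landing at offset 128, and the whole list 256-byte aligned before it is written to EPLISTSTART. Names here are stand-ins, not the driver's own:

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
  uint32_t ep[10][2];                   // stand-in for ep_cmd_sts_t ep[EP_COUNT][2], 80 bytes
  alignas(64) uint8_t setup_packet[8];  // hardware writes the 8-byte SETUP packet here
} demo_dcd_data_t;

// The list itself must be 256-byte aligned before its address goes into EPLISTSTART.
static alignas(256) demo_dcd_data_t demo_dcd;

// The alignment constraints can be checked at compile time.
static_assert(offsetof(demo_dcd_data_t, setup_packet) == 128, "SETUP buffer must land at offset 128");

int main(void)
{
  return (int) ((uintptr_t) &demo_dcd & 0xFF);  // 0 when the instance is 256-byte aligned
}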
//--------------------------------------------------------------------+
@@ -140,9 +126,10 @@ static inline uint16_t addr_offset(void const * buffer)
return ( (addr >> 6) & 0xFFFFUL ) ;
}
static void queue_xfer_to_buffer(uint8_t ep_id, uint8_t buff_idx, uint16_t buff_addr_offset, uint16_t total_bytes);
static void pipe_queue_xfer(uint8_t ep_id, uint16_t buff_addr_offset, uint16_t total_bytes);
static void queue_xfer_in_next_td(uint8_t ep_id);
static inline uint8_t ep_addr2id(uint8_t endpoint_addr)
{
return 2*(endpoint_addr & 0x0F) + ((endpoint_addr & TUSB_DIR_IN_MASK) ? 1 : 0);
}
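The two helpers above do the address bookkeeping: addr_offset() expresses a buffer address as a 16-bit count of 64-byte units (which is also why transfer buffers are expected to be 64-byte aligned), and ep_addr2id() maps a USB endpoint address to the controller's physical endpoint index, 2*number plus 1 for IN. A small self-contained check of both mappings, with demo_ prefixes marking the stand-ins:

#include <stdint.h>
#include <stdio.h>

#define DEMO_DIR_IN_MASK 0x80u  // stand-in for TUSB_DIR_IN_MASK

// Same shape as ep_addr2id() above: physical index = 2*ep_number (+1 for IN).
static uint8_t demo_ep_addr2id(uint8_t ep_addr)
{
  return (uint8_t)(2u * (ep_addr & 0x0Fu) + ((ep_addr & DEMO_DIR_IN_MASK) ? 1u : 0u));
}

// Same shape as addr_offset() above: a buffer address expressed in 64-byte units.
static uint16_t demo_addr_offset(uint32_t addr)
{
  return (uint16_t)((addr >> 6) & 0xFFFFu);
}

int main(void)
{
  printf("EP0 OUT 0x00 -> id %u\n", demo_ep_addr2id(0x00)); // 0
  printf("EP0 IN  0x80 -> id %u\n", demo_ep_addr2id(0x80)); // 1
  printf("EP2 IN  0x82 -> id %u\n", demo_ep_addr2id(0x82)); // 5

  printf("buffer 0x20004000 -> offset 0x%04x\n", demo_addr_offset(0x20004000u)); // 0x0100
  return 0;
}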
//--------------------------------------------------------------------+
// CONTROLLER API
@@ -191,7 +178,7 @@ bool dcd_init(uint8_t rhport)
// Setup PLL clock, and power
Chip_USB_Init();
LPC_USB->EPLISTSTART = (uint32_t) _dcd.qhd;
LPC_USB->EPLISTSTART = (uint32_t) _dcd.ep;
LPC_USB->DATABUFSTART = SRAM_REGION;
LPC_USB->INTSTAT = LPC_USB->INTSTAT; // clear all pending interrupt
@@ -205,60 +192,38 @@ bool dcd_init(uint8_t rhport)
}
//--------------------------------------------------------------------+
// PIPE HELPER
//--------------------------------------------------------------------+
static inline uint8_t edpt_addr2phy(uint8_t endpoint_addr)
{
return 2*(endpoint_addr & 0x0F) + ((endpoint_addr & TUSB_DIR_IN_MASK) ? 1 : 0);
}
#if 0
static inline uint8_t edpt_phy2log(uint8_t physical_endpoint) ATTR_CONST ATTR_ALWAYS_INLINE;
static inline uint8_t edpt_phy2log(uint8_t physical_endpoint)
{
return physical_endpoint/2;
}
#endif
//--------------------------------------------------------------------+
// BULK/INTERRUPT/ISOCHRONOUS PIPE API
// DCD Endpoint Port
//--------------------------------------------------------------------+
void dcd_edpt_stall(uint8_t rhport, uint8_t ep_addr)
{
if ( edpt_number(ep_addr) == 0 )
{
// TODO cannot STALL Control OUT endpoint !!!!! FIXME try some workaround
_dcd.qhd[0][0].stall = _dcd.qhd[1][0].stall = 1;
_dcd.ep[0][0].stall = _dcd.ep[1][0].stall = 1;
}
else
{
uint8_t const ep_id = edpt_addr2phy(edpt_addr);
_dcd.qhd[ep_id][0].stall = _dcd.qhd[ep_id][1].stall = 1;
uint8_t const ep_id = ep_addr2id(ep_addr);
_dcd.ep[ep_id][0].stall = _dcd.ep[ep_id][1].stall = 1;
}
}
bool dcd_edpt_stalled(uint8_t rhport, uint8_t ep_addr)
{
uint8_t const ep_id = edpt_addr2phy(edpt_addr);
return _dcd.qhd[ep_id][0].stall || _dcd.qhd[ep_id][1].stall;
uint8_t const ep_id = ep_addr2id(ep_addr);
return _dcd.ep[ep_id][0].stall || _dcd.ep[ep_id][1].stall;
}
void dcd_edpt_clear_stall(uint8_t rhport, uint8_t edpt_addr)
{
uint8_t const ep_id = edpt_addr2phy(edpt_addr);
uint8_t const ep_id = ep_addr2id(edpt_addr);
// uint8_t active_buffer = BIT_TEST_(LPC_USB->EPINUSE, ep_id) ? 1 : 0;
_dcd.qhd[ep_id][0].stall = _dcd.qhd[ep_id][1].stall = 0;
_dcd.ep[ep_id][0].stall = _dcd.ep[ep_id][1].stall = 0;
// since the next transfer always takes place on buffer0 --> clear buffer0 toggle
_dcd.qhd[ep_id][0].toggle_reset = 1;
_dcd.qhd[ep_id][0].toggle_mode = 0;
//------------- clearing stall must resume any previously queued transfer -------------//
if ( _dcd.next_td[ep_id].total_bytes != 0 )
{
queue_xfer_in_next_td(ep_id);
}
_dcd.ep[ep_id][0].toggle_reset = 1;
_dcd.ep[ep_id][0].toggle_mode = 0;
}
bool dcd_edpt_open(uint8_t rhport, tusb_desc_endpoint_t const * p_endpoint_desc)
@@ -269,113 +234,49 @@ bool dcd_edpt_open(uint8_t rhport, tusb_desc_endpoint_t const * p_endpoint_desc)
if (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_ISOCHRONOUS) return false;
//------------- Prepare Queue Head -------------//
uint8_t ep_id = edpt_addr2phy(p_endpoint_desc->bEndpointAddress);
uint8_t ep_id = ep_addr2id(p_endpoint_desc->bEndpointAddress);
// endpoint must not be previously opened; normally a failure here means running out of endpoints
TU_ASSERT( _dcd.qhd[ep_id][0].disable && _dcd.qhd[ep_id][1].disable );
// Check if endpoint is available
TU_ASSERT( _dcd.ep[ep_id][0].disable && _dcd.ep[ep_id][1].disable );
tu_memclr(_dcd.qhd[ep_id], 2*sizeof(ep_cmd_sts_t));
_dcd.qhd[ep_id][0].is_iso = _dcd.qhd[ep_id][1].is_iso = (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_ISOCHRONOUS);
_dcd.qhd[ep_id][0].disable = _dcd.qhd[ep_id][1].disable = 0;
tu_memclr(_dcd.ep[ep_id], 2*sizeof(ep_cmd_sts_t));
_dcd.ep[ep_id][0].is_iso = _dcd.ep[ep_id][1].is_iso = (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_ISOCHRONOUS);
LPC_USB->EPBUFCFG |= BIT_(ep_id);
LPC_USB->INTEN |= BIT_(ep_id);
// Enable EP interrupt
LPC_USB->INTEN |= BIT_(ep_id);
return true;
}
bool dcd_edpt_busy(uint8_t rhport, uint8_t ep_addr)
{
uint8_t const ep_id = edpt_addr2phy(ep_addr);
return _dcd.qhd[ep_id][0].active || _dcd.qhd[ep_id][1].active;
uint8_t const ep_id = ep_addr2id(ep_addr);
return _dcd.ep[ep_id][0].active || _dcd.ep[ep_id][1].active;
}
static void queue_xfer_to_buffer(uint8_t ep_id, uint8_t buff_idx, uint16_t buff_addr_offset, uint16_t total_bytes)
static void prepare_ep_xfer(uint8_t ep_id, uint16_t buff_addr_offset, uint16_t total_bytes)
{
uint16_t const queued_bytes = tu_min16(total_bytes, DCD_11U_13U_MAX_BYTE_PER_TD);
_dcd.current_td[ep_id].queued_bytes_in_buff[buff_idx] = queued_bytes;
_dcd.current_td[ep_id].remaining_bytes -= queued_bytes;
_dcd.current_td[ep_id].nbytes = queued_bytes;
_dcd.current_td[ep_id].remaining_bytes -= queued_bytes;
_dcd.expected_len[ep_id] = total_bytes;
_dcd.ep[ep_id][0].buffer_offset = buff_addr_offset;
_dcd.ep[ep_id][0].nbytes = queued_bytes;
_dcd.qhd[ep_id][buff_idx].buffer_offset = buff_addr_offset;
_dcd.qhd[ep_id][buff_idx].nbytes = queued_bytes;
_dcd.qhd[ep_id][buff_idx].active = 1;
}
static void pipe_queue_xfer(uint8_t ep_id, uint16_t buff_addr_offset, uint16_t total_bytes)
{
_dcd.current_td[ep_id].remaining_bytes = total_bytes;
_dcd.current_td[ep_id].xferred_total = 0;
_dcd.current_td[ep_id].queued_bytes_in_buff[0] = 0;
_dcd.current_td[ep_id].queued_bytes_in_buff[1] = 0;
LPC_USB->EPINUSE = BIT_CLR_(LPC_USB->EPINUSE , ep_id); // force HW to use buffer0
// buffer1 must be queued first, since activating buffer0 can cause the controller to start transferring immediately
// while buffer1 is not ready yet
if ( total_bytes > DCD_11U_13U_MAX_BYTE_PER_TD)
{
queue_xfer_to_buffer(ep_id, 1, buff_addr_offset+1, total_bytes - DCD_11U_13U_MAX_BYTE_PER_TD);
}
queue_xfer_to_buffer(ep_id, 0, buff_addr_offset, total_bytes);
}
static void queue_xfer_in_next_td(uint8_t ep_id)
{
_dcd.current_ioc |= ( _dcd.next_ioc & BIT_(ep_id) ); // copy next IOC to current IOC
pipe_queue_xfer(ep_id, _dcd.next_td[ep_id].buff_addr_offset, _dcd.next_td[ep_id].total_bytes);
_dcd.next_td[ep_id].total_bytes = 0; // clear this field as it is used to indicate whether the next TD is available
}
tusb_error_t dcd_edpt_queue_xfer(uint8_t ep_id , uint8_t * buffer, uint16_t total_bytes)
{
_dcd.current_ioc = BIT_CLR_(_dcd.current_ioc, ep_id);
pipe_queue_xfer(ep_id, addr_offset(buffer), total_bytes);
return TUSB_ERROR_NONE;
}
bool dcd_control_xfer(uint8_t rhport, uint8_t ep_id, uint8_t * p_buffer, uint16_t length)
{
(void) rhport;
_dcd.current_ioc = BIT_SET_(_dcd.current_ioc, ep_id);
_dcd.current_td[ep_id].remaining_bytes = length;
_dcd.current_td[ep_id].xferred_total = 0;
queue_xfer_to_buffer(ep_id, 0, addr_offset(p_buffer), length);
return true;
_dcd.ep[ep_id][0].active = 1;
}
bool dcd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t* buffer, uint16_t total_bytes)
{
uint8_t const ep_id = edpt_addr2phy(ep_addr);
uint8_t const ep_id = ep_addr2id(ep_addr);
uint16_t buf_offset = addr_offset(buffer);
if ( edpt_number(ep_addr) == 0 )
{
return dcd_control_xfer(rhport, ep_id, buffer, (uint8_t) total_bytes);
}
_dcd.current_td[ep_id].remaining_bytes = total_bytes;
_dcd.current_td[ep_id].xferred_bytes = 0;
_dcd.current_td[ep_id].nbytes = 0;
// if( dcd_edpt_busy(ep_addr) || dcd_edpt_stalled(ep_addr) )
// { // save this transfer data to next td if pipe is busy or already been stalled
// dcd_data.next_td[ep_id].buff_addr_offset = addr_offset(buffer);
// dcd_data.next_td[ep_id].total_bytes = total_bytes;
//
// dcd_data.next_ioc = BIT_SET_(dcd_data.next_ioc, ep_id);
// }else
{
_dcd.current_ioc = BIT_SET_(_dcd.current_ioc, ep_id);
pipe_queue_xfer(ep_id, addr_offset(buffer), total_bytes);
}
prepare_ep_xfer(ep_id, buf_offset, total_bytes);
return true;
}
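A hedged caller-side sketch of how the device stack is expected to drive dcd_edpt_xfer() above: the buffer should be 64-byte aligned (addr_offset() drops the low 6 bits) and live in the RAM region reachable through DATABUFSTART; completion is reported later from the ISR via dcd_event_xfer_complete(). The helper name and the GCC alignment attribute are assumptions for illustration, and the snippet is meant to be compiled alongside the driver:

#include <stdbool.h>
#include <stdint.h>

// Actual driver entry point from this file.
extern bool dcd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t* buffer, uint16_t total_bytes);

// 64-byte aligned so addr_offset() maps it exactly (GCC attribute used here as an assumption).
static uint8_t tx_buf[128] __attribute__((aligned(64)));

void demo_send_report(void)
{
  // Queue 100 bytes on EP1 IN (0x81); the result arrives asynchronously
  // through dcd_event_xfer_complete() from the USB interrupt handler.
  (void) dcd_edpt_xfer(0, 0x81, tx_buf, 100);
}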
@@ -390,13 +291,13 @@ static void bus_reset(void)
// disable all non-control endpoints on bus reset
for(uint8_t ep_id = 2; ep_id < EP_COUNT; ep_id++)
{
_dcd.qhd[ep_id][0].disable = _dcd.qhd[ep_id][1].disable = 1;
_dcd.ep[ep_id][0].disable = _dcd.ep[ep_id][1].disable = 1;
}
_dcd.qhd[0][1].buffer_offset = addr_offset(_dcd.setup_packet);
_dcd.ep[0][1].buffer_offset = addr_offset(_dcd.setup_packet);
LPC_USB->EPINUSE = 0;
LPC_USB->EPBUFCFG = 0; // all start with single buffer
LPC_USB->EPBUFCFG = 0;
LPC_USB->EPSKIP = 0xFFFFFFFF;
LPC_USB->INTSTAT = LPC_USB->INTSTAT; // clear all pending interrupt
@@ -404,75 +305,44 @@ static void bus_reset(void)
LPC_USB->INTEN = INT_DEVICE_STATUS_MASK | BIT_(0) | BIT_(1); // enable device status & control endpoints
}
static void endpoint_non_control_isr(uint32_t int_status)
static void process_xfer_isr(uint32_t int_status)
{
for(uint8_t ep_id = 2; ep_id < EP_COUNT; ep_id++ )
for(uint8_t ep_id = 0; ep_id < EP_COUNT; ep_id++ )
{
if ( BIT_TEST_(int_status, ep_id) )
{
ep_cmd_sts_t * const arr_qhd = _dcd.qhd[ep_id];
ep_cmd_sts_t * ep_cs = &_dcd.ep[ep_id][0];
// with double buffering, the completed buffer is the opposite of the currently active buffer in EPINUSE
uint8_t const buff_idx = LPC_USB->EPINUSE & BIT_(ep_id) ? 0 : 1;
uint16_t const xferred_bytes = _dcd.current_td[ep_id].queued_bytes_in_buff[buff_idx] - arr_qhd[buff_idx].nbytes;
_dcd.current_td[ep_id].xferred_bytes += _dcd.current_td[ep_id].nbytes - ep_cs->nbytes;
_dcd.current_td[ep_id].xferred_total += xferred_bytes;
// there are still data to transfer.
if ( (arr_qhd[buff_idx].nbytes == 0) && (_dcd.current_td[ep_id].remaining_bytes > 0) )
if ( (ep_cs->nbytes == 0) && (_dcd.current_td[ep_id].remaining_bytes > 0) )
{
// NOTE: although buff_addr_offset has already been increased when the xfer completed,
// we still need to increase it once more because we are using double buffering.
queue_xfer_to_buffer(ep_id, buff_idx, arr_qhd[buff_idx].buffer_offset+1, _dcd.current_td[ep_id].remaining_bytes);
// There is more data to transfer.
// buffer_offset has already been advanced by hardware to the correct value for the next transfer
prepare_ep_xfer(ep_id, ep_cs->buffer_offset, _dcd.current_td[ep_id].remaining_bytes);
}
else if ( (arr_qhd[buff_idx].nbytes > 0) || !arr_qhd[1-buff_idx].active )
else
{
// short packet, or (no more bytes and both buffers are finished)
// current TD (request) is completed
LPC_USB->EPSKIP = BIT_SET_(LPC_USB->EPSKIP, ep_id); // skip the other queued buffer in case of a short packet
_dcd.current_td[ep_id].remaining_bytes = 0;
if ( BIT_TEST_(_dcd.current_ioc, ep_id) )
{
_dcd.current_ioc = BIT_CLR_(_dcd.current_ioc, ep_id);
uint8_t const ep_addr = (ep_id / 2) | ((ep_id & 0x01) ? TUSB_DIR_IN_MASK : 0);
// TODO no way to determine whether the transfer failed or not
dcd_event_xfer_complete(0, ep_addr, _dcd.current_td[ep_id].xferred_total, XFER_RESULT_SUCCESS, true);
}
//------------- Next TD is available -------------//
if ( _dcd.next_td[ep_id].total_bytes != 0 )
{
queue_xfer_in_next_td(ep_id);
}
}else
{
// transfer complete: there are no more remaining bytes, but this buffer is not the last transaction (the other one is)
// TODO no way to determine whether the transfer failed or not
dcd_event_xfer_complete(0, ep_addr, _dcd.current_td[ep_id].xferred_bytes, XFER_RESULT_SUCCESS, true);
}
}
}
}
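A minimal model of the per-pass accounting inside process_xfer_isr(): the driver writes the number of bytes it queued into nbytes, the hardware counts that field down as data moves, and the leftover tells it how much actually arrived (a non-zero leftover means a short packet, which completes the transfer). The values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint16_t nbytes_queued   = 64;  // written by prepare_ep_xfer for this pass
  uint16_t nbytes_leftover = 54;  // hypothetical value left by hardware after a short packet

  uint16_t xferred_this_pass = nbytes_queued - nbytes_leftover;  // 10 bytes arrived
  uint16_t xferred_total     = 0;

  xferred_total += xferred_this_pass;

  // leftover != 0 means a short packet: the request is done and the driver
  // reports xferred_total through dcd_event_xfer_complete().
  printf("short packet: %u bytes this pass, %u total\n", xferred_this_pass, xferred_total);
  return 0;
}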
static void endpoint_control_isr(uint32_t int_status)
{
uint8_t const ep_id = ( int_status & BIT_(0) ) ? 0 : 1;
dcd_event_xfer_complete(0, ep_id ? TUSB_DIR_IN_MASK : 0, _dcd.expected_len[ep_id] - _dcd.qhd[ep_id][0].nbytes, XFER_RESULT_SUCCESS, true);
}
void USB_IRQHandler(void)
{
uint32_t const int_enable = LPC_USB->INTEN;
uint32_t const int_status = LPC_USB->INTSTAT & int_enable;
uint32_t const dev_cmd_stat = LPC_USB->DEVCMDSTAT;
uint32_t int_status = LPC_USB->INTSTAT & LPC_USB->INTEN;
LPC_USB->INTSTAT = int_status; // Acknowledge handled interrupt
if (int_status == 0) return;
uint32_t const dev_cmd_stat = LPC_USB->DEVCMDSTAT;
//------------- Device Status -------------//
if ( int_status & INT_DEVICE_STATUS_MASK )
{
@@ -512,30 +382,26 @@ void USB_IRQHandler(void)
// }
}
//------------- Setup Received -------------//
// Setup Receive
if ( BIT_TEST_(int_status, 0) && (dev_cmd_stat & CMDSTAT_SETUP_RECEIVED_MASK) )
{
// received control request from host
// copy setup request & acknowledge so that the next setup can be received by hw
dcd_event_setup_received(0, _dcd.setup_packet, true);
// NXP control flowchart: clear Active & Stall on both Control IN/OUT endpoints
_dcd.qhd[0][0].stall = _dcd.qhd[1][0].stall = 0;
// Follow UM flowchart to clear Active & Stall on both Control IN/OUT endpoints
_dcd.ep[0][0].active = _dcd.ep[1][0].active = 0;
_dcd.ep[0][0].stall = _dcd.ep[1][0].stall = 0;
LPC_USB->DEVCMDSTAT |= CMDSTAT_SETUP_RECEIVED_MASK;
_dcd.qhd[0][1].buffer_offset = addr_offset(_dcd.setup_packet);
}
//------------- Control Endpoint -------------//
else if ( int_status & 0x03 )
{
endpoint_control_isr(int_status);
dcd_event_setup_received(0, _dcd.setup_packet, true);
// keep waiting for next setup
_dcd.ep[0][1].buffer_offset = addr_offset(_dcd.setup_packet);
// clear bit0
int_status = BIT_CLR_(int_status, 0);
}
//------------- Non-Control Endpoints -------------//
if( int_status & ~(0x03UL) )
{
endpoint_non_control_isr(int_status);
}
// Endpoint transfer complete interrupt
process_xfer_isr(int_status);
}
#endif