re-layout ehci_data_t to reduce memory consumption

hathach 2013-03-07 16:06:28 +07:00
parent ef08654e73
commit 66586ffb08
4 changed files with 84 additions and 52 deletions


@@ -305,3 +305,22 @@ void test_open_interrupt_qhd_non_hs(void)
TEST_ASSERT_EQUAL(0x1c, p_qhd->non_hs_interrupt_cmask);
}
//--------------------------------------------------------------------+
// TODO ISOCHRONOUS PIPE
//--------------------------------------------------------------------+
tusb_descriptor_endpoint_t const desc_ept_iso_in =
{
.bLength = sizeof(tusb_descriptor_endpoint_t),
.bDescriptorType = TUSB_DESC_ENDPOINT,
.bEndpointAddress = 0x83,
.bmAttributes = { .xfer = TUSB_XFER_ISOCHRONOUS },
.wMaxPacketSize = 1024,
.bInterval = 1
};
void test_open_isochronous(void)
{
pipe_handle_t pipe_hdl = hcd_pipe_open(dev_addr, &desc_ept_iso_in);
TEST_ASSERT_EQUAL(0, pipe_hdl.dev_addr);
}
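The test expects the null handle (dev_addr == 0) because isochronous pipes are not supported yet. A minimal sketch of the guard inside hcd_pipe_open that would produce that result — the guard itself is not part of this commit, only an assumption about how the TODO could be resolved:

// Hedged sketch, not in this commit: reject transfer types that
// hcd_pipe_open does not handle yet, so an isochronous open fails cleanly.
if ( p_endpoint_desc->bmAttributes.xfer != TUSB_XFER_BULK &&
     p_endpoint_desc->bmAttributes.xfer != TUSB_XFER_INTERRUPT )
{
  return null_handle; // dev_addr == 0 signals failure to the caller
}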


@@ -151,9 +151,9 @@ void test_control_addr0_xfer_get_check_qhd_qtd_mapping(void)
//------------- Code Under TEST -------------//
hcd_pipe_control_xfer(dev_addr, &request_get_dev_desc, xfer_data);
p_setup = &ehci_data.addr0.qtd[0];
p_data = &ehci_data.addr0.qtd[1];
p_status = &ehci_data.addr0.qtd[2];
p_setup = &ehci_data.addr0_qtd[0];
p_data = &ehci_data.addr0_qtd[1];
p_status = &ehci_data.addr0_qtd[2];
TEST_ASSERT_EQUAL_HEX( p_setup, p_qhd->qtd_overlay.next.address );
TEST_ASSERT_EQUAL_HEX( p_setup , p_qhd->p_qtd_list);
@@ -161,7 +161,7 @@ void test_control_addr0_xfer_get_check_qhd_qtd_mapping(void)
TEST_ASSERT_EQUAL_HEX( p_status , p_data->next.address );
TEST_ASSERT_TRUE( p_status->next.terminate );
verify_qtd(p_setup, &ehci_data.addr0.request, 8);
verify_qtd(p_setup, &ehci_data.control_request[0], 8);
}
@@ -180,7 +180,7 @@ void test_control_xfer_get(void)
TEST_ASSERT_TRUE( p_status->next.terminate );
//------------- SETUP -------------//
uint8_t* p_request = (uint8_t *) &ehci_data.device[dev_addr].control.request;
uint8_t* p_request = (uint8_t *) &ehci_data.control_request[dev_addr];
verify_qtd(p_setup, p_request, 8);
TEST_ASSERT_EQUAL_MEMORY(&request_get_dev_desc, p_request, sizeof(tusb_std_request_t));
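Both tests assert the standard three-stage control transfer chain: the qHD overlay points at a SETUP qTD, which links to a DATA qTD, which links to a terminating STATUS qTD. Schematically, using the field names from the diff:

// Chain under test: qtd_overlay.next -> SETUP -> DATA -> STATUS -> T
p_setup->next.address    = (uint32_t) p_data;   // SETUP stage links to DATA
p_data->next.address     = (uint32_t) p_status; // DATA stage links to STATUS
p_status->next.terminate = 1;                   // STATUS stage ends the chain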


@@ -251,6 +251,9 @@ static inline ehci_qhd_t* const get_control_qhd(uint8_t dev_addr) ATTR_ALWAYS_IN
static inline ehci_qtd_t* get_control_qtds(uint8_t dev_addr) ATTR_ALWAYS_INLINE ATTR_PURE ATTR_WARN_UNUSED_RESULT;
static inline tusb_std_request_t* const get_control_request_ptr(uint8_t dev_addr) ATTR_ALWAYS_INLINE ATTR_PURE ATTR_WARN_UNUSED_RESULT;
//--------------------------------------------------------------------+
// CONTROL PIPE API
//--------------------------------------------------------------------+
tusb_error_t hcd_pipe_control_open(uint8_t dev_addr, uint8_t max_packet_size)
{
ehci_qhd_t * const p_qhd = get_control_qhd(dev_addr);
@@ -270,43 +273,6 @@ tusb_error_t hcd_pipe_control_open(uint8_t dev_addr, uint8_t max_packet_size)
return TUSB_ERROR_NONE;
}
pipe_handle_t hcd_pipe_open(uint8_t dev_addr, tusb_descriptor_endpoint_t const * p_endpoint_desc)
{
pipe_handle_t const null_handle = { .dev_addr = 0, .xfer_type = 0, .index = 0 };
//------------- find a free queue head -------------//
uint8_t index=0;
while( index<EHCI_MAX_QHD && ehci_data.device[dev_addr].qhd[index].used )
{
index++;
}
ASSERT( index < EHCI_MAX_QHD, null_handle);
ehci_qhd_t * const p_qhd = &ehci_data.device[dev_addr].qhd[index];
queue_head_init(p_qhd, dev_addr, p_endpoint_desc->wMaxPacketSize, p_endpoint_desc->bEndpointAddress, p_endpoint_desc->bmAttributes.xfer);
ehci_qhd_t * list_head;
if (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_BULK)
{
// TODO might need to disable async list first
list_head = get_async_head(usbh_device_info_pool[dev_addr].core_id);
}else if (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_INTERRUPT)
{
// TODO might need to disable period list first
list_head = get_period_head(usbh_device_info_pool[dev_addr].core_id);
}
//------------- insert to async/period list -------------//
p_qhd->next = list_head->next;
list_head->next.address = (uint32_t) p_qhd;
list_head->next.type = EHCI_QUEUE_ELEMENT_QHD;
return (pipe_handle_t) { .dev_addr = dev_addr, .xfer_type = p_endpoint_desc->bmAttributes.xfer, .index = index};
// return null_handle;
}
static void queue_td_init(ehci_qtd_t* p_qtd, uint32_t data_ptr, uint16_t total_bytes)
{
@@ -364,6 +330,58 @@ tusb_error_t hcd_pipe_control_xfer(uint8_t dev_addr, tusb_std_request_t const *
return TUSB_ERROR_NONE;
}
tusb_error_t hcd_pipe_control_close(uint8_t dev_addr)
{
ehci_qhd_t * const p_qhd = get_control_qhd(dev_addr);
p_qhd->qtd_overlay.halted = 1;
// TODO remove from async list
return TUSB_ERROR_NONE;
}
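The TODO in hcd_pipe_control_close hints at the EHCI unlink procedure: walk the async ring to the predecessor, bypass the halted qHD, then wait for the async-advance doorbell before reclaiming it. A sketch under those assumptions; async_list_remove and the 0x1F link-pointer mask are illustrative, not from this commit:

// Hypothetical sketch of the missing unlink step. Per the EHCI spec, link
// pointers are 32-byte aligned, so the low 5 bits carry the Typ/T flags.
static void async_list_remove(ehci_qhd_t* list_head, ehci_qhd_t* p_qhd)
{
  ehci_qhd_t* prev = list_head;
  while ( (ehci_qhd_t*) (prev->next.address & ~0x1FUL) != p_qhd )
  {
    prev = (ehci_qhd_t*) (prev->next.address & ~0x1FUL); // follow the ring
  }
  prev->next = p_qhd->next; // predecessor now bypasses the removed qHD
  // then set USBCMD.IAAD and wait for the doorbell interrupt before reuse
}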
//--------------------------------------------------------------------+
// BULK/INT/ISO PIPE API
//--------------------------------------------------------------------+
pipe_handle_t hcd_pipe_open(uint8_t dev_addr, tusb_descriptor_endpoint_t const * p_endpoint_desc)
{
pipe_handle_t const null_handle = { .dev_addr = 0, .xfer_type = 0, .index = 0 };
//------------- find a free queue head -------------//
uint8_t index=0;
while( index<EHCI_MAX_QHD && ehci_data.device[dev_addr].qhd[index].used )
{
index++;
}
ASSERT( index < EHCI_MAX_QHD, null_handle);
ehci_qhd_t * const p_qhd = &ehci_data.device[dev_addr].qhd[index];
queue_head_init(p_qhd, dev_addr, p_endpoint_desc->wMaxPacketSize, p_endpoint_desc->bEndpointAddress, p_endpoint_desc->bmAttributes.xfer);
ehci_qhd_t * list_head;
if (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_BULK)
{
// TODO might need to disable async list first
list_head = get_async_head(usbh_device_info_pool[dev_addr].core_id);
}else if (p_endpoint_desc->bmAttributes.xfer == TUSB_XFER_INTERRUPT)
{
// TODO might need to disable period list first
list_head = get_period_head(usbh_device_info_pool[dev_addr].core_id);
}
//------------- insert to async/period list -------------//
p_qhd->next = list_head->next;
list_head->next.address = (uint32_t) p_qhd;
list_head->next.type = EHCI_QUEUE_ELEMENT_QHD;
return (pipe_handle_t) { .dev_addr = dev_addr, .xfer_type = p_endpoint_desc->bmAttributes.xfer, .index = index};
// return null_handle;
}
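A usage sketch of the relocated hcd_pipe_open for a bulk IN endpoint; the descriptor values here are illustrative, mirroring the interrupt and isochronous test fixtures above:

// Hypothetical caller: open a bulk IN pipe on endpoint 1.
tusb_descriptor_endpoint_t const desc_ept_bulk_in =
{
  .bLength          = sizeof(tusb_descriptor_endpoint_t),
  .bDescriptorType  = TUSB_DESC_ENDPOINT,
  .bEndpointAddress = 0x81,                       // IN endpoint 1
  .bmAttributes     = { .xfer = TUSB_XFER_BULK },
  .wMaxPacketSize   = 512,                        // high-speed bulk size
  .bInterval        = 0
};

pipe_handle_t pipe_hdl = hcd_pipe_open(dev_addr, &desc_ept_bulk_in);
// a dev_addr of 0 in the returned handle is the null handle, i.e. failure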
//--------------------------------------------------------------------+
// HELPER
//--------------------------------------------------------------------+
@@ -376,16 +394,14 @@ static inline ehci_qhd_t* const get_control_qhd(uint8_t dev_addr)
static inline ehci_qtd_t* get_control_qtds(uint8_t dev_addr)
{
return (dev_addr == 0) ?
ehci_data.addr0.qtd :
ehci_data.addr0_qtd :
ehci_data.device[ dev_addr ].control.qtd;
}
static inline tusb_std_request_t* const get_control_request_ptr(uint8_t dev_addr)
{
return (dev_addr == 0) ?
&ehci_data.addr0.request :
&ehci_data.device[ dev_addr ].control.request;
return &ehci_data.control_request[dev_addr];
}
static void queue_head_init(ehci_qhd_t *p_qhd, uint8_t dev_addr, uint16_t max_packet_size, uint8_t endpoint_addr, uint8_t xfer_type)
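get_control_request_ptr collapses to a single return because control_request is sized TUSB_CFG_HOST_DEVICE_MAX+1: address zero and every enumerated device address index the same array, so the addr0 special case disappears. An illustrative compile-time check of that sizing assumption (not part of the commit):

// Illustrative check: one request slot per address, including address 0.
_Static_assert( sizeof(((ehci_data_t*)0)->control_request) ==
                (TUSB_CFG_HOST_DEVICE_MAX + 1) * sizeof(tusb_std_request_t),
                "control_request must cover address zero too" );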


@@ -440,17 +440,13 @@ typedef struct {
ehci_qhd_t period_head[CONTROLLER_HOST_NUMBER];
//------------- Data for Address 0 -------------//
struct {
// qhd: addr0 use async head (dummy) as Queue Head
ehci_qtd_t qtd[3];
tusb_std_request_t request;
} addr0;
// qhd: addr0 use async head (dummy) as Queue Head
ehci_qtd_t addr0_qtd[3];
struct {
struct {
ehci_qhd_t qhd;
ehci_qtd_t qtd[3];
tusb_std_request_t request;
}control;
ehci_qhd_t qhd[EHCI_MAX_QHD] ; ///< Queue Head Pool
@@ -459,6 +455,7 @@ typedef struct {
// ehci_sitd_t sitd[EHCI_MAX_SITD] ; ///< Split (FS) Isochronous Transfer Pool
}device[TUSB_CFG_HOST_DEVICE_MAX];
tusb_std_request_t control_request[TUSB_CFG_HOST_DEVICE_MAX+1]; // including address zero, 32-byte alignment breaker
}ehci_data_t;
#ifdef __cplusplus
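The layout change is the point of the commit: qHDs and qTDs must be 32-byte aligned per EHCI, so an 8-byte tusb_std_request_t embedded after them forced tail padding in every per-device struct; hoisting all requests into one trailing array (the "alignment breaker") reclaims that padding. A standalone sketch of the effect, with stand-in types whose sizes are assumptions, not the real tinyusb definitions:

#include <stdint.h>
#include <stdio.h>

// Stand-ins by assumption: an EHCI qTD is 32 bytes and 32-byte aligned,
// a setup request is 8 bytes with no special alignment requirement.
typedef struct { uint32_t word[8]; } __attribute__((aligned(32))) qtd_t;
typedef struct { uint8_t byte[8]; } request_t;

typedef struct { qtd_t qtd[3]; request_t request; } control_old; // embedded
typedef struct { qtd_t qtd[3]; }                    control_new; // hoisted out

int main(void)
{
  // old: 96 bytes of qTDs + 8-byte request, padded up to 128 (32-byte unit)
  // new: 96 bytes, plus one 8-byte slot in the shared control_request array
  printf("old %zu vs new %zu (+8 shared)\n",
         sizeof(control_old), sizeof(control_new));
  return 0;
}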