Skip to content

Commit 50ca2bf

Browse files
committed
drivers: eth_nxp_enet_qos: Refactor RX code
Refactor the RX code to be more cognitively coherent, by splitting it up into smaller functions with readable names, instead of a giant code block with lots of nested conditionals, loops, and complication. The only intended logical change is that, instead of dropping a packet when a new RX buf is not available, we still receive the packet but do not give the descriptor back to the DMA. This means the DMA will likely get suspended upon reaching this descriptor whose buffer could not be replaced, and will have another chance on the next work run. Signed-off-by: Declan Snyder <declan.snyder@nxp.com>
1 parent d7ba203 commit 50ca2bf

File tree

2 files changed

+217
-102
lines changed

2 files changed

+217
-102
lines changed

drivers/ethernet/eth_nxp_enet_qos/eth_nxp_enet_qos_mac.c

Lines changed: 216 additions & 102 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,16 @@ static bool software_owns_descriptor(volatile union nxp_enet_qos_rx_desc *desc)
185185
return (desc->write.control3 & OWN_FLAG) != OWN_FLAG;
186186
}
187187

188+
static bool frame_is_first(volatile union nxp_enet_qos_rx_desc *desc)
189+
{
190+
return (desc->write.control3 & FIRST_DESCRIPTOR_FLAG) == FIRST_DESCRIPTOR_FLAG;
191+
}
192+
193+
static bool frame_is_last(volatile union nxp_enet_qos_rx_desc *desc)
194+
{
195+
return (desc->write.control3 & LAST_DESCRIPTOR_FLAG) == LAST_DESCRIPTOR_FLAG;
196+
}
197+
188198
/* this function resumes the rx dma in case of underrun */
189199
static void enet_qos_dma_rx_resume(const struct device *dev)
190200
{
@@ -208,6 +218,209 @@ static void enet_qos_dma_rx_resume(const struct device *dev)
208218
ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx_data->descriptors[NUM_RX_BUFDESC]));
209219
}
210220

221+
/* treats the frame as a potential start of a new packet.
222+
* returns new net_pkt pointer if successfully received or null otherwise.
223+
*/
224+
static struct net_pkt *enet_qos_start_new_packet(const struct device *dev,
225+
volatile union nxp_enet_qos_rx_desc *desc)
226+
{
227+
struct nxp_enet_qos_mac_data *data = dev->data;
228+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
229+
struct net_pkt *pkt;
230+
231+
/* If it's not the start of a packet, ignore this frame */
232+
if (!frame_is_first(desc)) {
233+
/* since this frame is not the start of a packet, we don't log new packet
234+
* drop error, since the starting frame must have been dropped and logged
235+
* earlier. ie. we only log dropping first descriptor flag frames.
236+
*/
237+
goto skip;
238+
}
239+
240+
pkt = net_pkt_rx_alloc(K_NO_WAIT);
241+
242+
if (!pkt) {
243+
LOG_ERR("Could not alloc new RX pkt");
244+
goto skip;
245+
}
246+
247+
rx_data->processed_pkt_len = 0;
248+
249+
LOG_DBG("New RX pkt %p", pkt);
250+
251+
return pkt;
252+
skip:
253+
return NULL;
254+
}
255+
256+
static void enet_qos_drop_rx_packet(const struct device *dev,
257+
struct net_pkt *pkt)
258+
{
259+
struct nxp_enet_qos_mac_data *data = dev->data;
260+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
261+
262+
eth_stats_update_errors_rx(data->iface);
263+
if (pkt != NULL) {
264+
LOG_WRN("Dropped packet %p", pkt);
265+
net_pkt_unref(pkt);
266+
}
267+
268+
rx_data->processed_pkt_len = 0;
269+
}
270+
271+
static void enet_qos_drop_rx_frame(const struct device *dev,
272+
struct net_pkt *pkt,
273+
volatile union nxp_enet_qos_rx_desc *desc)
274+
{
275+
struct nxp_enet_qos_mac_data *data = dev->data;
276+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
277+
278+
if (rx_data->pkt != NULL) {
279+
/* Only drop packet if we didn't before */
280+
enet_qos_drop_rx_packet(dev);
281+
} else {
282+
LOG_DBG("Dropping frame from error packet");
283+
}
284+
285+
desc->read.control = rx_desc_refresh_flags;
286+
}
287+
288+
static int enet_qos_get_frame_len(const struct device *dev,
289+
volatile union nxp_enet_qos_rx_desc *desc)
290+
{
291+
struct nxp_enet_qos_mac_data *data = dev->data;
292+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
293+
294+
/* The descriptor has the cumulative length of the packet so far, including FCS */
295+
size_t pkt_len = desc->write.control3 & DESC_RX_PKT_LEN;
296+
297+
if (rx_data->processed_pkt_len >= pkt_len) {
298+
LOG_ERR("Packet length glitched, previous %d > new %d",
299+
rx_data->processed_pkt_len, pkt_len);
300+
return -EINVAL;
301+
}
302+
303+
size_t frame_len = pkt_len - rx_data->processed_pkt_len;
304+
305+
if (frame_len > ENET_QOS_RX_BUFFER_SIZE) {
306+
LOG_ERR("Frame len %d too big for max buffer len %d",
307+
frame_len, ENET_QOS_RX_BUFFER_SIZE);
308+
return -E2BIG;
309+
}
310+
311+
return frame_len;
312+
}
313+
314+
static int enet_qos_append_frame_to_packet(const struct device *dev,
315+
struct net_pkt *pkt,
316+
volatile union nxp_enet_qos_rx_desc *desc,
317+
size_t frame_len)
318+
{
319+
struct nxp_enet_qos_mac_data *data = dev->data;
320+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
321+
volatile union nxp_enet_qos_rx_desc *desc_arr = rx_data->descriptors;
322+
int desc_idx = ARRAY_INDEX(desc_arr, desc);
323+
struct net_buf *frame_buf = data->rx.reserved_bufs[desc_idx];
324+
325+
net_buf_add(frame_buf, frame_len);
326+
net_pkt_frag_add(pkt, frame_buf);
327+
328+
return 0;
329+
}
330+
331+
static int enet_qos_pass_up_rx(const struct device *dev, struct net_pkt **pkt)
332+
{
333+
struct nxp_enet_qos_mac_data *data = dev->data;
334+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
335+
int ret = 0;
336+
337+
LOG_DBG("Receiving RX packet");
338+
339+
ret = net_recv_data(data->iface, *pkt);
340+
if (ret != 0) {
341+
LOG_ERR("RECV failed for pkt %p", *pkt);
342+
enet_qos_drop_rx_packet(dev, *pkt);
343+
goto done;
344+
}
345+
346+
eth_stats_update_pkts_rx(data->iface);
347+
348+
done:
349+
rx_data->processed_pkt_len = 0;
350+
*pkt = NULL;
351+
return ret;
352+
}
353+
354+
static void enet_qos_swap_rx_desc_buf(const struct device *dev,
355+
volatile union nxp_enet_qos_rx_desc *desc)
356+
{
357+
struct nxp_enet_qos_mac_data *data = dev->data;
358+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
359+
volatile union nxp_enet_qos_rx_desc *desc_arr = rx_data->descriptors;
360+
int desc_idx = ARRAY_INDEX(desc_arr, desc);
361+
struct net_buf *new_buf;
362+
363+
new_buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
364+
if (new_buf == NULL) {
365+
LOG_WRN("No new RX buf available");
366+
return;
367+
}
368+
369+
data->rx.reserved_bufs[desc_idx] = new_buf;
370+
desc->read.buf1_addr = (uint32_t)new_buf->data;
371+
372+
/* notice we only refresh (give back to DMA if we got a buffer */
373+
desc->read.control = rx_desc_refresh_flags;
374+
}
375+
376+
static void eth_nxp_enet_qos_process_rx_frame(const struct device *dev,
377+
struct net_pkt **pkt,
378+
volatile union nxp_enet_qos_rx_desc *desc)
379+
{
380+
struct nxp_enet_qos_mac_data *data = dev->data;
381+
struct nxp_enet_qos_rx_data *rx_data = &data->rx;
382+
int frame_len = 0;
383+
int ret = 0;
384+
385+
if (*pkt == NULL) {
386+
/* Trying to start a new packet if none is provided */
387+
*pkt = enet_qos_start_new_packet(dev, desc);
388+
}
389+
if (*pkt == NULL) {
390+
/* Still no packet so drop the frame */
391+
goto drop;
392+
}
393+
394+
frame_len = enet_qos_get_frame_len(dev, desc);
395+
if (frame_len < 0) {
396+
goto drop;
397+
}
398+
399+
/* Take the received data and add to the packet */
400+
enet_qos_append_frame_to_packet(dev, *pkt, desc, frame_len);
401+
rx_data->processed_pkt_len += frame_len;
402+
403+
if (!frame_is_last(desc)) {
404+
goto done;
405+
}
406+
407+
/* If this is the last frame of the packet, send it up the stack */
408+
ret = enet_qos_pass_up_rx(dev, pkt);
409+
if (ret != 0) {
410+
goto drop;
411+
}
412+
413+
goto done;
414+
drop:
415+
enet_qos_drop_rx_frame(dev, *pkt, desc);
416+
*pkt = NULL;
417+
return;
418+
done:
419+
/* last thing to do is switch the buf so the DMA doesn't overwrite
420+
* the net buf that the net stack is processing.
421+
*/
422+
enet_qos_swap_rx_desc_buf(dev, desc);
423+
}
211424

212425
static void eth_nxp_enet_qos_rx(struct k_work *work)
213426
{
@@ -220,10 +433,6 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
220433
uint32_t desc_idx = rx_data->next_desc_idx;
221434
volatile union nxp_enet_qos_rx_desc *desc = &desc_arr[desc_idx];
222435
struct net_pkt *pkt = NULL;
223-
struct net_buf *new_buf;
224-
struct net_buf *buf;
225-
size_t pkt_len;
226-
size_t processed_len;
227436

228437
LOG_DBG("RX work start: %p", work);
229438

@@ -233,99 +442,8 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
233442
while (software_owns_descriptor(desc)) {
234443
rx_data->next_desc_idx = (desc_idx + 1U) % NUM_RX_BUFDESC;
235444

236-
if (pkt == NULL) {
237-
if ((desc->write.control3 & FIRST_DESCRIPTOR_FLAG) !=
238-
FIRST_DESCRIPTOR_FLAG) {
239-
LOG_DBG("receive packet mask %X ",
240-
(desc->write.control3 >> 28) & 0x0f);
241-
LOG_ERR("Rx descriptor does not have first descriptor flag, drop");
242-
desc->read.control = rx_desc_refresh_flags;
243-
/* Error statistics for this packet already updated earlier */
244-
goto next;
245-
}
246-
247-
/* Otherwise, we found a packet that we need to process */
248-
pkt = net_pkt_rx_alloc(K_NO_WAIT);
249-
250-
if (!pkt) {
251-
LOG_ERR("Could not alloc new RX pkt");
252-
/* error: no new buffer, reuse previous immediately */
253-
desc->read.control = rx_desc_refresh_flags;
254-
eth_stats_update_errors_rx(data->iface);
255-
goto next;
256-
}
257-
258-
processed_len = 0U;
445+
eth_nxp_enet_qos_process_rx_frame(dev, &pkt, desc);
259446

260-
LOG_DBG("Created new RX pkt %u of %d: %p",
261-
desc_idx + 1U, NUM_RX_BUFDESC, pkt);
262-
}
263-
264-
/* Read the cumulative length of data in this buffer and previous buffers (if any).
265-
* The complete length is in a descriptor with the last descriptor flag set
266-
* (note that it includes four byte FCS as well). This length will be validated
267-
* against processed_len to ensure it's within expected bounds.
268-
*/
269-
pkt_len = desc->write.control3 & DESC_RX_PKT_LEN;
270-
if ((pkt_len < processed_len) ||
271-
((pkt_len - processed_len) > ENET_QOS_RX_BUFFER_SIZE)) {
272-
LOG_ERR("Invalid packet length in descriptor: pkt_len=%u, processed_len=%u",
273-
pkt_len, processed_len);
274-
net_pkt_unref(pkt);
275-
pkt = NULL;
276-
desc->read.control = rx_desc_refresh_flags;
277-
eth_stats_update_errors_rx(data->iface);
278-
goto next;
279-
}
280-
281-
/* We need to know if we can replace the reserved fragment in advance.
282-
* At no point can we allow the driver to have less the amount of reserved
283-
* buffers it needs to function, so we will not give up our previous buffer
284-
* unless we know we can get a new one.
285-
*/
286-
new_buf = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
287-
if (new_buf == NULL) {
288-
/* We have no choice but to lose the previous packet,
289-
* as the buffer is more important. If we recv this packet,
290-
* we don't know what the upper layer will do to our poor buffer.
291-
* drop this buffer, reuse allocated DMA buffer
292-
*/
293-
LOG_ERR("No new RX buf available");
294-
net_pkt_unref(pkt);
295-
pkt = NULL;
296-
desc->read.control = rx_desc_refresh_flags;
297-
eth_stats_update_errors_rx(data->iface);
298-
goto next;
299-
}
300-
301-
/* Append buffer to a packet */
302-
buf = data->rx.reserved_bufs[desc_idx];
303-
net_buf_add(buf, pkt_len - processed_len);
304-
net_pkt_frag_add(pkt, buf);
305-
processed_len = pkt_len;
306-
307-
if ((desc->write.control3 & LAST_DESCRIPTOR_FLAG) == LAST_DESCRIPTOR_FLAG) {
308-
/* Propagate completed packet to network stack */
309-
LOG_DBG("Receiving RX packet");
310-
if (net_recv_data(data->iface, pkt)) {
311-
LOG_ERR("RECV failed");
312-
/* Error during processing, we continue with new buffer */
313-
net_pkt_unref(pkt);
314-
eth_stats_update_errors_rx(data->iface);
315-
} else {
316-
/* Record successfully received packet */
317-
eth_stats_update_pkts_rx(data->iface);
318-
}
319-
pkt = NULL;
320-
}
321-
322-
LOG_DBG("Swap RX buf");
323-
/* Allow receive into a new buffer */
324-
data->rx.reserved_bufs[desc_idx] = new_buf;
325-
desc->read.buf1_addr = (uint32_t)new_buf->data;
326-
desc->read.control = rx_desc_refresh_flags;
327-
328-
next:
329447
desc_idx = rx_data->next_desc_idx;
330448
desc = &desc_arr[desc_idx];
331449
}
@@ -335,15 +453,13 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
335453
* fragment of the packet, deallocate the incomplete one
336454
*/
337455
LOG_DBG("Incomplete packet received, cleaning up");
338-
net_pkt_unref(pkt);
339-
pkt = NULL;
340-
eth_stats_update_errors_rx(data->iface);
456+
enet_qos_drop_rx_packet(dev, pkt);
341457
}
342458

343459
/* now that we updated the descriptors, resume in case we are suspended */
344460
enet_qos_dma_rx_resume(dev);
345461

346-
LOG_DBG("End RX work normally");
462+
LOG_DBG("End RX work");
347463
return;
348464
}
349465

@@ -748,8 +864,6 @@ static const struct device *eth_nxp_enet_qos_get_phy(const struct device *dev)
748864
return config->phy_dev;
749865
}
750866

751-
752-
753867
static int eth_nxp_enet_qos_set_config(const struct device *dev,
754868
enum ethernet_config_type type,
755869
const struct ethernet_config *cfg)

drivers/ethernet/eth_nxp_enet_qos/nxp_enet_qos_priv.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,7 @@ struct nxp_enet_qos_rx_data {
116116
uint32_t next_desc_idx;
117117
volatile union nxp_enet_qos_rx_desc descriptors[NUM_RX_BUFDESC];
118118
struct net_buf *reserved_bufs[NUM_RX_BUFDESC];
119+
size_t processed_pkt_len;
119120
};
120121

121122
struct nxp_enet_qos_mac_data {

0 commit comments

Comments
 (0)