@@ -221,8 +221,8 @@ static void enet_qos_dma_rx_resume(const struct device *dev)
-/* treats the frame as a potential start of a new packet.
- * returns new net_pkt pointer if successfully received or null otherwise.
- */
-static struct net_pkt *enet_qos_start_new_packet(const struct device *dev,
-						 volatile union nxp_enet_qos_rx_desc *desc)
+/* Treats the frame as a potential start of a new packet. On success the
+ * new net_pkt is stored in rx_data->pkt; otherwise rx_data->pkt stays NULL.
+ */
+static void enet_qos_start_new_rx_packet(const struct device *dev,
+					 volatile union nxp_enet_qos_rx_desc *desc)
 {
 	struct nxp_enet_qos_mac_data *data = dev->data;
 	struct nxp_enet_qos_rx_data *rx_data = &data->rx;
@@ -234,42 +234,46 @@ static struct net_pkt *enet_qos_start_new_packet(const struct device *dev,
 		 * drop error, since the starting frame must have been dropped and logged
 		 * earlier. ie. we only log dropping first descriptor flag frames.
 		 */
-		goto skip;
+		return;
 	}
 
 	pkt = net_pkt_rx_alloc(K_NO_WAIT);
 
 	if (!pkt) {
 		LOG_ERR("Could not alloc new RX pkt");
-		goto skip;
+	} else {
+		LOG_DBG("New RX pkt %p", pkt);
 	}
 
 	rx_data->processed_pkt_len = 0;
+	rx_data->pkt = pkt;
+}
 
-	LOG_DBG("New RX pkt %p", pkt);
+static void enet_qos_finish_rx_packet(const struct device *dev)
+{
+	struct nxp_enet_qos_mac_data *data = dev->data;
+	struct nxp_enet_qos_rx_data *rx_data = &data->rx;
 
-	return pkt;
-skip:
-	return NULL;
+	rx_data->pkt = NULL;
+	rx_data->processed_pkt_len = 0;
 }
 
-static void enet_qos_drop_rx_packet(const struct device *dev,
-				    struct net_pkt *pkt)
+static void enet_qos_drop_rx_packet(const struct device *dev)
 {
 	struct nxp_enet_qos_mac_data *data = dev->data;
 	struct nxp_enet_qos_rx_data *rx_data = &data->rx;
+	struct net_pkt *pkt = rx_data->pkt;
 
 	eth_stats_update_errors_rx(data->iface);
 	if (pkt != NULL) {
 		LOG_WRN("Dropped packet %p", pkt);
 		net_pkt_unref(pkt);
 	}
 
-	rx_data->processed_pkt_len = 0;
+	enet_qos_finish_rx_packet(dev);
 }
 
 static void enet_qos_drop_rx_frame(const struct device *dev,
-				   struct net_pkt *pkt,
 				   volatile union nxp_enet_qos_rx_desc *desc)
 {
 	struct nxp_enet_qos_mac_data *data = dev->data;
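Note: the helpers above read and write rx_data->pkt, a field this change evidently adds to struct nxp_enet_qos_rx_data in the driver's header (that hunk is not part of this excerpt). A minimal compilable sketch of the assumed state, reduced to the two fields the diff touches; the real struct also holds the descriptor ring, reserved bufs, work item, and so on:

	#include <stddef.h>

	struct net_pkt; /* Zephyr's packet type, from <zephyr/net/net_pkt.h> */

	/* Hedged sketch, not the driver's actual definition. */
	struct nxp_enet_qos_rx_data_sketch {
		struct net_pkt *pkt;      /* RX packet being assembled, NULL if none */
		size_t processed_pkt_len; /* bytes appended to pkt so far */
	};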
@@ -312,7 +316,6 @@ static int enet_qos_get_frame_len(const struct device *dev,
 }
 
 static int enet_qos_append_frame_to_packet(const struct device *dev,
-					   struct net_pkt *pkt,
 					   volatile union nxp_enet_qos_rx_desc *desc,
 					   size_t frame_len)
 {
@@ -321,33 +324,33 @@ static int enet_qos_append_frame_to_packet(const struct device *dev,
 	volatile union nxp_enet_qos_rx_desc *desc_arr = rx_data->descriptors;
 	int desc_idx = ARRAY_INDEX(desc_arr, desc);
 	struct net_buf *frame_buf = data->rx.reserved_bufs[desc_idx];
+	struct net_pkt *pkt = rx_data->pkt;
 
 	net_buf_add(frame_buf, frame_len);
 	net_pkt_frag_add(pkt, frame_buf);
 
 	return 0;
 }
 
-static int enet_qos_pass_up_rx(const struct device *dev, struct net_pkt **pkt)
+static int enet_qos_pass_up_rx(const struct device *dev)
 {
 	struct nxp_enet_qos_mac_data *data = dev->data;
 	struct nxp_enet_qos_rx_data *rx_data = &data->rx;
 	int ret = 0;
 
 	LOG_DBG("Receiving RX packet");
 
-	ret = net_recv_data(data->iface, *pkt);
+	ret = net_recv_data(data->iface, rx_data->pkt);
 	if (ret != 0) {
-		LOG_ERR("RECV failed for pkt %p", *pkt);
-		enet_qos_drop_rx_packet(dev, *pkt);
+		LOG_ERR("RECV failed for pkt %p", rx_data->pkt);
+		enet_qos_drop_rx_packet(dev);
 		goto done;
 	}
 
 	eth_stats_update_pkts_rx(data->iface);
 
 done:
-	rx_data->processed_pkt_len = 0;
-	*pkt = NULL;
+	enet_qos_finish_rx_packet(dev);
 	return ret;
 }
 
@@ -374,19 +377,18 @@ static void enet_qos_swap_rx_desc_buf(const struct device *dev,
 }
 
 static void eth_nxp_enet_qos_process_rx_frame(const struct device *dev,
-					      struct net_pkt **pkt,
 					      volatile union nxp_enet_qos_rx_desc *desc)
 {
 	struct nxp_enet_qos_mac_data *data = dev->data;
 	struct nxp_enet_qos_rx_data *rx_data = &data->rx;
 	int frame_len = 0;
 	int ret = 0;
 
-	if (*pkt == NULL) {
+	if (rx_data->pkt == NULL) {
 		/* Trying to start a new packet if none is provided */
-		*pkt = enet_qos_start_new_packet(dev, desc);
+		enet_qos_start_new_rx_packet(dev, desc);
 	}
-	if (*pkt == NULL) {
+	if (rx_data->pkt == NULL) {
 		/* Still no packet so drop the frame */
 		goto drop;
 	}
@@ -397,23 +399,22 @@ static void eth_nxp_enet_qos_process_rx_frame(const struct device *dev,
 	}
 
 	/* Take the received data and add to the packet */
-	enet_qos_append_frame_to_packet(dev, *pkt, desc, frame_len);
+	enet_qos_append_frame_to_packet(dev, desc, frame_len);
 	rx_data->processed_pkt_len += frame_len;
 
 	if (!frame_is_last(desc)) {
 		goto done;
 	}
 
 	/* If this is the last frame of the packet, send it up the stack */
-	ret = enet_qos_pass_up_rx(dev, pkt);
+	ret = enet_qos_pass_up_rx(dev);
 	if (ret != 0) {
 		goto drop;
 	}
 
 	goto done;
 drop:
-	enet_qos_drop_rx_frame(dev, *pkt, desc);
-	*pkt = NULL;
+	enet_qos_drop_rx_frame(dev, desc);
 	return;
 done:
 	/* last thing to do is switch the buf so the DMA doesn't overwrite
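Taken together, eth_nxp_enet_qos_process_rx_frame now drives a small state machine whose only state is rx_data->pkt and rx_data->processed_pkt_len. The self-contained model below (plain C, no Zephyr dependencies, every name invented for illustration) shows the same pattern: because the in-progress packet lives in a persistent context rather than a stack variable, assembly can pause after any frame and resume on a later call.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define PKT_CAP 1536 /* arbitrary max packet size for the model */

	/* Persistent RX context, standing in for nxp_enet_qos_rx_data. */
	struct rx_ctx {
		char *pkt;  /* in-progress packet, NULL if none */
		size_t len; /* bytes accumulated so far */
	};

	/* Mirrors enet_qos_finish_rx_packet: reset state for the next packet. */
	static void rx_finish(struct rx_ctx *ctx)
	{
		ctx->pkt = NULL;
		ctx->len = 0;
	}

	/* Mirrors the process_rx_frame flow: start on the first frame,
	 * append each frame, deliver on the last one.
	 */
	static void rx_frame(struct rx_ctx *ctx, const char *frag, size_t n, int last)
	{
		if (ctx->pkt == NULL) {
			ctx->pkt = malloc(PKT_CAP); /* start a new packet */
			ctx->len = 0;
		}
		if (ctx->pkt == NULL || ctx->len + n > PKT_CAP) {
			return; /* allocation failed or oversized: drop the frame */
		}

		memcpy(ctx->pkt + ctx->len, frag, n);
		ctx->len += n;

		if (last) {
			printf("deliver %zu bytes: %.*s\n", ctx->len, (int)ctx->len, ctx->pkt);
			free(ctx->pkt);
			rx_finish(ctx);
		}
		/* If not last, the partial packet persists in ctx until the
		 * next call, as rx_data->pkt now persists across RX work items.
		 */
	}

	int main(void)
	{
		struct rx_ctx ctx = { 0 };

		/* one packet split across two frames, as with multi-descriptor RX */
		rx_frame(&ctx, "hello ", 6, 0);
		rx_frame(&ctx, "world", 5, 1);
		return 0;
	}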
@@ -432,7 +433,6 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
 	volatile union nxp_enet_qos_rx_desc *desc_arr = data->rx.descriptors;
 	uint32_t desc_idx = rx_data->next_desc_idx;
 	volatile union nxp_enet_qos_rx_desc *desc = &desc_arr[desc_idx];
-	struct net_pkt *pkt = NULL;
 
 	LOG_DBG("RX work start: %p", work);
 
@@ -442,19 +442,11 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
 	while (software_owns_descriptor(desc)) {
 		rx_data->next_desc_idx = (desc_idx + 1U) % NUM_RX_BUFDESC;
 
-		eth_nxp_enet_qos_process_rx_frame(dev, &pkt, desc);
+		eth_nxp_enet_qos_process_rx_frame(dev, desc);
 
 		desc_idx = rx_data->next_desc_idx;
 		desc = &desc_arr[desc_idx];
 	}
 
-	if (pkt != NULL) {
-		/* Looped through descriptors without reaching the final
-		 * fragment of the packet, deallocate the incomplete one
-		 */
-		LOG_DBG("Incomplete packet received, cleaning up");
-		enet_qos_drop_rx_packet(dev, pkt);
-	}
-
 	/* now that we updated the descriptors, resume in case we are suspended */
 	enet_qos_dma_rx_resume(dev);
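One behavioral consequence worth noting: the old handler dropped any packet whose final descriptor had not yet been written back when the loop ran dry ("Incomplete packet received, cleaning up"). With the in-progress packet stored in rx_data->pkt, that cleanup is gone; a partially assembled packet survives the work handler and is continued when the next RX work item processes the remaining frames.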