 #include <linux/version.h>
 
 #include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
 
 #include <linux/cpu.h>
 
 struct dentry *mana_debugfs_root;
@@ -64,6 +65,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
                 mana_gd_init_vf_regs(pdev);
 }
 
+/* Suppress logging when we set timeout to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+        struct hw_channel_context *hwc;
+
+        if (err != -ETIMEDOUT)
+                return true;
+
+        if (!gc)
+                return true;
+
+        hwc = gc->hwc.driver_data;
+        if (hwc && hwc->hwc_timeout == 0)
+                return false;
+
+        return true;
+}
+
 static int mana_gd_query_max_resources(struct pci_dev *pdev)
 {
         struct gdma_context *gc = pci_get_drvdata(pdev);
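
Note: hwc_timeout is the wait budget the HWC send path applies to each
hardware-channel request. mana_serv_reset() (added below) zeroes it because
the HWC stops responding once a reset has been requested, so every
outstanding request fails with -ETIMEDOUT by design; mana_need_log() keeps
that expected noise out of the kernel log. The completion wait in
hw_channel.c is roughly the following (a paraphrase for context, not part of
this patch):

        /* With hwc->hwc_timeout == 0 the wait expires immediately, so the
         * request "fails" with -ETIMEDOUT while the device is in reset.
         */
        if (!wait_for_completion_timeout(&ctx->comp_event,
                                         msecs_to_jiffies(hwc->hwc_timeout)))
                err = -ETIMEDOUT;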
@@ -267,8 +286,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
 
         err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
         if (err || resp.hdr.status) {
-                dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
-                        resp.hdr.status);
+                if (mana_need_log(gc, err))
+                        dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+                                resp.hdr.status);
                 return err ? err : -EPROTO;
         }
 
@@ -353,25 +373,12 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
 
 #define MANA_SERVICE_PERIOD 10
 
-struct mana_serv_work {
-        struct work_struct serv_work;
-        struct pci_dev *pdev;
-};
-
-static void mana_serv_func(struct work_struct *w)
+static void mana_serv_fpga(struct pci_dev *pdev)
 {
-        struct mana_serv_work *mns_wk;
         struct pci_bus *bus, *parent;
-        struct pci_dev *pdev;
-
-        mns_wk = container_of(w, struct mana_serv_work, serv_work);
-        pdev = mns_wk->pdev;
 
         pci_lock_rescan_remove();
 
-        if (!pdev)
-                goto out;
-
         bus = pdev->bus;
         if (!bus) {
                 dev_err(&pdev->dev, "MANA service: no bus\n");
@@ -392,7 +399,74 @@ static void mana_serv_func(struct work_struct *w)
 
 out:
         pci_unlock_rescan_remove();
+}
+
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+        struct gdma_context *gc = pci_get_drvdata(pdev);
+        struct hw_channel_context *hwc;
+
+        if (!gc) {
+                dev_err(&pdev->dev, "MANA service: no GC\n");
+                return;
+        }
+
+        hwc = gc->hwc.driver_data;
+        if (!hwc) {
+                dev_err(&pdev->dev, "MANA service: no HWC\n");
+                goto out;
+        }
+
+        /* HWC is not responding in this case, so don't wait */
+        hwc->hwc_timeout = 0;
+
+        dev_info(&pdev->dev, "MANA reset cycle start\n");
 
+        mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+        msleep(MANA_SERVICE_PERIOD * 1000);
+
+        mana_gd_resume(pdev);
+
+        dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+        gc->in_service = false;
+}
+
+struct mana_serv_work {
+        struct work_struct serv_work;
+        struct pci_dev *pdev;
+        enum gdma_eqe_type type;
+};
+
+static void mana_serv_func(struct work_struct *w)
+{
+        struct mana_serv_work *mns_wk;
+        struct pci_dev *pdev;
+
+        mns_wk = container_of(w, struct mana_serv_work, serv_work);
+        pdev = mns_wk->pdev;
+
+        if (!pdev)
+                goto out;
+
+        switch (mns_wk->type) {
+        case GDMA_EQE_HWC_FPGA_RECONFIG:
+                mana_serv_fpga(pdev);
+                break;
+
+        case GDMA_EQE_HWC_RESET_REQUEST:
+                mana_serv_reset(pdev);
+                break;
+
+        default:
+                dev_err(&pdev->dev, "MANA service: unknown type %d\n",
+                        mns_wk->type);
+                break;
+        }
+
+out:
         pci_dev_put(pdev);
         kfree(mns_wk);
         module_put(THIS_MODULE);
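
Note: whichever service routine runs, the worker still ends by freeing the
work item and dropping the PCI-device and module references taken when the
work was scheduled. The sketch below shows how the two sides pair up; the
helper name, the GFP_ATOMIC allocation, and the try_module_get() call are
illustrative assumptions, only the hunks themselves are from the patch:

        /* Hypothetical scheduling side, mirroring the EQE handler */
        static void sketch_schedule_service(struct gdma_context *gc,
                                            enum gdma_eqe_type type)
        {
                struct mana_serv_work *mns_wk;

                /* Pin the module so the driver cannot be unloaded while
                 * the work is pending (assumed detail).
                 */
                if (!try_module_get(THIS_MODULE))
                        return;

                mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC); /* EQ context */
                if (!mns_wk) {
                        module_put(THIS_MODULE);
                        return;
                }

                mns_wk->pdev = to_pci_dev(gc->dev);
                mns_wk->type = type;
                pci_dev_get(mns_wk->pdev);      /* put in mana_serv_func() */
                INIT_WORK(&mns_wk->serv_work, mana_serv_func);
                schedule_work(&mns_wk->serv_work); /* worker drops module ref */
        }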
@@ -448,6 +522,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
                 break;
 
         case GDMA_EQE_HWC_FPGA_RECONFIG:
+        case GDMA_EQE_HWC_RESET_REQUEST:
                 dev_info(gc->dev, "Recv MANA service type:%d\n", type);
 
                 if (gc->in_service) {
@@ -469,6 +544,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
                 dev_info(gc->dev, "Start MANA service type:%d\n", type);
                 gc->in_service = true;
                 mns_wk->pdev = to_pci_dev(gc->dev);
+                mns_wk->type = type;
                 pci_dev_get(mns_wk->pdev);
                 INIT_WORK(&mns_wk->serv_work, mana_serv_func);
                 schedule_work(&mns_wk->serv_work);
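
Note: the guard visible at the top of the previous hunk (its body falls
outside this excerpt) is what makes recording mns_wk->type once per cycle
safe: a service EQE that arrives while gc->in_service is still set is logged
and ignored, and mana_serv_reset() clears the flag when the cycle finishes.
Paraphrased for context, not part of this diff:

        if (gc->in_service) {
                /* a service action is already in flight; skip this one */
                dev_info(gc->dev, "Already in service\n");
                break;
        }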
@@ -615,7 +691,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
 
         err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
         if (err) {
-                dev_err(dev, "test_eq failed: %d\n", err);
+                if (mana_need_log(gc, err))
+                        dev_err(dev, "test_eq failed: %d\n", err);
                 goto out;
         }
 
@@ -650,7 +727,7 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
 
         if (flush_evenets) {
                 err = mana_gd_test_eq(gc, queue);
-                if (err)
+                if (err && mana_need_log(gc, err))
                         dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
         }
 
@@ -796,8 +873,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
 
         err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
         if (err || resp.hdr.status) {
-                dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
-                        err, resp.hdr.status);
+                if (mana_need_log(gc, err))
+                        dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+                                err, resp.hdr.status);
                 return -EPROTO;
         }
 
@@ -1096,8 +1174,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
 
         err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
         if (err || resp.hdr.status) {
-                dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
-                        err, resp.hdr.status);
+                if (mana_need_log(gc, err))
+                        dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+                                err, resp.hdr.status);
                 if (!err)
                         err = -EPROTO;
         }
@@ -1697,7 +1776,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
 }
 
 /* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
 {
         struct gdma_context *gc = pci_get_drvdata(pdev);
 
@@ -1712,7 +1791,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
  * fail -- if this happens, it's safer to just report an error than try to undo
  * what has been done.
  */
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
 {
         struct gdma_context *gc = pci_get_drvdata(pdev);
         int err;
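
Note: dropping static from mana_gd_suspend() and mana_gd_resume() is what
lets the service worker drive a full suspend/resume cycle from
mana_serv_reset(). The matching declarations presumably land in the shared
GDMA header (not shown in this excerpt):

        /* e.g. in include/net/mana/gdma.h */
        int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
        int mana_gd_resume(struct pci_dev *pdev);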