-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathdhd_msgbuf.c
18048 lines (15558 loc) · 556 KB
/
dhd_msgbuf.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/**
* @file definition of host message ring functionality
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
* Copyright (C) 2023, Broadcom.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
*
* <<Broadcom-WL-IPTag/Dual:>>
*/
#include <typedefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
#include <bcmstdlib_s.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>
#include <dhd_bus.h>
#include <dhd_dbg.h>
#include <siutils.h>
#include <dhd_debug.h>
#ifdef EXT_STA
#include <wlc_cfg.h>
#include <wlc_pub.h>
#include <wl_port_if.h>
#endif /* EXT_STA */
#include <dhd_flowring.h>
#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
#include <dhd_plat.h>
#ifdef DHD_TIMESYNC
#include <dhd_timesync.h>
#endif /* DHD_TIMESYNC */
#if defined(DHD_LB)
#if !defined(__linux__)
#error "DHD Loadbalancing only supported on LINUX | OEM_ANDROID"
#endif /* !__linux__ */
#include <linux/cpu.h>
#include <bcm_ring.h>
#define DHD_LB_WORKQ_SZ (8192)
#define DHD_LB_WORKQ_SYNC (16)
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */
#include <etd.h>
#include <hnd_debug.h>
#include <bcmtlv.h>
#include <hnd_armtrap.h>
#include <dnglevent.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#include <dhd_linux_pktdump.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_EWPR_VER2
#include <dhd_bitpack.h>
#endif /* DHD_EWPR_VER2 */
#if defined(DHD_MESH)
#include <dhd_mesh_route.h>
#endif /* defined(DHD_MESH) */
/* Driver and firmware version strings; defined elsewhere, used in debug dumps */
extern char dhd_version[];
extern char fw_version[];
/**
* Host configures a soft doorbell for d2h rings, by specifying a 32bit host
* address where a value must be written. Host may also interrupt coalescing
* on this soft doorbell.
* Use Case: Hosts with network processors, may register with the dongle the
* network processor's thread wakeup register and a value corresponding to the
* core/thread context. Dongle will issue a write transaction <address,value>
* to the PCIE RC which will need to be routed to the mapped register space, by
* the host.
*/
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
/* For legacy firmware where pcie shared structure does not have max_host_rxbufs, use this */
#define LEGACY_MAX_RXBUFPOST 256u
/* Read index update Magic sequence.
 * FIX: the literal was 0xDDDDDDDDAu (9 hex digits = 36 bits), which does not fit
 * in a 32-bit doorbell value and silently promotes to a 64-bit type. Trimmed to
 * 8 digits, matching the width of DHD_AGGR_H2D_DB_MAGIC below.
 */
#define DHD_DMA_INDX_SEQ_H2D_DB_MAGIC 0xDDDDDDDAu
/* Per-ring rd/wr update doorbell values: 0xDD/0xFF tag, ring idx, and index.
 * The `ring` argument is now parenthesized for macro hygiene.
 */
#define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring) (0xDD000000 | ((ring)->idx << 16u) | (ring)->rd)
/* Write index update Magic sequence */
#define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring) (0xFF000000 | ((ring)->idx << 16u) | (ring)->wr)
#define DHD_AGGR_H2D_DB_MAGIC 0xFFFFFFFAu
/* Tx queue flow-control watermarks (pending packet counts) */
#define DHD_STOP_QUEUE_THRESHOLD 200
#define DHD_START_QUEUE_THRESHOLD 100
#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
#define DHD_IOCTL_REQ_PKTBUFSZ 2048
/* Max ioctl request payload = request buffer minus the H2D ctrl work-item header */
#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
/**
* DMA_ALIGN_LEN use is overloaded:
* - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4
* - in ensuring that a buffer's va is 4 Byte aligned
* - in rounding up a buffer length to 4 Bytes.
*/
#define DMA_ALIGN_LEN 4
#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_XFER_LEN_LIMIT 0x400000
#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN 0x200000
#endif
#endif /* BCM_HOST_BUF */
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
#define DHD_H2D_INFORING_MAX_BUF_POST 32
#ifdef BTLOG
#define DHD_H2D_BTLOGRING_MAX_BUF_POST 32
#endif /* BTLOG */
#define DHD_MAX_TSBUF_POST 8
#define DHD_PROT_FUNCS 50
/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_LEN (2048u)
#define DHD_BUS_TPUT_BUF_LEN (64u * 1024u)
#define TXP_FLUSH_NITEMS
/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
#define RING_NAME_MAX_LENGTH 24
#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
/* Giving room before ioctl_trans_id rollsover. */
#define BUFFER_BEFORE_ROLLOVER 300
/* 512K memory + 32K registers */
#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
struct msgbuf_ring; /* ring context for common and flow rings */
#ifdef DHD_HMAPTEST
/* 5 * DMA_CONSISTENT_ALIGN as different tests use upto 4th page */
#define HMAP_SANDBOX_BUFFER_LEN (DMA_CONSISTENT_ALIGN * 5) /* for a 4k page this is 20K */
/**
 * For D11 DMA HMAPTEST these states are as follows:
 * - an iovar sets the ACTIVE state
 * - the next TXPOST / RXPOST sets the POSTED state
 * - on TXCPL / RXCPL, a POSTED state + pktid match frees the buffer and the
 *   state changes to INACTIVE
 * This ensures that on an iovar only one buffer is replaced from the sandbox area.
 */
#define HMAPTEST_D11_TX_INACTIVE 0
#define HMAPTEST_D11_TX_ACTIVE 1
#define HMAPTEST_D11_TX_POSTED 2
#define HMAPTEST_D11_RX_INACTIVE 0
#define HMAPTEST_D11_RX_ACTIVE 1
#define HMAPTEST_D11_RX_POSTED 2
#endif /* DHD_HMAPTEST */
/* DMA loopback test modes */
#define PCIE_DMA_LOOPBACK 0
#define D11_DMA_LOOPBACK 1
#define BMC_DMA_LOOPBACK 2
/**
 * PCIE D2H DMA Complete Sync Modes
 *
 * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
 * Host system memory. A WAR using one of the 4 approaches below is needed:
 * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
 * writes in the last word of each work item. Each work item has a seqnum
 * number = sequence num % 253.
 *
 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
 * interrupt, ensuring that D2H data transfer indeed completed.
 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
 * ring contents before the indices.
 *
 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
 * callback (see dhd_prot_d2h_sync_none) may be bound.
 *
 * Dongle advertizes host side sync mechanism requirements.
 */
#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
/**
 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
/* Dumps common-ring state for debug; implemented later in this file */
void dhd_prot_debug_ring_info(dhd_pub_t *dhd);
/**
 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
 * For EDL messages.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
#ifdef EWP_EDL
typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg);
#endif /* EWP_EDL */
/*
* +----------------------------------------------------------------------------
*
* RingIds and FlowId are not equivalent as ringids include D2H rings whereas
* flowids do not.
*
* Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
* the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
*
* Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
* BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
* BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings.
*
* H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
* H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
*
* D2H Control Complete RingId = 2
* D2H Transmit Complete RingId = 3
* D2H Receive Complete RingId = 4
*
* H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
* H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
* H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
*
* When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
* unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
*
* Example: when a system supports 4 bc/mc and 128 uc flowrings, with
* BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_COMMON_MSGRINGS = 5, the
* FlowId values would be in the range [2..133] and the corresponding
* RingId values would be in the range [5..136].
*
* The flowId allocator, may chose to, allocate Flowids:
* bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
* X# of uc flowids in consecutive ranges (per station Id), where X is the
* packet's access category (e.g. 4 uc flowids per station).
*
* CAUTION:
* When DMA indices array feature is used, RingId=5, corresponding to the 0th
* FLOWRING, will actually use the FlowId as index into the H2D DMA index,
* since the FlowId truly represents the index in the H2D DMA indices array.
*
* Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
* will represent the index in the D2H DMA indices array.
*
* +----------------------------------------------------------------------------
*/
/* First TxPost Flowring Id */
#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
/* Determine whether a ringid belongs to a TxPost flowring */
#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
#define DHD_RINGID_TO_FLOWID(ringid) \
(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
 * any array of H2D rings.
 */
#define DHD_H2D_RING_OFFSET(ringid) \
(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
 * This may be used for IFRM.
 */
#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
((ringid) - BCMPCIE_COMMON_MSGRINGS)
/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
 * any array of D2H rings.
 * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
 * max_h2d_rings: total number of h2d rings
 *
 * FIX: the second use of the macro parameter max_h2d_rings was unparenthesized,
 * which mis-expands for compound arguments (e.g. a + b); now parenthesized.
 */
#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
((ringid) > (max_h2d_rings) ? \
((ringid) - (max_h2d_rings)) : \
((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
/* The ringid and flowid and dma indices array index idiosyncracy is error
 * prone. While a simplification is possible, the backward compatability
 * requirement (DHD should operate with any PCIE rev version of firmware),
 * limits what may be accomplished.
 *
 * At the minimum, implementation should use macros for any conversions
 * facilitating introduction of future PCIE FD revs that need more "common" or
 * other dynamic rings.
 */
/*
* Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able
* buffer does not occupy the entire cacheline, and another object is placed
* following the DMA-able buffer, data corruption may occur if the DMA-able
* buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency
* is not available.
*/
/* Cacheline-sized pad appended to DMA-able buffers (see comment above);
 * falls back to 128 bytes when the platform does not expose L1_CACHE_BYTES.
 */
#if defined(L1_CACHE_BYTES)
#define DHD_DMA_PAD (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD (128)
#endif
/*
* +----------------------------------------------------------------------------
* Flowring Pool
*
* Unlike common rings, which are attached very early on (dhd_prot_attach),
* flowrings are dynamically instantiated. Moreover, flowrings may require a
* larger DMA-able buffer. To avoid issues with fragmented cache coherent
* DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
* The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
*
* Each DMA-able buffer may be allocated independently, or may be carved out
* of a single large contiguous region that is registered with the protocol
* layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
* may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
*
* No flowring pool action is performed in dhd_prot_attach(), as the number
* of h2d rings is not yet known.
*
* In dhd_prot_init(), the dongle advertized number of h2d rings is used to
* determine the number of flowrings required, and a pool of msgbuf_rings are
* allocated and a DMA-able buffer (carved or allocated) is attached.
* See: dhd_prot_flowrings_pool_attach()
*
* A flowring msgbuf_ring object may be fetched from this pool during flowring
* creation, using the flowid. Likewise, flowrings may be freed back into the
* pool on flowring deletion.
* See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
*
* In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
* are detached (returned back to the carved region or freed), and the pool of
* msgbuf_ring and any objects allocated against it are freed.
* See: dhd_prot_flowrings_pool_detach()
*
* In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
* state as-if upon an attach. All DMA-able buffers are retained.
* Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
* pool attach will notice that the pool persists and continue to use it. This
* will avoid the case of a fragmented DMA-able region.
*
* +----------------------------------------------------------------------------
*/
/* Conversion of a flowid to a flowring pool index */
#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
/* Fetch the msgbuf_ring_t from the flowring pool given a flowid.
 * FIX: the expansion is now wrapped in outer parentheses so the pointer
 * arithmetic cannot rebind when the macro is embedded in a larger expression.
 */
#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
((msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
DHD_FLOWRINGS_POOL_OFFSET(flowid))
/* Traverse each flowring in the flowring pool, assigning ring and flowid */
#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
for ((flowid) = DHD_FLOWRING_START_FLOWID, \
(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
(flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
(ring)++, (flowid)++)
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
dhd_dma_buf_t srcmem; /* DMA source buffer */
dhd_dma_buf_t dstmem; /* DMA destination buffer */
uint32 srcdelay; /* artificial delay on the source side */
uint32 destdelay; /* artificial delay on the destination side */
uint32 len; /* transfer length in bytes */
bool in_progress; /* TRUE while a loopback transfer is outstanding */
uint64 start_usec; /* transfer start timestamp (usec) */
uint64 time_taken; /* measured transfer duration */
uint32 d11_lpbk; /* loopback mode: PCIE/D11/BMC_DMA_LOOPBACK */
int status; /* completion status reported back */
} dhd_dmaxfer_t;
#ifdef DHD_HMAPTEST
/* Used in HMAP test */
typedef struct dhd_hmaptest {
dhd_dma_buf_t mem; /* sandbox DMA buffer used by the test */
uint32 len; /* test access length */
bool in_progress; /* TRUE while a HMAP test is outstanding */
uint32 is_write; /* non-zero for write access, zero for read */
uint32 accesstype; /* which engine performs the access (iovar-selected) */
uint64 start_usec; /* test start timestamp (usec) */
uint32 offset; /* offset into the sandbox buffer */
} dhd_hmaptest_t;
#endif /* DHD_HMAPTEST */
#ifdef TX_FLOW_RING_INDICES_TRACE
/* By default 4K, which can be overridden by Makefile */
#ifndef TX_FLOW_RING_INDICES_TRACE_SIZE
#define TX_FLOW_RING_INDICES_TRACE_SIZE (4 * 1024)
#endif /* TX_FLOW_RING_INDICES_TRACE_SIZE */
/* One snapshot of a tx flowring's read/write indices, for post-mortem tracing */
typedef struct rw_trace {
uint64 timestamp; /* time of snapshot */
uint32 err_rollback_idx_cnt; /* count of index-rollback errors seen */
uint16 rd; /* shared mem or dma rd */
uint16 wr; /* shared mem or dma wr */
uint16 local_wr; /* host's local copy of the write index */
uint16 local_rd; /* host's local copy of the read index */
uint8 current_phase; /* ring phase bit at snapshot time */
bool start; /* snapshot updated from the start of tx function */
} rw_trace_t;
#endif /* TX_FLOW_RING_INDICES_TRACE */
/**
 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
 * buffer, the WR and RD indices, ring parameters such as max number of items
 * and length of each item, and other miscellaneous runtime state.
 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
 * Ring parameters are conveyed to the dongle, which maintains its own peer end
 * ring state. Depending on whether the DMA Indices feature is supported, the
 * host will update the WR/RD index in the DMA indices array in host memory or
 * directly in dongle memory.
 */
typedef struct msgbuf_ring {
bool inited; /* TRUE once the ring has been initialized */
uint16 idx; /* ring id */
uint16 rd; /* read index */
uint16 curr_rd; /* read index for debug */
uint16 wr; /* write index */
uint16 max_items; /* maximum number of items in ring */
uint16 item_len; /* length of each item in the ring */
sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
uint32 seqnum; /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
void *start_addr; /* start of the pending (not yet flushed) items */
/* # of messages on ring not yet announced to dongle */
uint16 pend_items_count;
#ifdef AGG_H2D_DB
osl_atomic_t inflight; /* items in flight, used by aggregated doorbell logic */
#endif /* AGG_H2D_DB */
#endif /* TXP_FLUSH_NITEMS */
uint8 ring_type; /* ring category (common/flow/info/etc.) */
uint8 n_completion_ids; /* number of valid entries in compeltion_ring_ids[] */
bool create_pending; /* TRUE while a ring-create request is outstanding */
uint16 create_req_id; /* request id of the pending ring-create */
uint8 current_phase; /* phase bit expected in the next work item */
/* NOTE(review): "compeltion" is a long-standing misspelling of "completion";
 * kept as-is because renaming the field would touch every user in this file.
 */
uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
uchar name[RING_NAME_MAX_LENGTH]; /* human-readable ring name for logs */
uint32 ring_mem_allocated; /* non-zero once ring memory was allocated */
void *ring_lock; /* lock protecting ring indices/state */
bool mesh_ring; /* TRUE if it is a mesh ring */
#ifdef DHD_AGGR_WI
aggr_state_t aggr_state; /* Aggregation state */
uint8 pending_pkt; /* Pending packet count */
#endif /* DHD_AGGR_WI */
struct msgbuf_ring *linked_ring; /* Ring Associated to metadata ring */
#ifdef TX_FLOW_RING_INDICES_TRACE
rw_trace_t *tx_flow_rw_trace; /* circular buffer of rd/wr index snapshots */
uint32 tx_flow_rw_trace_cnt; /* number of snapshots recorded */
uint32 err_rollback_idx_cnt; /* count of index-rollback errors */
#endif /* TX_FLOW_RING_INDICES_TRACE */
} msgbuf_ring_t;
/* VA of the first item in a ring */
#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
/* VA of the last item in a ring */
#define DHD_RING_END_VA(ring) \
((uint8 *)(DHD_RING_BGN_VA((ring))) + \
(((ring)->max_items - 1) * (ring)->item_len))
#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
#define MAX_IOCTL_TRACE_SIZE 50
#define MAX_IOCTL_BUF_SIZE 64
/* One entry of the recent-ioctl trace kept for debug dumps */
typedef struct _dhd_ioctl_trace_t {
uint32 cmd; /* ioctl command code */
uint16 transid; /* ioctl transaction id */
char ioctl_buf[MAX_IOCTL_BUF_SIZE]; /* leading bytes of the ioctl payload */
uint64 timestamp; /* time the ioctl was issued */
} dhd_ioctl_trace_t;
#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
/** D2H WLAN Rx Packet Chaining context */
typedef struct rxchain_info {
uint pkt_count; /* number of packets currently in the chain */
uint ifidx; /* interface index the chain belongs to */
void *pkthead; /* first packet of the chain */
void *pkttail; /* last packet of the chain */
uint8 *h_da; /* pointer to da of chain head */
uint8 *h_sa; /* pointer to sa of chain head */
uint8 h_prio; /* prio of chain head */
} rxchain_info_t;
#endif /* BCM_ROUTER_DHD && HNDCTF */
/* Legacy ring sizes for 1Gbps and lesser */
#define H2DRING_TXPOST_SIZE_V1 512u
#define H2DRING_HTPUT_TXPOST_SIZE_V1 512u /* Keep it same as H2DRING_TXPOST_SIZE_V1 */
#define D2HRING_TXCPL_SIZE_V1 1024u
#define H2DRING_RXPOST_SIZE_V1 512u
#define D2HRING_RXCPL_SIZE_V1 512u
#define H2DRING_CTRLPOST_SIZE_V1 64u
#define D2HRING_CTRLCPL_SIZE_V1 64u
#define RX_BUF_BURST_V1 32u /* Rx buffers for MSDU Data */
#define RX_BUFPOST_THRESHOLD_V1 32u /* Rxbuf post threshold */
/* Ring sizes version 2 for 2Gbps */
#define H2DRING_TXPOST_SIZE_V2 512u
#define H2DRING_HTPUT_TXPOST_SIZE_V2 2048u /* ONLY for 4 160 MHz BE flowrings */
#define D2HRING_TXCPL_SIZE_V2 2048u
#define H2DRING_RXPOST_SIZE_V2 2048u
#define D2HRING_RXCPL_SIZE_V2 1024u
#define H2DRING_CTRLPOST_SIZE_V2 128u
#define D2HRING_CTRLCPL_SIZE_V2 64u
#define RX_BUF_BURST_V2 256u /* Rx buffers for MSDU Data */
#define RX_BUFPOST_THRESHOLD_V2 64u /* Rxbuf post threshold */
/* Ring sizes version 3 for 2.5Gbps */
#define H2DRING_TXPOST_SIZE_V3 768u /* To handle two 256 BA, use size > 512 */
#define H2DRING_HTPUT_TXPOST_SIZE_V3 2048u
#define D2HRING_TXCPL_SIZE_V3 2048u
#define H2DRING_RXPOST_SIZE_V3 8192u
#define D2HRING_RXCPL_SIZE_V3 8192u
#define H2DRING_CTRLPOST_SIZE_V3 128u
#define D2HRING_CTRLCPL_SIZE_V3 64u
#define RX_BUF_BURST_V3 1536u /* Rx buffers for MSDU Data */
#define RX_BUFPOST_THRESHOLD_V3 64u /* Rxbuf post threshold */
/* Ring sizes version 4 for 5Gbps */
#define H2DRING_TXPOST_SIZE_V4 768u /* To handle two 256 BA, use size > 512 */
#define H2DRING_HTPUT_TXPOST_SIZE_V4 8192u /* ONLY for 4 320 MHz BE flowrings */
#define D2HRING_TXCPL_SIZE_V4 8192u
#define H2DRING_RXPOST_SIZE_V4 8192u
#define D2HRING_RXCPL_SIZE_V4 8192u
#define H2DRING_CTRLPOST_SIZE_V4 128u
#define D2HRING_CTRLCPL_SIZE_V4 64u
#define RX_BUF_BURST_V4 1536u /* Rx buffers for MSDU Data */
#define RX_BUFPOST_THRESHOLD_V4 64u /* Rxbuf post threshold */
#define MAX_RING_SIZE_VERSION 4
/* misc */
#define MIN_HTPUT_H2DRING_RXPOST_SIZE 512u
/* Selected ring-size version and the per-ring sizes derived from it.
 * NOTE(review): these are file-scope globals populated at init; presumably set
 * from the *_array tables below based on link speed — confirm at the use sites.
 */
int ring_size_version;
int ring_size_alloc_version;
uint h2d_max_txpost;
uint h2d_htput_max_txpost;
uint d2h_max_txcpl;
uint h2d_max_rxpost;
uint d2h_max_rxcpl;
uint h2d_max_ctrlpost;
uint d2h_max_ctrlcpl;
uint rx_buf_burst;
uint rx_bufpost_threshold;
/* Per-version ring sizes (entry i holds the V(i+1) size) */
uint h2d_max_txpost_array[MAX_RING_SIZE_VERSION] =
{
H2DRING_TXPOST_SIZE_V1,
H2DRING_TXPOST_SIZE_V2,
H2DRING_TXPOST_SIZE_V3,
H2DRING_TXPOST_SIZE_V4
};
uint h2d_htput_max_txpost_array[MAX_RING_SIZE_VERSION] =
{
H2DRING_HTPUT_TXPOST_SIZE_V1,
H2DRING_HTPUT_TXPOST_SIZE_V2,
H2DRING_HTPUT_TXPOST_SIZE_V3,
H2DRING_HTPUT_TXPOST_SIZE_V4
};
uint d2h_max_txcpl_array[MAX_RING_SIZE_VERSION] =
{
D2HRING_TXCPL_SIZE_V1,
D2HRING_TXCPL_SIZE_V2,
D2HRING_TXCPL_SIZE_V3,
D2HRING_TXCPL_SIZE_V4
};
uint h2d_max_rxpost_array[MAX_RING_SIZE_VERSION] =
{
H2DRING_RXPOST_SIZE_V1,
H2DRING_RXPOST_SIZE_V2,
H2DRING_RXPOST_SIZE_V3,
H2DRING_RXPOST_SIZE_V4
};
uint d2h_max_rxcpl_array[MAX_RING_SIZE_VERSION] =
{
D2HRING_RXCPL_SIZE_V1,
D2HRING_RXCPL_SIZE_V2,
D2HRING_RXCPL_SIZE_V3,
D2HRING_RXCPL_SIZE_V4
};
uint h2d_max_ctrlpost_array[MAX_RING_SIZE_VERSION] =
{
H2DRING_CTRLPOST_SIZE_V1,
H2DRING_CTRLPOST_SIZE_V2,
H2DRING_CTRLPOST_SIZE_V3,
H2DRING_CTRLPOST_SIZE_V4
};
/* Per-version D2H control completion ring sizes; reformatted to match the
 * layout of the sibling *_array tables above (same entries, same order).
 */
uint d2h_max_ctrlcpl_array[MAX_RING_SIZE_VERSION] =
{
D2HRING_CTRLCPL_SIZE_V1,
D2HRING_CTRLCPL_SIZE_V2,
D2HRING_CTRLCPL_SIZE_V3,
D2HRING_CTRLCPL_SIZE_V4
};
/* Per-version Rx buffer burst counts and post thresholds (entry i = V(i+1)) */
uint rx_buf_burst_array[MAX_RING_SIZE_VERSION] =
{
RX_BUF_BURST_V1,
RX_BUF_BURST_V2,
RX_BUF_BURST_V3,
RX_BUF_BURST_V4
};
uint rx_bufpost_threshold_array[MAX_RING_SIZE_VERSION] =
{
RX_BUFPOST_THRESHOLD_V1,
RX_BUFPOST_THRESHOLD_V2,
RX_BUFPOST_THRESHOLD_V3,
RX_BUFPOST_THRESHOLD_V4
};
/* Per-pass processing bounds (overridable via Makefile); limit how many work
 * items are handled in one DPC pass so one ring cannot starve the others.
 */
#ifndef DHD_RX_CPL_POST_BOUND
#define DHD_RX_CPL_POST_BOUND 1024u
#endif
#ifndef DHD_TX_POST_BOUND
#define DHD_TX_POST_BOUND 256u
#endif
#ifndef DHD_CTRL_CPL_POST_BOUND
#define DHD_CTRL_CPL_POST_BOUND 64u
#endif
#ifndef DHD_TX_CPL_BOUND
#define DHD_TX_CPL_BOUND 2048u
#endif
/* Runtime-tunable copies of the bounds above */
uint dhd_rx_cpl_post_bound = DHD_RX_CPL_POST_BOUND;
uint dhd_tx_post_bound = DHD_TX_POST_BOUND;
uint dhd_tx_cpl_bound = DHD_TX_CPL_BOUND;
uint dhd_ctrl_cpl_post_bound = DHD_CTRL_CPL_POST_BOUND;
/* Max pktid map sizes for each type of ring */
#define PKTID_MAX_MAP_SZ_CTRLRING (1024)
#define PKTID_MAX_MAP_SZ_RXCPLRING (8 * 1024)
/* Limit max within 16bits i.e DHD_MAX_PKTID_16BITS (0xFF00) and
 * subtract one more as map will be inited with one extra item(valid index starts from 1)
 */
#define PKTID_MAX_MAP_SZ_TXFLOWRING (DHD_MAX_PKTID_16BITS - 1)
#ifdef AGG_H2D_DB
/* Aggregated H2D doorbell: batch several TxPost doorbells into one */
bool agg_h2d_db_enab = TRUE;
#define AGG_H2D_DB_TIMEOUT_USEC (1000u) /* 1 msec */
uint32 agg_h2d_db_timeout = AGG_H2D_DB_TIMEOUT_USEC;
#ifndef AGG_H2D_DB_INFLIGHT_THRESH
/* Keep inflight threshold same as txp_threshold */
#define AGG_H2D_DB_INFLIGHT_THRESH TXP_FLUSH_MAX_ITEMS_FLUSH_CNT
#endif /* !AGG_H2D_DB_INFLIGHT_THRESH */
uint32 agg_h2d_db_inflight_thresh = AGG_H2D_DB_INFLIGHT_THRESH;
#define DHD_NUM_INFLIGHT_HISTO_ROWS (14u)
#define DHD_INFLIGHT_HISTO_SIZE (sizeof(uint64) * DHD_NUM_INFLIGHT_HISTO_ROWS)
/* State for the aggregated-doorbell timer and its statistics */
typedef struct _agg_h2d_db_info {
void *dhd; /* back-pointer to the dhd instance */
struct hrtimer timer; /* fires the deferred doorbell */
bool init; /* TRUE once the timer has been initialized */
uint32 direct_db_cnt; /* doorbells rung immediately (threshold hit) */
uint32 timer_db_cnt; /* doorbells rung from the timer */
uint64 *inflight_histo; /* histogram of inflight counts (DHD_NUM_INFLIGHT_HISTO_ROWS) */
} agg_h2d_db_info_t;
#endif /* AGG_H2D_DB */
#ifdef DHD_AGGR_WI
/* Counters for aggregated work items, per direction and path */
typedef struct dhd_aggr_stat {
uint32 aggr_txpost; /* # of aggregated txpost work item posted */
uint32 aggr_rxpost; /* # of aggregated rxpost work item posted */
uint32 aggr_txcpl; /* # of aggregated txcpl work item processed */
uint32 aggr_rxcpl; /* # of aggregated rxcpl work item processed */
} dhd_aggr_stat_t;
#endif /* DHD_AGGR_WI */
/* Enables extra validation of pktids coming back from the dongle */
#define DHD_DEBUG_INVALID_PKTID
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
uint16 rxbufpost_sz; /* Size of rx buffer posted to dongle */
uint16 rxbufpost_alloc_sz; /* Actual rx buffer packet allocated in the host */
uint16 rxbufpost;
uint16 rx_buf_burst;
uint16 rx_bufpost_threshold;
uint16 max_rxbufpost;
uint32 tot_rxbufpost;
uint32 tot_rxcpl;
void *rxp_bufinfo_pool; /* Scratch buffer pool to hold va, pa and pktlens */
uint16 rxp_bufinfo_pool_size; /* scartch buffer pool length */
uint16 max_eventbufpost;
uint16 max_ioctlrespbufpost;
uint16 max_tsbufpost;
uint16 max_infobufpost;
uint16 infobufpost;
uint16 cur_event_bufs_posted;
uint16 cur_ioctlresp_bufs_posted;
uint16 cur_ts_bufs_posted;
/* Flow control mechanism based on active transmits pending */
osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
uint16 h2d_max_txpost;
uint16 h2d_htput_max_txpost;
uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
uint32 rx_dataoffset; /* rx data offset advertised by dongle -- NOTE(review): confirm usage */
dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
#ifdef DHD_DB0TS
dhd_mb_ring_t idma_db0_fn; /* called when dongle needs to be notified of timestamp */
#endif /* DHD_DB0TS */
/* ioctl related resources (DHD allows a single outstanding ioctl) */
uint8 ioctl_state; /* state flags of the in-flight ioctl transaction */
int16 ioctl_status; /* status returned from dongle */
uint16 ioctl_resplen; /* length of the ioctl response from dongle */
dhd_ioctl_received_status_t ioctl_received; /* ack/completion state of the ioctl */
uint curr_ioctl_cmd; /* ioctl cmd currently being processed */
dhd_dma_buf_t retbuf; /* For holding ioctl response */
dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
/* DMA-able arrays for holding WR and RD indices */
uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
#ifdef DHD_DMA_INDICES_SEQNUM
char *h2d_dma_indx_rd_copy_buf; /* Local copy of H2D RD indices array */
char *d2h_dma_indx_wr_copy_buf; /* Local copy of D2H WR indices array */
uint32 h2d_dma_indx_rd_copy_bufsz; /* H2D RD indices array size */
uint32 d2h_dma_indx_wr_copy_bufsz; /* D2H WR indices array size */
uint32 host_seqnum; /* Sequence number for D2H DMA Indices sync */
#endif /* DHD_DMA_INDICES_SEQNUM */
uint32 flowring_num; /* number of entries in the flowring buf pool */
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
#ifdef EWP_EDL
d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
#endif /* EWP_EDL */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
uint16 ioctl_seq_no; /* sequence number stamped on ioctl requests */
uint16 data_seq_no; /* this field is obsolete */
uint16 ioctl_trans_id; /* transaction id matching ioctl request to response */
void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
void *pktid_rx_map; /* pktid map for rx path */
void *pktid_tx_map; /* pktid map for tx path */
bool metadata_dbg; /* enables metadata debug output */
void *pktid_map_handle_ioctl; /* pktid map for ioctl buffers */
#ifdef DHD_MAP_PKTID_LOGGING
void *pktid_dma_map; /* pktid map for DMA MAP */
void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
#endif /* DHD_MAP_PKTID_LOGGING */
uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
uint64 ioctl_ack_time; /* timestamp for ioctl ack */
uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
/* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
uint16 tx_metadata_offset;
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
rxchain_info_t rxchain; /* chain of rx packets */
#endif
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
/* Host's soft doorbell configuration */
bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
/* Work Queues to be used by the producer and the consumer, and threshold
 * when the WRITE index must be synced to consumer's workq
 * (NOTE(review): the workq fields appear to be compiled out in this configuration)
 */
dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
#ifdef FLOW_RING_PREALLOC
/* pre-allocated high-throughput (htput) flow ring buffer */
dhd_dma_buf_t *prealloc_htput_flowring_buf;
/* pre-allocated flow ring (non htput rings) */
dhd_dma_buf_t *prealloc_regular_flowring_buf;
#endif /* FLOW_RING_PREALLOC */
uint32 host_ipc_version; /* Host supported IPC rev */
uint32 device_ipc_version; /* FW supported IPC rev */
uint32 active_ipc_version; /* Host advertised IPC rev */
#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
dhd_ioctl_trace_t ioctl_trace[MAX_IOCTL_TRACE_SIZE]; /* ring of recent ioctl records */
uint32 ioctl_trace_count; /* number of ioctls traced so far */
#endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
bool hostts_req_buf_inuse; /* TRUE while hostts_req_buf is posted to dongle */
bool rx_ts_log_enabled; /* rx timestamp logging enabled */
bool tx_ts_log_enabled; /* tx timestamp logging enabled */
#ifdef BTLOG
msgbuf_ring_t *h2dring_btlog_subn; /* H2D btlog submission ring */
msgbuf_ring_t *d2hring_btlog_cpln; /* D2H btlog completion ring */
uint16 btlogbufpost; /* current count of posted btlog buffers */
uint16 max_btlogbufpost; /* ceiling on posted btlog buffers */
#endif /* BTLOG */
#ifdef DHD_HMAPTEST
/* state for HMAP (host memory access protection) testing */
uint32 hmaptest_rx_active;
uint32 hmaptest_rx_pktid;
char *hmap_rx_buf_va;
dmaaddr_t hmap_rx_buf_pa;
uint32 hmap_rx_buf_len;
uint32 hmaptest_tx_active;
uint32 hmaptest_tx_pktid;
char *hmap_tx_buf_va;
dmaaddr_t hmap_tx_buf_pa;
uint32 hmap_tx_buf_len;
dhd_hmaptest_t hmaptest; /* for hmaptest */
bool hmap_enabled; /* TRUE = hmap is enabled */
#endif /* DHD_HMAPTEST */
#ifdef SNAPSHOT_UPLOAD
dhd_dma_buf_t snapshot_upload_buf; /* snapshot upload buffer */
uint32 snapshot_upload_len; /* snapshot uploaded len */
uint8 snapshot_type; /* snapshot uploaded type */
bool snapshot_cmpl_pending; /* snapshot completion pending */
#endif /* SNAPSHOT_UPLOAD */
/* per-packet tx policy flags -- NOTE(review): presumably mirrored into tx work items */
bool no_retry;
bool no_aggr;
bool fixed_rate;
bool rts_protect;
dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
#ifdef DHD_HP2P
msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HP2P Tx completion ring */
msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HP2P Rx completion ring */
#endif /* DHD_HP2P */
#if defined(DHD_MESH)
msgbuf_ring_t *d2hring_mesh_rxcpl; /* D2H Mesh Rx completion ring */
#endif /* DHD_MESH */
uint32 txcpl_db_cnt; /* count of tx-completion doorbell rings */
#ifdef AGG_H2D_DB
agg_h2d_db_info_t agg_h2d_db_info; /* aggregated H2D doorbell state */
#endif /* AGG_H2D_DB */
uint64 tx_h2d_db_cnt; /* count of H2D tx doorbell rings */
#ifdef DHD_DEBUG_INVALID_PKTID
uint8 ctrl_cpl_snapshot[D2HRING_CTRL_CMPLT_ITEMSIZE]; /* copy of last ctrl cpl work item */
#endif /* DHD_DEBUG_INVALID_PKTID */
uint32 event_wakeup_pkt; /* Number of event wakeup packet rcvd */
uint32 rx_wakeup_pkt; /* Number of Rx wakeup packet rcvd */
uint32 info_wakeup_pkt; /* Number of info cpl wakeup packet rcvd */
#ifdef DHD_AGGR_WI
dhd_aggr_stat_t aggr_stat; /* Aggregated work item statistics */
#endif /* DHD_AGGR_WI */
uint32 rxbuf_post_err; /* count of rx buffer post failures */
msgbuf_ring_t *d2hring_md_cpl; /* D2H metadata completion ring */
/* no. which controls how many rx cpl/post items are processed per dpc */
uint32 rx_cpl_post_bound;
/*
 * no. which controls how many tx post items are processed per dpc,
 * i.e, how many tx pkts are posted to flowring from the backup (bkp) queue
 * from dpc context
 */
uint32 tx_post_bound;
/* no. which controls how many tx cpl items are processed per dpc */
uint32 tx_cpl_bound;
/* no. which controls how many ctrl cpl/post items are processed per dpc */
uint32 ctrl_cpl_post_bound;
} dhd_prot_t;
/* Metadata d2h completion ring linking/unlink ring types */
/*
 * Identifies which D2H completion ring a metadata completion ring is
 * linked to (or DHD_METADATA_NO_RING when it is not linked).
 * Values are spelled out explicitly so the wire/debug encoding is obvious.
 */
typedef enum dhd_mdata_linked_ring_idx {
	DHD_METADATA_NO_RING	= 0,	/* metadata ring not linked to any ring */
	DHD_METADATA_D2H_TXCPL	= 1,	/* linked to D2H Tx completion ring */
	DHD_METADATA_D2H_RXCPL	= 2,	/* linked to D2H Rx completion ring */
	DHD_METADATA_D2H_HP2PTX	= 3,	/* linked to D2H HP2P Tx completion ring */
	DHD_METADATA_D2H_HP2PRX	= 4	/* linked to D2H HP2P Rx completion ring */
} dhd_mdata_linked_ring_idx_t;
#ifdef DHD_EWPR_VER2
#define HANG_INFO_BASE64_BUFFER_SIZE 640
#endif
#ifdef DHD_DUMP_PCIE_RINGS
static
int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
const void *user_buf, unsigned long *file_posn);
#ifdef EWP_EDL
static
int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,