/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 */

#ifndef __CVMX_PKO3_H__
#define __CVMX_PKO3_H__

DECLARE_GLOBAL_DATA_PTR;

/* Use full LMTDMA when PARAMETER_CHECKING is enabled */
#undef CVMX_ENABLE_PARAMETER_CHECKING
#define CVMX_ENABLE_PARAMETER_CHECKING 0

/*
 * CVMSEG, scratch line for LMTDMA/LMTST operations:
 * 1. It should differ from other CVMSEG uses, e.g. IOBDMA,
 * 2. It must agree with the setting of CvmCtl[LMTLINE] control register.
 * The line contains 16 words; words 1-15 are cleared when word 0 is written.
 */
#define CVMX_PKO_LMTLINE 2ull

/* PKO3 queue level identifier */
enum cvmx_pko3_level_e {
	CVMX_PKO_LEVEL_INVAL = 0,
	CVMX_PKO_PORT_QUEUES = 0xd1,
	CVMX_PKO_L2_QUEUES = 0xc2,
	CVMX_PKO_L3_QUEUES = 0xb3,
	CVMX_PKO_L4_QUEUES = 0xa4,
	CVMX_PKO_L5_QUEUES = 0x95,
	CVMX_PKO_DESCR_QUEUES = 0x86,
};

enum cvmx_pko_dqop {
	CVMX_PKO_DQ_SEND = 0ULL,
	CVMX_PKO_DQ_OPEN = 1ULL,
	CVMX_PKO_DQ_CLOSE = 2ULL,
	CVMX_PKO_DQ_QUERY = 3ULL
};

/**
 * Returns the PKO DQ..L2 Shaper Time-Wheel clock rate for the specified node.
 */
static inline u64 cvmx_pko3_dq_tw_clock_rate_node(int node)
{
	return gd->bus_clk / 768;
}

/**
 * Returns the PKO Port Shaper Time-Wheel clock rate for the specified node.
 */
static inline u64 cvmx_pko3_pq_tw_clock_rate_node(int node)
{
	int div;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		div = 96;
	else
		div = 48;
	return gd->bus_clk / div;
}

/**
 * @INTERNAL
 * Return the number of MACs in the PKO (excluding the NULL MAC)
 * in a model-dependent manner.
 */
static inline unsigned int __cvmx_pko3_num_macs(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 10;
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 14;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 28;
	return 0;
}

/**
 * @INTERNAL
 * Return the deepest scheduler queue level, depending on the SoC model
 */
static inline int __cvmx_pko3_sq_lvl_max(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return CVMX_PKO_L3_QUEUES;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return CVMX_PKO_L3_QUEUES;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return CVMX_PKO_L5_QUEUES;
	return -1;
}

/**
 * @INTERNAL
 * Return the next (lower) queue level for a given level
 */
static inline enum cvmx_pko3_level_e
__cvmx_pko3_sq_lvl_next(enum cvmx_pko3_level_e level)
{
	switch (level) {
	default:
		return CVMX_PKO_LEVEL_INVAL;
	case CVMX_PKO_PORT_QUEUES:
		return CVMX_PKO_L2_QUEUES;
	case CVMX_PKO_L2_QUEUES:
		return CVMX_PKO_L3_QUEUES;
	case CVMX_PKO_L3_QUEUES:
		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
			return CVMX_PKO_DESCR_QUEUES;
		return CVMX_PKO_L4_QUEUES;
	case CVMX_PKO_L4_QUEUES:
		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
			return CVMX_PKO_LEVEL_INVAL;
		return CVMX_PKO_L5_QUEUES;
	case CVMX_PKO_L5_QUEUES:
		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
			return CVMX_PKO_LEVEL_INVAL;
		return CVMX_PKO_DESCR_QUEUES;
	}
}
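
/*
 * Illustrative sketch (not part of the API): walking the scheduler
 * hierarchy from the port level down to the descriptor queues on any
 * model, using __cvmx_pko3_sq_lvl_next(); the loop terminates because
 * the level below CVMX_PKO_DESCR_QUEUES is CVMX_PKO_LEVEL_INVAL:
 *
 *	enum cvmx_pko3_level_e lvl;
 *
 *	for (lvl = CVMX_PKO_PORT_QUEUES; lvl != CVMX_PKO_LEVEL_INVAL;
 *	     lvl = __cvmx_pko3_sq_lvl_next(lvl)) {
 *		... configure the queues at this level ...
 *	}
 */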

/**
 * @INTERNAL
 * Return an SQ identifier string, for debug messages.
 */
static inline char *__cvmx_pko3_sq_str(char *buf, enum cvmx_pko3_level_e level,
				       unsigned int q)
{
	char *p;

	switch (level) {
	default:
		strcpy(buf, "ERR-SQ/");
		break;
	case CVMX_PKO_PORT_QUEUES:
		strcpy(buf, "PQ_L1/");
		break;
	case CVMX_PKO_L2_QUEUES:
		strcpy(buf, "SQ_L2/");
		break;
	case CVMX_PKO_L3_QUEUES:
		strcpy(buf, "SQ_L3/");
		break;
	case CVMX_PKO_L4_QUEUES:
		strcpy(buf, "SQ_L4/");
		break;
	case CVMX_PKO_L5_QUEUES:
		strcpy(buf, "SQ_L5/");
		break;
	case CVMX_PKO_DESCR_QUEUES:
		strcpy(buf, "DQ/");
		break;
	}

	/* Advance to the end of the prefix string */
	for (p = buf; *p; p++)
		;
	/* Append the queue number as four zero-padded decimal digits */
	*p++ = '0' + q / 1000;
	q -= (q / 1000) * 1000;
	*p++ = '0' + q / 100;
	q -= (q / 100) * 100;
	*p++ = '0' + q / 10;
	q -= (q / 10) * 10;
	*p++ = '0' + q;
	*p++ = ':';
	*p = '\0';
	return buf;
}

union cvmx_pko_query_rtn {
	u64 u64;
	struct {
		u64 dqstatus : 4;
		u64 rsvd_50_59 : 10;
		u64 dqop : 2;
		u64 depth : 48;
	} s;
};

typedef union cvmx_pko_query_rtn cvmx_pko_query_rtn_t;

/* PKO_QUERY_RTN_S[DQSTATUS] - cvmx_pko_query_rtn_t->s.dqstatus */
enum pko_query_dqstatus {
	PKO_DQSTATUS_PASS = 0,	       /* No error */
	PKO_DQSTATUS_BADSTATE = 0x8,   /* queue was not ready to enqueue */
	PKO_DQSTATUS_NOFPABUF = 0x9,   /* FPA out of buffers */
	PKO_DQSTATUS_NOPKOBUF = 0xA,   /* PKO out of buffers */
	PKO_DQSTATUS_FAILRTNPTR = 0xB, /* can't return buffer ptr to FPA */
	PKO_DQSTATUS_ALREADY = 0xC,    /* already created */
	PKO_DQSTATUS_NOTCREATED = 0xD, /* not created */
	PKO_DQSTATUS_NOTEMPTY = 0xE,   /* queue not empty */
	PKO_DQSTATUS_SENDPKTDROP = 0xF /* packet dropped, illegal construct */
};

typedef enum pko_query_dqstatus pko_query_dqstatus_t;

/* Sub-command three bit codes (SUBDC3) */
#define CVMX_PKO_SENDSUBDC_LINK	  0x0
#define CVMX_PKO_SENDSUBDC_GATHER 0x1
#define CVMX_PKO_SENDSUBDC_JUMP	  0x2
/* Sub-command four bit codes (SUBDC4) */
#define CVMX_PKO_SENDSUBDC_TSO	0x8
#define CVMX_PKO_SENDSUBDC_FREE 0x9
#define CVMX_PKO_SENDSUBDC_WORK 0xA
#define CVMX_PKO_SENDSUBDC_AURA 0xB
#define CVMX_PKO_SENDSUBDC_MEM	0xC
#define CVMX_PKO_SENDSUBDC_EXT	0xD
#define CVMX_PKO_SENDSUBDC_CRC	0xE
#define CVMX_PKO_SENDSUBDC_IMM	0xF

/**
 * pko buf ptr
 * This layout is suitable for LINK_S, GATHER_S and PKI_BUFLINK_S use.
 * It can also be used for JUMP_S, with the F-bit represented by the
 * "i" field and the size limited to 8 bits.
 */

union cvmx_pko_buf_ptr {
	u64 u64;
	struct {
		u64 size : 16;
		u64 subdc3 : 3;
		u64 i : 1;
		u64 rsvd_42_43 : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_buf_ptr cvmx_pko_buf_ptr_t;

/**
 * pko_auraalg_e
 */
enum pko_auraalg_e {
	AURAALG_NOP = 0x0,    /* aura_cnt = No change */
	AURAALG_SUB = 0x3,    /* aura_cnt -= pko_send_aura_t.offset */
	AURAALG_SUBLEN = 0x7, /* aura_cnt -= pko_send_aura_t.offset +
			       *		pko_send_hdr_t.total_bytes
			       */
	AURAALG_SUBMBUF = 0xB /* aura_cnt -= pko_send_aura_t.offset +
			       *		mbufs_freed
			       */
};

/**
 * PKO_CKL4ALG_E
 */
enum pko_ckl4alg_e {
	CKL4ALG_NONE = 0x0, /* No checksum. */
	CKL4ALG_UDP = 0x1,  /* UDP L4 checksum. */
	CKL4ALG_TCP = 0x2,  /* TCP L4 checksum. */
	CKL4ALG_SCTP = 0x3, /* SCTP L4 checksum. */
};

/**
 * pko_send_aura
 */
union cvmx_pko_send_aura {
	u64 u64;
	struct {
		u64 rsvd_60_63 : 4;
		u64 aura : 12; /* NODE+LAURA */
		u64 subdc4 : 4;
		u64 alg : 4; /* pko_auraalg_e */
		u64 rsvd_08_39 : 32;
		u64 offset : 8;
	} s;
};

typedef union cvmx_pko_send_aura cvmx_pko_send_aura_t;

/**
 * pko_send_tso
 */
union cvmx_pko_send_tso {
	u64 u64;
	struct {
		u64 l2len : 8;
		u64 rsvd_48_55 : 8;
		u64 subdc4 : 4; /* 0x8 */
		u64 rsvd_32_43 : 12;
		u64 sb : 8;
		u64 mss : 16;
		u64 eom : 1;
		u64 fn : 7;
	} s;
};

typedef union cvmx_pko_send_tso cvmx_pko_send_tso_t;

/**
 * pko_send_free
 */
union cvmx_pko_send_free {
	u64 u64;
	struct {
		u64 rsvd_48_63 : 16;
		u64 subdc4 : 4; /* 0x9 */
		u64 rsvd : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_send_free cvmx_pko_send_free_t;

/* PKO_SEND_HDR_S - PKO header subcommand */
union cvmx_pko_send_hdr {
	u64 u64;
	struct {
		u64 rsvd_60_63 : 4;
		u64 aura : 12;
		u64 ckl4 : 2; /* PKO_CKL4ALG_E */
		u64 ckl3 : 1;
		u64 ds : 1;
		u64 le : 1;
		u64 n2 : 1;
		u64 ii : 1;
		u64 df : 1;
		u64 rsvd_39 : 1;
		u64 format : 7;
		u64 l4ptr : 8;
		u64 l3ptr : 8;
		u64 total : 16;
	} s;
};

typedef union cvmx_pko_send_hdr cvmx_pko_send_hdr_t;

/* PKO_SEND_EXT_S - extended header subcommand */
union cvmx_pko_send_ext {
	u64 u64;
	struct {
		u64 rsvd_48_63 : 16;
		u64 subdc4 : 4; /* _SENDSUBDC_EXT */
		u64 col : 2;	/* _COLORALG_E */
		u64 ra : 2;	/* _REDALG_E */
		u64 tstmp : 1;
		u64 rsvd_24_38 : 15;
		u64 markptr : 8;
		u64 rsvd_9_15 : 7;
		u64 shapechg : 9;
	} s;
};

typedef union cvmx_pko_send_ext cvmx_pko_send_ext_t;

/* PKO_MEMDSZ_E */
enum cvmx_pko_memdsz_e {
	MEMDSZ_B64 = 0,
	MEMDSZ_B32 = 1,
	MEMDSZ_B16 = 2, /* Not in HRM, assumed unsupported */
	MEMDSZ_B8 = 3
};

/* PKO_MEMALG_E */
enum cvmx_pko_memalg_e {
	MEMALG_SET = 0,	      /* Set mem = PKO_SEND_MEM_S[OFFSET] */
	MEMALG_SETTSTMP = 1,  /* Set the memory location to the timestamp
			       *  PKO_SEND_MEM_S[DSZ] must be B64 and a
			       *  PKO_SEND_EXT_S subdescriptor must be in
			       *  the descriptor with PKO_SEND_EXT_S[TSTMP]=1
			       */
	MEMALG_SETRSLT = 2,   /* [DSZ] = B64; mem = PKO_MEM_RESULT_S.  */
	MEMALG_ADD = 8,	      /* mem = mem + PKO_SEND_MEM_S[OFFSET] */
	MEMALG_SUB = 9,	      /* mem = mem - PKO_SEND_MEM_S[OFFSET] */
	MEMALG_ADDLEN = 0xA,  /* mem += [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
	MEMALG_SUBLEN = 0xB,  /* mem -= [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
	MEMALG_ADDMBUF = 0xC, /* mem += [OFFSET] + mbufs_freed */
	MEMALG_SUBMBUF = 0xD  /* mem -= [OFFSET] + mbufs_freed */
};

union cvmx_pko_send_mem {
	u64 u64;
	struct {
		u64 rsvd_63 : 1;
		u64 wmem : 1;
		u64 dsz : 2;
		u64 alg : 4;
		u64 offset : 8;
		u64 subdc4 : 4;
		u64 rsvd_42_43 : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_send_mem cvmx_pko_send_mem_t;

union cvmx_pko_send_work {
	u64 u64;
	struct {
		u64 rsvd_62_63 : 2;
		u64 grp : 10;
		u64 tt : 2;
		u64 rsvd_48_49 : 2;
		u64 subdc4 : 4;
		u64 rsvd_42_43 : 2;
		u64 addr : 42;
	} s;
};

typedef union cvmx_pko_send_work cvmx_pko_send_work_t;

/*** PKO_SEND_DMA_S - format of IOBDMA/LMTDMA data word ***/
union cvmx_pko_lmtdma_data {
	u64 u64;
	struct {
		u64 scraddr : 8;
		u64 rtnlen : 8;
		u64 did : 8; /* 0x51 */
		u64 node : 4;
		u64 rsvd_34_35 : 2;
		u64 dqop : 2; /* PKO_DQOP_E */
		u64 rsvd_26_31 : 6;
		u64 dq : 10;
		u64 rsvd_0_15 : 16;
	} s;
};

typedef union cvmx_pko_lmtdma_data cvmx_pko_lmtdma_data_t;

typedef struct cvmx_pko3_dq_params_s {
	s32 depth;
	s32 limit;
	u64 pad[15];
} cvmx_pko3_dq_params_t;

/* DQ depth cached value */
extern cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];

int cvmx_pko3_internal_buffer_count(unsigned int node);

/**
 * @INTERNAL
 * PKO3 DQ parameter location
 * @param node      node number
 * @param dq        descriptor queue number
 */
static inline cvmx_pko3_dq_params_t *cvmx_pko3_dq_parameters(unsigned int node,
							     unsigned int dq)
{
	cvmx_pko3_dq_params_t *pparam = NULL;
	static cvmx_pko3_dq_params_t dummy;

	dummy.depth = 0;
	dummy.limit = (1 << 16);

	if (cvmx_likely(node < CVMX_MAX_NODES))
		pparam = __cvmx_pko3_dq_params[node];

	if (cvmx_likely(pparam))
		pparam += dq;
	else
		pparam = &dummy;

	return pparam;
}

static inline void cvmx_pko3_dq_set_limit(unsigned int node, unsigned int dq,
					  unsigned int limit)
{
	cvmx_pko3_dq_params_t *pparam;

	pparam = cvmx_pko3_dq_parameters(node, dq);
	pparam->limit = limit;
}
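
/*
 * Illustrative sketch: the limit set here is what the transmit
 * functions below compare against the cached DQ depth to decide
 * whether to admit another packet (1024 is an arbitrary example):
 *
 *	cvmx_pko3_dq_set_limit(node, dq, 1024);
 */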

/**
 * PKO descriptor queue operation error string
 *
 * @param dqstatus is the enumeration returned from hardware,
 *	  PKO_QUERY_RTN_S[DQSTATUS].
 *
 * @return static constant string error description
 */
const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus);

/*
 * This function gets the PKO MAC number for an interface/port.
 *
 * @param xiface is the global interface number.
 * @param index is the port number.
 * @return returns the MAC number if successful or -1 on failure.
 */
static inline int __cvmx_pko3_get_mac_num(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	cvmx_helper_interface_mode_t mode;
	int interface_index;
	int ilk_mac_base = -1, bgx_mac_base = -1, bgx_ports = 4;

	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		bgx_mac_base = 2;

	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		bgx_mac_base = 2;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		ilk_mac_base = 2;
		bgx_mac_base = 4;
	}

	mode = cvmx_helper_interface_get_mode(xiface);
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		return 0;
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		return 1;
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		if (ilk_mac_base < 0)
			return -1;
		interface_index = (xi.interface - CVMX_ILK_GBL_BASE());
		if (interface_index < 0)
			return -1;
		return (ilk_mac_base + interface_index);
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		return (4 + 2 * xi.interface + index);
	default:
		if (xi.interface >= CVMX_ILK_GBL_BASE() && ilk_mac_base >= 0)
			return -1;
		/* All other modes belong to BGX */
		return (bgx_mac_base + bgx_ports * xi.interface + index);
	}
}

/**
 * @INTERNAL
 *
 * Get scratch offset for LMTDMA/LMTST data buffer
 *
 */
static inline unsigned int cvmx_pko3_lmtdma_scr_base(void)
{
	return CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
}

/**
 * @INTERNAL
 *
 * Get address for LMTDMA/LMTST data buffer
 *
 */
static inline u64 *cvmx_pko3_cvmseg_addr(void)
{
	const unsigned int scr = cvmx_pko3_lmtdma_scr_base();

	return (u64 *)(CVMX_SCRATCH_BASE + scr);
}

/**
 * Save scratchpad area
 * @param buf storage buffer for saving previous scratchpad contents.
 *
 * This function should be used whenever the cache line designated for
 * LMTST/LMTDMA and Wide-Atomic operations is used from a context that
 * might preempt another context using the same cache line, e.g. the
 * hard interrupt context in the Linux kernel, which could preempt a
 * user-space application on the same processor core that also uses
 * the same scratchpad.
 * 'cvmx_lmtline_save()' should be called upon entry into the
 * potentially interrupting context, and 'cvmx_lmtline_restore()' should
 * be called prior to exiting that context.
 */
static inline void cvmx_lmtline_save(u64 buf[16])
{
	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);

	/* Wait for any pending LMTDMA to finish */
	CVMX_SYNCIOBDMA;

	/* Copy LMTLINE to user-provided buffer */
	for (i = 0; i < sz; i++)
		buf[i] = cvmx_scratch_read64(scr_off + i * sizeof(u64));
}

/**
 * Restore scratchpad area
 * @param buf storage buffer containing the previous content of scratchpad.
 */
static inline void cvmx_lmtline_restore(const u64 buf[16])
{
	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);

	/* Wait for any pending LMTDMA to finish */
	CVMX_SYNCIOBDMA;

	/* restore scratchpad area from buf[] */
	for (i = 0; i < sz; i++)
		cvmx_scratch_write64(scr_off + i * sizeof(u64), buf[i]);
}
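
/*
 * Illustrative sketch: a potentially preempting context (e.g. a hard
 * interrupt handler) brackets its own LMTLINE use with the pair above,
 * so that a preempted user of the same scratch line finds it intact:
 *
 *	u64 saved_lmtline[16];
 *
 *	cvmx_lmtline_save(saved_lmtline);
 *	... use LMTST/LMTDMA, e.g. cvmx_pko3_xmit_link_buf() ...
 *	cvmx_lmtline_restore(saved_lmtline);
 */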

/*
 * @INTERNAL
 * Deliver PKO SEND commands via CVMSEG LM and LMTDMA/LMTST.
 * The command should be already stored in the CVMSEG address.
 *
 * @param node is the destination node
 * @param dq is the destination descriptor queue.
 * @param numwords is the number of outgoing words
 * @param tag_wait Wait to finish tag switch just before issueing LMTDMA
 * @return the PKO3 native query result structure.
 *
 * <numwords> must be between 1 and 15 for CVMX_PKO_DQ_SEND command
 *
 * NOTE: Internal use only.
 */
static inline cvmx_pko_query_rtn_t
__cvmx_pko3_lmtdma(u8 node, uint16_t dq, unsigned int numwords, bool tag_wait)
{
	const enum cvmx_pko_dqop dqop = CVMX_PKO_DQ_SEND;
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_lmtdma_data_t pko_send_dma_data;
	u64 dma_addr;
	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
	unsigned int scr_off;
	cvmx_pko3_dq_params_t *pparam;

	if (cvmx_unlikely(numwords < 1 || numwords > 15)) {
		debug("%s: ERROR: Internal error\n", __func__);
		pko_status.u64 = ~0ull;
		return pko_status;
	}

	pparam = cvmx_pko3_dq_parameters(node, dq);

	pko_status.u64 = 0;
	pko_send_dma_data.u64 = 0;

	/* LMTDMA address offset is (nWords-1) */
	dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
	dma_addr += (numwords - 1) << 3;

	scr_off = scr_base + numwords * sizeof(u64);

	/* Write all-ones into the return area */
	cvmx_scratch_write64(scr_off, ~0ull);

	/* Barrier: make sure all prior writes complete before the following */
	CVMX_SYNCWS;

	/* If cached depth exceeds limit, check the real depth */
	if (cvmx_unlikely(pparam->depth > pparam->limit)) {
		cvmx_pko_dqx_wm_cnt_t wm_cnt;

		wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
		pko_status.s.depth = wm_cnt.s.count;
		pparam->depth = pko_status.s.depth;

		if (pparam->depth > pparam->limit) {
			pko_status.s.dqop = dqop;
			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
			return pko_status;
		}
	} else {
		cvmx_atomic_add32_nosync(&pparam->depth, 1);
	}

	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		/* Request one return word */
		pko_send_dma_data.s.rtnlen = 1;
	} else {
		/* Do not expect a return word */
		pko_send_dma_data.s.rtnlen = 0;
	}

	/* build store data for DMA */
	pko_send_dma_data.s.scraddr = scr_off >> 3;
	pko_send_dma_data.s.did = 0x51;
	pko_send_dma_data.s.node = node;
	pko_send_dma_data.s.dqop = dqop;
	pko_send_dma_data.s.dq = dq;

	/* Wait to finish tag switch just before issuing LMTDMA */
	if (tag_wait)
		cvmx_pow_tag_sw_wait();

	/* issue PKO DMA */
	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);

	if (cvmx_unlikely(pko_send_dma_data.s.rtnlen)) {
		/* Wait for LMTDMA completion */
		CVMX_SYNCIOBDMA;

		/* Retrieve real result */
		pko_status.u64 = cvmx_scratch_read64(scr_off);
		pparam->depth = pko_status.s.depth;
	} else {
		/* Fake positive result */
		pko_status.s.dqop = dqop;
		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
	}

	return pko_status;
}

/*
 * @INTERNAL
 * Sends PKO descriptor commands via CVMSEG LM and LMTDMA.
 * @param node is the destination node
 * @param dq is the destination descriptor queue.
 * @param cmds[] is an array of 64-bit PKO3 headers/subheaders
 * @param numwords is the number of outgoing words
 * @param dqop is the operation code
 * @return the PKO3 native query result structure.
 *
 * <numwords> must be between 1 and 15 for CVMX_PKO_DQ_SEND command
 * otherwise it must be 0.
 *
 * NOTE: Internal use only.
 */
static inline cvmx_pko_query_rtn_t __cvmx_pko3_do_dma(u8 node, uint16_t dq,
						      u64 cmds[],
						      unsigned int numwords,
						      enum cvmx_pko_dqop dqop)
{
	const unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_lmtdma_data_t pko_send_dma_data;
	u64 dma_addr;
	unsigned int i, scr_off;
	cvmx_pko3_dq_params_t *pparam;

	pparam = cvmx_pko3_dq_parameters(node, dq);
	CVMX_PREFETCH0(pparam);
	/* Push WB */
	CVMX_SYNCWS;

	pko_status.u64 = 0;
	pko_send_dma_data.u64 = 0;

	if (cvmx_unlikely(numwords > 15)) {
		debug("%s: ERROR: Internal error\n", __func__);
		pko_status.u64 = ~0ull;
		return pko_status;
	}

	/* Store the command words into CVMSEG LM */
	for (i = 0, scr_off = scr_base; i < numwords; i++) {
		cvmx_scratch_write64(scr_off, cmds[i]);
		scr_off += sizeof(cmds[0]);
	}

	/* With 0 data to send, this is an IOBDMA, else LMTDMA operation */
	if (numwords == 0) {
		dma_addr = CVMX_IOBDMA_ORDERED_IO_ADDR;
	} else {
		/* LMTDMA address offset is (nWords-1) */
		dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
		dma_addr += (numwords - 1) << 3;
	}

	if (cvmx_likely(dqop == CVMX_PKO_DQ_SEND)) {
		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
			cvmx_pko_dqx_wm_cnt_t wm_cnt;

			wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
			pko_status.s.depth = wm_cnt.s.count;
			pparam->depth = pko_status.s.depth;
		}

		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
			pko_status.s.dqop = dqop;
			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
			return pko_status;
		}

		cvmx_atomic_add32_nosync(&pparam->depth, 1);
	}

	if (cvmx_unlikely(dqop != CVMX_PKO_DQ_SEND) ||
	    CVMX_ENABLE_PARAMETER_CHECKING) {
		/* Request one return word */
		pko_send_dma_data.s.rtnlen = 1;
		/* Write all-ones into the return area */
		cvmx_scratch_write64(scr_off, ~0ull);
	} else {
		/* Do not expect a return word */
		pko_send_dma_data.s.rtnlen = 0;
	}

	/* build store data for DMA */
	pko_send_dma_data.s.scraddr = scr_off >> 3;
	pko_send_dma_data.s.did = 0x51;
	pko_send_dma_data.s.node = node;
	pko_send_dma_data.s.dqop = dqop;
	pko_send_dma_data.s.dq = dq;

	/* Barrier: make sure all prior writes complete before the following */
	CVMX_SYNCWS;

	/* Wait to finish tag switch just before issuing LMTDMA */
	cvmx_pow_tag_sw_wait();

	/* issue PKO DMA */
	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);

	if (pko_send_dma_data.s.rtnlen) {
		/* Wait for LMTDMA completion */
		CVMX_SYNCIOBDMA;

		/* Retrieve real result */
		pko_status.u64 = cvmx_scratch_read64(scr_off);
		pparam->depth = pko_status.s.depth;
	} else {
		/* Fake positive result */
		pko_status.s.dqop = dqop;
		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
	}

	return pko_status;
}
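
/*
 * Illustrative sketch: with no command words the operation above is
 * issued as an IOBDMA, so a descriptor queue depth query can be
 * expressed as:
 *
 *	cvmx_pko_query_rtn_t rtn;
 *
 *	rtn = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_QUERY);
 *	if (rtn.s.dqstatus == PKO_DQSTATUS_PASS)
 *		depth = rtn.s.depth;
 */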

/*
 * Transmit packets through PKO, simplified API
 *
 * @INTERNAL
 *
 * @param dq is a global destination queue number
 * @param pki_ptr specifies packet first linked pointer as returned from
 * 'cvmx_wqe_get_pki_pkt_ptr()'.
 * @param len is the total number of bytes in the packet.
 * @param gaura is the aura to free packet buffers to after transmit.
 * @param pcounter is an address of a 64-bit counter to atomically
 * decrement when packet transmission is complete.
 * @param ptag is a Flow Tag pointer for packet ordering, or NULL.
 *
 * @return returns 0 if successful and -1 on failure.
 *
 * NOTE: This is a provisional API, and is subject to change.
 */
static inline int cvmx_pko3_xmit_link_buf(int dq, cvmx_buf_ptr_pki_t pki_ptr,
					  unsigned int len, int gaura,
					  u64 *pcounter, u32 *ptag)
{
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_send_hdr_t hdr_s;
	cvmx_pko_buf_ptr_t gtr_s;
	unsigned int node, nwords;
	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();

	/* Separate global DQ# into node and local DQ */
	node = dq >> 10;
	dq &= (1 << 10) - 1;

	/* Fill in header */
	hdr_s.u64 = 0;
	hdr_s.s.total = len;
	hdr_s.s.df = (gaura < 0);
	hdr_s.s.ii = 1;
	hdr_s.s.aura = (gaura >= 0) ? gaura : 0;

	/* Fill in gather */
	gtr_s.u64 = 0;
	gtr_s.s.subdc3 = CVMX_PKO_SENDSUBDC_LINK;
	gtr_s.s.addr = pki_ptr.addr;
	gtr_s.s.size = pki_ptr.size;

	/* Setup command word pointers */
	cvmx_scratch_write64(scr_base + sizeof(u64) * 0, hdr_s.u64);
	cvmx_scratch_write64(scr_base + sizeof(u64) * 1, gtr_s.u64);
	nwords = 2;

	/* Conditionally setup an atomic decrement counter */
	if (pcounter) {
		cvmx_pko_send_mem_t mem_s;

		/* Clear reserved bits before building the subcommand */
		mem_s.u64 = 0;
		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
		mem_s.s.dsz = MEMDSZ_B64;
		mem_s.s.alg = MEMALG_SUB;
		mem_s.s.offset = 1;
		mem_s.s.wmem = 0;
		mem_s.s.addr = cvmx_ptr_to_phys(CASTPTR(void, pcounter));
		cvmx_scratch_write64(scr_base + sizeof(u64) * nwords++,
				     mem_s.u64);
	}

	/* To preserve packet order, go atomic with DQ-specific tag */
	if (ptag)
		cvmx_pow_tag_sw(*ptag ^ dq, CVMX_POW_TAG_TYPE_ATOMIC);

	/* Do LMTDMA */
	pko_status = __cvmx_pko3_lmtdma(node, dq, nwords, ptag != NULL);

	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
		return 0;
	else
		return -1;
}
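
/*
 * Illustrative sketch (assumes the usual cvmx-wqe.h helpers and a
 * caller-owned u64 'pending_count'): transmitting a packet received
 * from PKI straight from its work-queue entry, returning the buffers
 * to their original aura and decrementing a completion counter:
 *
 *	cvmx_buf_ptr_pki_t pkt = cvmx_wqe_get_pki_pkt_ptr(wqe);
 *	int aura = cvmx_wqe_get_aura(wqe);
 *	u32 tag = cvmx_wqe_get_tag(wqe);
 *
 *	if (cvmx_pko3_xmit_link_buf(dq, pkt, cvmx_wqe_get_len(wqe),
 *				    aura, &pending_count, &tag) < 0)
 *		... the packet was not queued; drop or retry ...
 */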

/**
 * @INTERNAL
 *
 * Retrieve PKO internal AURA from register; the value is cached
 * in a static variable after the first read.
 */
static inline unsigned int __cvmx_pko3_aura_get(unsigned int node)
{
	static s16 aura = -1;
	cvmx_pko_dpfi_fpa_aura_t pko_aura;

	if (aura >= 0)
		return aura;

	pko_aura.u64 = csr_rd_node(node, CVMX_PKO_DPFI_FPA_AURA);

	aura = (pko_aura.s.node << 10) | pko_aura.s.laura;
	return aura;
}

/** Open configured descriptor queues before queueing packets into them.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be opened.
 * @return returns 0 on success or -1 on failure.
 */
int cvmx_pko_dq_open(int node, int dq);

/** Close a descriptor queue
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be closed.
 * @return returns 0 on success or -1 on failure.
 *
 * This should be called before changing the DQ parent link, topology,
 * or when shutting down the PKO.
 */
int cvmx_pko3_dq_close(int node, int dq);

/** Query a descriptor queue
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be queried.
 * @return returns the descriptor queue depth on success or -1 on failure.
 *
 * This should be called before changing the DQ parent link, topology,
 * or when shutting down the PKO.
 */
int cvmx_pko3_dq_query(int node, int dq);

/** Drain a descriptor queue
 *
 * Before closing a DQ, this call will drain all pending traffic
 * on the DQ to the NULL MAC, which will circumvent any traffic
 * shaping and flow control to quickly reclaim all packet buffers.
 */
void cvmx_pko3_dq_drain(int node, int dq);

/*
 * PKO global initialization for 78XX.
 *
 * @param node is the node on which PKO block is initialized.
 * @param aura is the 12-bit AURA (including node) for PKO internal use.
 * @return returns 0 on success or -1 on failure.
 */
int cvmx_pko3_hw_init_global(int node, uint16_t aura);

/**
 * Shutdown the entire PKO
 */
int cvmx_pko3_hw_disable(int node);

/* Define legacy type here to break circular dependency */
typedef struct cvmx_pko_port_status cvmx_pko_port_status_t;

/**
 * @INTERNAL
 * Backward compatibility for collecting statistics from PKO3
 *
 */
void cvmx_pko3_get_legacy_port_stats(u16 ipd_port, unsigned int clear,
				     cvmx_pko_port_status_t *status);

/** Set MAC options
 *
 * The options supported are the parameters below:
 *
 * @param xiface The physical interface number
 * @param index The physical sub-interface port
 * @param fcs_enable Enable FCS generation
 * @param pad_enable Enable padding to minimum packet size
 * @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
 *
 * The typical use for `fcs_sop_off` is when the interface is configured
 * to use a header such as HiGig preceding every Ethernet packet. Such
 * a header usually does not partake in the CRC32 computation stream,
 * so its size must be set with this parameter.
 *
 * @return Returns 0 on success, -1 if interface/port is invalid.
 */
int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
				bool pad_enable, unsigned int fcs_sop_off);

/** Set Descriptor Queue options
 *
 * The `min_pad` parameter must be in agreement with the interface-level
 * padding option for all descriptor queues assigned to that particular
 * interface/port.
 */
void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad);

int cvmx_pko3_port_fifo_size(unsigned int xiface, unsigned int index);
int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level);
int cvmx_pko3_port_xoff(unsigned int xiface, unsigned int index);
int cvmx_pko3_port_xon(unsigned int xiface, unsigned int index);

/* Packet descriptor - PKO3 command buffer + internal state */
typedef struct cvmx_pko3_pdesc_s {
	u64 *jump_buf;		/**< jump buffer vaddr */
	s16 last_aura;		/**< AURA of the latest LINK_S/GATHER_S */
	unsigned num_words : 5, /**< valid words in word array 2..16 */
		headroom : 10,	/**< free bytes at start of 1st buf */
		hdr_offsets : 1, pki_word4_present : 1;
	/* PKO3 command buffer: */
	cvmx_pko_send_hdr_t *hdr_s;
	u64 word[16]; /**< header and subcommands buffer */
	/* Bookkeeping fields: */
	u64 send_work_s; /**< SEND_WORK_S must be the very last subdc */
	s16 jb_aura;	 /**< AURA where the jump buffer belongs */
	u16 mem_s_ix;	 /**< index of first MEM_S subcommand */
	u8 ckl4_alg;	 /**< L3/L4 alg to use if recalc is needed */
	/* Fields saved from WQE for later inspection */
	cvmx_pki_wqe_word4_t pki_word4;
	cvmx_pki_wqe_word2_t pki_word2;
} cvmx_pko3_pdesc_t;

void cvmx_pko3_pdesc_init(cvmx_pko3_pdesc_t *pdesc);
int cvmx_pko3_pdesc_from_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
			     bool free_bufs);
int cvmx_pko3_pdesc_transmit(cvmx_pko3_pdesc_t *pdesc, uint16_t dq,
			     u32 *flow_tag);
int cvmx_pko3_pdesc_notify_decrement(cvmx_pko3_pdesc_t *pdesc,
				     volatile u64 *p_counter);
int cvmx_pko3_pdesc_notify_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
			       u8 node, uint8_t group, uint8_t tt, u32 tag);
int cvmx_pko3_pdesc_buf_append(cvmx_pko3_pdesc_t *pdesc, void *p_data,
			       unsigned int data_bytes, unsigned int gaura);
int cvmx_pko3_pdesc_append_free(cvmx_pko3_pdesc_t *pdesc, u64 addr,
				unsigned int gaura);
int cvmx_pko3_pdesc_hdr_push(cvmx_pko3_pdesc_t *pdesc, const void *p_data,
			     u8 data_bytes, uint8_t layer);
int cvmx_pko3_pdesc_hdr_pop(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
			    unsigned int num_bytes);
int cvmx_pko3_pdesc_hdr_peek(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
			     unsigned int num_bytes, unsigned int offset);
void cvmx_pko3_pdesc_set_free(cvmx_pko3_pdesc_t *pdesc, bool free_bufs);
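
/*
 * Illustrative sketch of the packet-descriptor API above (assumes a
 * 'wqe' received from PKI and an already-opened descriptor queue 'dq'):
 *
 *	cvmx_pko3_pdesc_t pdesc;
 *	u32 tag = cvmx_wqe_get_tag(wqe);
 *
 *	cvmx_pko3_pdesc_init(&pdesc);
 *	if (cvmx_pko3_pdesc_from_wqe(&pdesc, wqe, true) == 0)
 *		cvmx_pko3_pdesc_transmit(&pdesc, dq, &tag);
 */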

#endif /* __CVMX_PKO3_H__ */