// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <pci_ids.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"
static LIST_HEAD(octeontx_bch_devices);
static unsigned int num_vfs = BCH_NR_VF;
static void *bch_pf;
static void *bch_vf;
static void *token;
static bool bch_pf_initialized;
static bool bch_vf_initialized;

static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
{
	int ret;

	ret = pci_sriov_init(dev, nr_virtfn);
	if (ret)
		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
		       dev->name, ret);
	return ret;
}

void *octeontx_bch_getv(void)
{
	if (!bch_vf)
		return NULL;
	if (bch_vf_initialized && bch_pf_initialized)
		return bch_vf;
	else
		return NULL;
}

void octeontx_bch_putv(void *token)
{
	bch_vf_initialized = !!token;
	bch_vf = token;
}

void *octeontx_bch_getp(void)
{
	return token;
}

void octeontx_bch_putp(void *token)
{
	bch_pf = token;
	bch_pf_initialized = !!token;
}

static int do_bch_init(struct bch_device *bch)
{
	return 0;
}

static void bch_reset(struct bch_device *bch)
{
	writeq(1, bch->reg_base + BCH_CTL);
	mdelay(2);
}

static void bch_disable(struct bch_device *bch)
{
	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
	bch_reset(bch);
}

static u32 bch_check_bist_status(struct bch_device *bch)
{
	return readq(bch->reg_base + BCH_BIST_RESULT);
}

static int bch_device_init(struct bch_device *bch)
{
	u64 bist;
	int rc;

	debug("%s: Resetting...\n", __func__);
	/* Reset the PF when probed first */
	bch_reset(bch);

	debug("%s: Checking BIST...\n", __func__);
	/* Check BIST status */
	bist = (u64)bch_check_bist_status(bch);
	if (bist) {
		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
		return -ENODEV;
	}

	/* Get max VQs/VFs supported by the device */
	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
	debug("%s: %d vfs\n", __func__, bch->max_vfs);
	if (num_vfs > bch->max_vfs) {
		dev_warn(bch->dev,
			 "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
			 num_vfs, bch->max_vfs);
		num_vfs = bch->max_vfs;
	}
	bch->vfs_enabled = bch->max_vfs;

	/* Get number of VQs/VFs to be enabled */
	/* TODO: Get CLK frequency */
	/* Reset device parameters */
	debug("%s: Doing initialization\n", __func__);
	rc = do_bch_init(bch);
	return rc;
}

static int bch_sriov_configure(struct udevice *dev, int numvfs)
{
	struct bch_device *bch = dev_get_priv(dev);
	int ret = -EBUSY;

	debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
	      dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
	if (bch->vfs_in_use)
		goto exit;

	ret = 0;
	if (numvfs > 0) {
		debug("%s: Enabling sriov\n", __func__);
		ret = pci_enable_sriov(dev, numvfs);
		if (ret == 0) {
			bch->flags |= BCH_FLAG_SRIOV_ENABLED;
			ret = numvfs;
			bch->vfs_enabled = numvfs;
		}
	}

	debug("VFs enabled: %d\n", ret);
exit:
	debug("%s: Returning %d\n", __func__, ret);
	return ret;
}

static int octeontx_pci_bchpf_probe(struct udevice *dev)
{
	struct bch_device *bch;
	int ret;

	debug("%s(%s)\n", __func__, dev->name);
	bch = dev_get_priv(dev);
	if (!bch)
		return -ENOMEM;

	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				       PCI_REGION_TYPE, PCI_REGION_MEM);
	bch->dev = dev;
	debug("%s: base address: %p\n", __func__, bch->reg_base);

	ret = bch_device_init(bch);
	if (ret) {
		printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
		return ret;
	}

	INIT_LIST_HEAD(&bch->list);
	list_add(&bch->list, &octeontx_bch_devices);
	token = (void *)dev;

	debug("%s: Configuring SRIOV\n", __func__);
	bch_sriov_configure(dev, num_vfs);
	debug("%s: Done.\n", __func__);
	octeontx_bch_putp(bch);

	return 0;
}

static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
	{},
};

static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF) },
	{},
};

/**
 * Given a data block, calculate the ECC data and fill in the response
 *
 * @param vf		BCH virtual function to submit the command to
 * @param[in] block	8-byte aligned pointer to data block to calculate ECC
 * @param block_size	Size of block in bytes, must be a multiple of two.
 * @param bch_level	Number of errors that must be corrected. The number of
 *			parity bytes is equal to ((15 * bch_level) + 7) / 8.
 *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] ecc	8-byte aligned pointer to where ecc data should go
 * @param[in] resp	pointer to where responses will be written.
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
			u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_gen;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = ecc;
	cmd.s.iword.ptr = block;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);
	return 0;
}
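
/*
 * Illustrative usage sketch (not part of this driver): a caller would
 * typically allocate a small DMA-coherent response word, clear it, queue
 * the encode command and then poll for completion with octeontx_bch_wait().
 * The buffer handles (data_dma, ecc_dma) and the surrounding error handling
 * are assumptions for the sketch, not code from this file.
 *
 *	struct bch_vf *vf = octeontx_bch_getv();
 *	union bch_resp *resp;
 *	unsigned long rhandle;
 *	int err;
 *
 *	resp = dma_alloc_coherent(sizeof(*resp), &rhandle);
 *	memset(resp, 0, sizeof(*resp));
 *	if (!octeontx_bch_encode(vf, data_dma, block_size, bch_level,
 *				 ecc_dma, rhandle))
 *		err = octeontx_bch_wait(vf, resp, rhandle);
 */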

/**
 * Given a data block and ECC data, correct the data block
 *
 * @param vf			BCH virtual function to submit the command to
 * @param[in] block_ecc_in	8-byte aligned pointer to data block with ECC
 *				data concatenated to the end to correct
 * @param block_size		Size of block in bytes, must be a multiple of
 *				two.
 * @param bch_level		Number of errors that must be corrected. The
 *				number of parity bytes is equal to
 *				((15 * bch_level) + 7) / 8.
 *				Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] block_out	8-byte aligned pointer to corrected data buffer.
 *				This should not be the same as block_ecc_in.
 * @param[in] resp		pointer to where responses will be written.
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
			u16 block_size, u8 bch_level,
			dma_addr_t block_out, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_correct;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = block_out;
	cmd.s.iword.ptr = block_ecc_in;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);
	return 0;
}
EXPORT_SYMBOL(octeontx_bch_decode);
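
/*
 * Illustrative decode sketch (assumption, not code from this file): the
 * flow mirrors the encode case, except that the input buffer carries the
 * data with its ECC bytes appended and the corrected data is written to a
 * separate output buffer.  After octeontx_bch_wait() reports completion,
 * the caller inspects the response word (see union bch_resp in
 * octeontx_bch.h) to learn how many bits were corrected or whether the
 * block was uncorrectable.
 *
 *	struct bch_vf *vf = octeontx_bch_getv();
 *
 *	memset(resp, 0, sizeof(*resp));
 *	if (!octeontx_bch_decode(vf, block_ecc_dma, block_size, bch_level,
 *				 fixed_dma, rhandle))
 *		err = octeontx_bch_wait(vf, resp, rhandle);
 */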

int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
		      dma_addr_t handle)
{
	ulong start = get_timer(0);

	__iormb(); /* HW is updating *resp */
	while (!resp->s.done && get_timer(start) < 10)
		__iormb(); /* HW is updating *resp */

	if (resp->s.done)
		return 0;

	return -ETIMEDOUT;
}

struct bch_q octeontx_bch_q[QID_MAX];

static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
					 int max_depth, int fpa_pool,
					 int pool_size)
{
	/* some params are for later merge with CPT or cn83xx */
	struct bch_q *q = &octeontx_bch_q[queue_id];
	unsigned long paddr;
	u64 *chunk_buffer;
	int chunk = max_depth + 1;
	int i, size;

	if ((unsigned int)queue_id >= QID_MAX)
		return -EINVAL;
	if (max_depth & chunk)	/* must be 2^N - 1 */
		return -EINVAL;

	size = NQS * chunk * sizeof(u64);
	chunk_buffer = dma_alloc_coherent(size, &paddr);
	if (!chunk_buffer)
		return -ENOMEM;

	q->base_paddr = paddr;
	q->dev = dev;
	q->index = 0;
	q->max_depth = max_depth;
	q->pool_size_m1 = pool_size;
	q->base_vaddr = chunk_buffer;

	/*
	 * Link the NQS chunks into a ring: the last u64 of each chunk holds
	 * the DMA address of the first entry of the next chunk, so the
	 * hardware can follow the command buffer from chunk to chunk.
	 */
	for (i = 0; i < NQS; i++) {
		u64 *ixp;
		int inext = (i + 1) * chunk - 1;
		int j = (i + 1) % NQS;
		int jnext = j * chunk;
		dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);

		ixp = &chunk_buffer[inext];
		*ixp = jbase;
	}

	return 0;
}

static int octeontx_pci_bchvf_probe(struct udevice *dev)
{
	struct bch_vf *vf;
	union bch_vqx_ctl ctl;
	union bch_vqx_cmd_buf cbuf;
	int err;

	debug("%s(%s)\n", __func__, dev->name);
	vf = dev_get_priv(dev);
	if (!vf)
		return -ENOMEM;

	vf->dev = dev;
	/* Map the VF's configuration registers */
	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				      PCI_REGION_TYPE, PCI_REGION_MEM);
	debug("%s: reg base: %p\n", __func__, vf->reg_base);

	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
					    sizeof(union bch_cmd) * QDEPTH);
	if (err) {
		dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
		goto release;
	}

	/*
	 * Program the virtqueue: command-buffer attributes and depth, the
	 * control register, and the DMA address of the first command chunk.
	 */
	ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));

	cbuf.u = 0;
	cbuf.s.ldwb = 1;
	cbuf.s.dfb = 1;
	cbuf.s.size = QDEPTH;
	writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));

	writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));

	writeq(octeontx_bch_q[QID_BCH].base_paddr,
	       vf->reg_base + BCH_VQX_CMD_PTR(0));

	octeontx_bch_putv(vf);

	debug("%s: bch vf initialization complete\n", __func__);

	if (octeontx_bch_getv())
		return octeontx_pci_nand_deferred_probe();

	return -1;

release:
	return err;
}

static int octeontx_pci_bchpf_remove(struct udevice *dev)
{
	struct bch_device *bch = dev_get_priv(dev);

	bch_disable(bch);
	return 0;
}

U_BOOT_DRIVER(octeontx_pci_bchpf) = {
	.name	= BCHPF_DRIVER_NAME,
	.id	= UCLASS_MISC,
	.probe	= octeontx_pci_bchpf_probe,
	.remove	= octeontx_pci_bchpf_remove,
	.priv_auto	= sizeof(struct bch_device),
	.flags	= DM_FLAG_OS_PREPARE,
};

U_BOOT_DRIVER(octeontx_pci_bchvf) = {
	.name	= BCHVF_DRIVER_NAME,
	.id	= UCLASS_MISC,
	.probe	= octeontx_pci_bchvf_probe,
	.priv_auto	= sizeof(struct bch_vf),
};

U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);