libxdma.h
/*******************************************************************************
*
* Xilinx XDMA IP Core Linux Driver
* Copyright(c) 2015 - 2017 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "LICENSE".
*
* Karen Xie <[email protected]>
*
******************************************************************************/
#ifndef XDMA_LIB_H
#define XDMA_LIB_H
#include <linux/version.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
/* Switch debug printing on/off */
#define XDMA_DEBUG 0
/* SECTION: Preprocessor macros/constants */
#define XDMA_BAR_NUM (6)
/* maximum amount of register space to map */
#define XDMA_BAR_SIZE (0x8000UL)
/* Use this definition to poll several times between calls to schedule */
#define NUM_POLLS_PER_SCHED 100
#define XDMA_CHANNEL_NUM_MAX (4)
/*
* interrupts per engine, rad2_vul.sv:237
* .REG_IRQ_OUT (reg_irq_from_ch[(channel*2) +: 2]),
*/
#define XDMA_ENG_IRQ_NUM (1)
#define MAX_EXTRA_ADJ (15)
#define RX_STATUS_EOP (1)
/* Target internal components on XDMA control BAR */
#define XDMA_OFS_INT_CTRL (0x2000UL)
#define XDMA_OFS_CONFIG (0x3000UL)
/* maximum number of desc per transfer request */
#define XDMA_TRANSFER_MAX_DESC (2048)
/* maximum transfer length of a single descriptor (28-bit byte count) */
#define XDMA_DESC_BLEN_BITS 28
#define XDMA_DESC_BLEN_MAX ((1 << (XDMA_DESC_BLEN_BITS)) - 1)
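/*
 * Sizing sketch (illustrative, not from the original header): a request
 * longer than XDMA_DESC_BLEN_MAX bytes must be split across descriptors,
 * and one transfer carries at most XDMA_TRANSFER_MAX_DESC of them; any
 * remainder goes into a follow-up transfer.
 *
 *	unsigned int ndesc = len / XDMA_DESC_BLEN_MAX;
 *
 *	if (len % XDMA_DESC_BLEN_MAX)
 *		ndesc++;
 *	if (ndesc > XDMA_TRANSFER_MAX_DESC)
 *		ndesc = XDMA_TRANSFER_MAX_DESC;
 */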
/* bits of the SG DMA control register */
#define XDMA_CTRL_RUN_STOP (1UL << 0)
#define XDMA_CTRL_IE_DESC_STOPPED (1UL << 1)
#define XDMA_CTRL_IE_DESC_COMPLETED (1UL << 2)
#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH (1UL << 3)
#define XDMA_CTRL_IE_MAGIC_STOPPED (1UL << 4)
#define XDMA_CTRL_IE_IDLE_STOPPED (1UL << 6)
#define XDMA_CTRL_IE_READ_ERROR (0x1FUL << 9)
#define XDMA_CTRL_IE_DESC_ERROR (0x1FUL << 19)
#define XDMA_CTRL_NON_INCR_ADDR (1UL << 25)
#define XDMA_CTRL_POLL_MODE_WB (1UL << 26)
/* bits of the SG DMA status register */
#define XDMA_STAT_BUSY (1UL << 0)
#define XDMA_STAT_DESC_STOPPED (1UL << 1)
#define XDMA_STAT_DESC_COMPLETED (1UL << 2)
#define XDMA_STAT_ALIGN_MISMATCH (1UL << 3)
#define XDMA_STAT_MAGIC_STOPPED (1UL << 4)
#define XDMA_STAT_INVALID_LEN (1UL << 5)
#define XDMA_STAT_IDLE_STOPPED (1UL << 6)
#define XDMA_STAT_COMMON_ERR_MASK \
(XDMA_STAT_ALIGN_MISMATCH | XDMA_STAT_MAGIC_STOPPED | \
XDMA_STAT_INVALID_LEN)
/* desc_error, C2H & H2C */
#define XDMA_STAT_DESC_UNSUPP_REQ (1UL << 19)
#define XDMA_STAT_DESC_COMPL_ABORT (1UL << 20)
#define XDMA_STAT_DESC_PARITY_ERR (1UL << 21)
#define XDMA_STAT_DESC_HEADER_EP (1UL << 22)
#define XDMA_STAT_DESC_UNEXP_COMPL (1UL << 23)
#define XDMA_STAT_DESC_ERR_MASK \
(XDMA_STAT_DESC_UNSUPP_REQ | XDMA_STAT_DESC_COMPL_ABORT | \
XDMA_STAT_DESC_PARITY_ERR | XDMA_STAT_DESC_HEADER_EP | \
XDMA_STAT_DESC_UNEXP_COMPL)
/* read error: H2C */
#define XDMA_STAT_H2C_R_UNSUPP_REQ (1UL << 9)
#define XDMA_STAT_H2C_R_COMPL_ABORT (1UL << 10)
#define XDMA_STAT_H2C_R_PARITY_ERR (1UL << 11)
#define XDMA_STAT_H2C_R_HEADER_EP (1UL << 12)
#define XDMA_STAT_H2C_R_UNEXP_COMPL (1UL << 13)
#define XDMA_STAT_H2C_R_ERR_MASK \
(XDMA_STAT_H2C_R_UNSUPP_REQ | XDMA_STAT_H2C_R_COMPL_ABORT | \
XDMA_STAT_H2C_R_PARITY_ERR | XDMA_STAT_H2C_R_HEADER_EP | \
XDMA_STAT_H2C_R_UNEXP_COMPL)
/* write error, H2C only */
#define XDMA_STAT_H2C_W_DECODE_ERR (1UL << 14)
#define XDMA_STAT_H2C_W_SLAVE_ERR (1UL << 15)
#define XDMA_STAT_H2C_W_ERR_MASK \
(XDMA_STAT_H2C_W_DECODE_ERR | XDMA_STAT_H2C_W_SLAVE_ERR)
/* read error: C2H */
#define XDMA_STAT_C2H_R_DECODE_ERR (1UL << 9)
#define XDMA_STAT_C2H_R_SLAVE_ERR (1UL << 10)
#define XDMA_STAT_C2H_R_ERR_MASK \
(XDMA_STAT_C2H_R_DECODE_ERR | XDMA_STAT_C2H_R_SLAVE_ERR)
/* all combined */
#define XDMA_STAT_H2C_ERR_MASK \
(XDMA_STAT_COMMON_ERR_MASK | XDMA_STAT_DESC_ERR_MASK | \
XDMA_STAT_H2C_R_ERR_MASK | XDMA_STAT_H2C_W_ERR_MASK)
#define XDMA_STAT_C2H_ERR_MASK \
(XDMA_STAT_COMMON_ERR_MASK | XDMA_STAT_DESC_ERR_MASK | \
XDMA_STAT_C2H_R_ERR_MASK)
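/*
 * Decoding sketch (an assumption, not code from this driver): checking an
 * engine status word against the combined masks; the engine direction
 * selects which mask applies.
 *
 *	u32 status = read_register(&engine->regs->status);
 *	u32 err = status & (engine->dir == DMA_TO_DEVICE ?
 *			XDMA_STAT_H2C_ERR_MASK : XDMA_STAT_C2H_ERR_MASK);
 *
 *	if (err & XDMA_STAT_ALIGN_MISMATCH)
 *		pr_err("%s: address alignment mismatch\n", engine->name);
 */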
/* bits of the SGDMA descriptor control field */
#define XDMA_DESC_STOPPED (1UL << 0)
#define XDMA_DESC_COMPLETED (1UL << 1)
#define XDMA_DESC_EOP (1UL << 4)
#define XDMA_PERF_RUN (1UL << 0)
#define XDMA_PERF_CLEAR (1UL << 1)
#define XDMA_PERF_AUTO (1UL << 2)
#define MAGIC_ENGINE 0xEEEEEEEEUL
#define MAGIC_DEVICE 0xDDDDDDDDUL
/* upper 16-bits of engine identifier register */
#define XDMA_ID_H2C 0x1fc0U
#define XDMA_ID_C2H 0x1fc1U
/* for C2H AXI-ST mode */
#define CYCLIC_RX_PAGES_MAX 256
#define LS_BYTE_MASK 0x000000FFUL
#define BLOCK_ID_MASK 0xFFF00000
#define BLOCK_ID_HEAD 0x1FC00000
#define IRQ_BLOCK_ID 0x1fc20000UL
#define CONFIG_BLOCK_ID 0x1fc30000UL
#define WB_COUNT_MASK 0x00ffffffUL
#define WB_ERR_MASK (1UL << 31)
#define POLL_TIMEOUT_SECONDS 10
#define MAX_USER_IRQ 16
#define MAX_DESC_BUS_ADDR (0xffffffffULL)
#define DESC_MAGIC 0xAD4B0000UL
#define C2H_WB 0x52B4UL
#define MAX_NUM_ENGINES (XDMA_CHANNEL_NUM_MAX * 2)
#define H2C_CHANNEL_OFFSET 0x1000
#define SGDMA_OFFSET_FROM_CHANNEL 0x4000
#define CHANNEL_SPACING 0x100
#define TARGET_SPACING 0x1000
#define BYPASS_MODE_SPACING 0x0100
/* obtain the 32 most significant (high) bits of a 32-bit or 64-bit address */
#define PCI_DMA_H(addr) (((addr) >> 16) >> 16)
/* obtain the 32 least significant (low) bits of a 32-bit or 64-bit address */
#define PCI_DMA_L(addr) ((addr) & 0xffffffffUL)
#ifndef VM_RESERVED
#define VMEM_FLAGS (VM_IO | VM_DONTEXPAND | VM_DONTDUMP)
#else
#define VMEM_FLAGS (VM_IO | VM_RESERVED)
#endif
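/*
 * Usage sketch (assumption): VMEM_FLAGS is intended for an mmap() handler
 * that maps device registers into user space:
 *
 *	vma->vm_flags |= VMEM_FLAGS;
 */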
#ifdef __LIBXDMA_DEBUG__
#define dbg_io pr_err
#define dbg_fops pr_err
#define dbg_perf pr_err
#define dbg_sg pr_err
#define dbg_tfr pr_err
#define dbg_irq pr_err
#define dbg_init pr_err
#define dbg_desc pr_err
#else
/* disable debugging */
#define dbg_io(...)
#define dbg_fops(...)
#define dbg_perf(...)
#define dbg_sg(...)
#define dbg_tfr(...)
#define dbg_irq(...)
#define dbg_init(...)
#define dbg_desc(...)
#endif
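/*
 * The dbg_* macros take ordinary printk-style arguments and compile away
 * unless __LIBXDMA_DEBUG__ is defined at build time, e.g.:
 *
 *	dbg_tfr("%s: transfer 0x%p submitted\n", engine->name, xfer);
 */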
/* SECTION: Enum definitions */
enum transfer_state {
TRANSFER_STATE_NEW = 0,
TRANSFER_STATE_SUBMITTED,
TRANSFER_STATE_COMPLETED,
TRANSFER_STATE_FAILED,
TRANSFER_STATE_ABORTED
};
enum shutdown_state {
ENGINE_SHUTDOWN_NONE = 0, /* No shutdown in progress */
ENGINE_SHUTDOWN_REQUEST = 1, /* engine requested to shutdown */
ENGINE_SHUTDOWN_IDLE = 2 /* engine has shutdown and is idle */
};
enum dev_capabilities {
CAP_64BIT_DMA = 2,
CAP_64BIT_DESC = 4,
CAP_ENGINE_WRITE = 8,
CAP_ENGINE_READ = 16
};
/* SECTION: Structure definitions */
struct config_regs {
u32 identifier;
u32 reserved_1[4];
u32 msi_enable;
};
/**
* SG DMA Controller status and control registers
*
* These registers form the control interface for DMA transfers.
*
* The block sits in end point (FPGA) memory, in BAR[0] for a 32-bit BAR or
* BAR[0:1] for a 64-bit BAR. It references the first descriptor, which
* resides in root complex (PC) memory.
*
* @note The registers must be accessed using 32-bit (PCI DWORD) read/writes,
* and their values are in little-endian byte ordering.
*/
struct engine_regs {
u32 identifier;
u32 control;
u32 control_w1s;
u32 control_w1c;
u32 reserved_1[12]; /* padding */
u32 status;
u32 status_rc;
u32 completed_desc_count;
u32 alignments;
u32 reserved_2[14]; /* padding */
u32 poll_mode_wb_lo;
u32 poll_mode_wb_hi;
u32 interrupt_enable_mask;
u32 interrupt_enable_mask_w1s;
u32 interrupt_enable_mask_w1c;
u32 reserved_3[9]; /* padding */
u32 perf_ctrl;
u32 perf_cyc_lo;
u32 perf_cyc_hi;
u32 perf_dat_lo;
u32 perf_dat_hi;
u32 perf_pnd_lo;
u32 perf_pnd_hi;
} __packed;
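/*
 * Access sketch (assumption): per the note above, the block is accessed
 * with 32-bit reads/writes; the upper 16 bits of the identifier tell H2C
 * and C2H engines apart (XDMA_ID_H2C/XDMA_ID_C2H above).
 *
 *	u32 id = read_register(&engine->regs->identifier);
 *
 *	if ((id >> 16) == XDMA_ID_H2C)
 *		engine->dir = DMA_TO_DEVICE;
 */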
struct engine_sgdma_regs {
u32 identifier;
u32 reserved_1[31]; /* padding */
/* bus address to first descriptor in Root Complex Memory */
u32 first_desc_lo;
u32 first_desc_hi;
/* number of adjacent descriptors at first_desc */
u32 first_desc_adjacent;
u32 credits;
} __packed;
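/*
 * Programming sketch (assumption): pointing the engine at the head of a
 * descriptor chain, splitting the bus address with the PCI_DMA_L/PCI_DMA_H
 * helpers defined above.
 *
 *	write_register(PCI_DMA_L(xfer->desc_bus), &sgdma_regs->first_desc_lo);
 *	write_register(PCI_DMA_H(xfer->desc_bus), &sgdma_regs->first_desc_hi);
 *	write_register(xfer->desc_adjacent, &sgdma_regs->first_desc_adjacent);
 */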
struct msix_vec_table_entry {
u32 msi_vec_addr_lo;
u32 msi_vec_addr_hi;
u32 msi_vec_data_lo;
u32 msi_vec_data_hi;
} __packed;
struct msix_vec_table {
struct msix_vec_table_entry entry_list[32];
} __packed;
struct interrupt_regs {
u32 identifier;
u32 user_int_enable;
u32 user_int_enable_w1s;
u32 user_int_enable_w1c;
u32 channel_int_enable;
u32 channel_int_enable_w1s;
u32 channel_int_enable_w1c;
u32 reserved_1[9]; /* padding */
u32 user_int_request;
u32 channel_int_request;
u32 user_int_pending;
u32 channel_int_pending;
u32 reserved_2[12]; /* padding */
u32 user_msi_vector[8];
u32 channel_msi_vector[8];
} __packed;
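/*
 * Usage sketch (assumption): the *_w1s/*_w1c registers are write-1-to-set
 * and write-1-to-clear views of the same enable registers, so single bits
 * can be changed without a read-modify-write:
 *
 *	write_register(engine->irq_bitmask,
 *		       &irq_regs->channel_int_enable_w1s);
 */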
struct sgdma_common_regs {
u32 padding[8];
u32 credit_mode_enable;
u32 credit_mode_enable_w1s;
u32 credit_mode_enable_w1c;
} __packed;
/* Structure for polled mode descriptor writeback */
struct xdma_poll_wb {
u32 completed_desc_count;
u32 reserved_1[7];
} __packed;
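/*
 * Polling sketch (assumption): in poll mode the engine writes this
 * structure back to host memory; WB_COUNT_MASK extracts the completed
 * descriptor count and WB_ERR_MASK flags an error.
 *
 *	struct xdma_poll_wb *wb =
 *		(struct xdma_poll_wb *)engine->poll_mode_addr_virt;
 *	u32 v = wb->completed_desc_count;
 *
 *	if (v & WB_ERR_MASK)
 *		goto fail;
 *	completed = v & WB_COUNT_MASK;
 */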
/**
* Descriptor for a single contiguous memory block transfer.
*
* Multiple descriptors are linked through the next pointer. In addition, an
* adjacent count tells the engine how many further descriptors follow
* contiguously in memory.
*
* The descriptors are in root complex memory, and the bytes in the 32-bit
* words must be in little-endian byte ordering.
*/
struct xdma_desc {
u32 control;
u32 bytes; /* transfer length in bytes */
u32 src_addr_lo; /* source address (low 32-bit) */
u32 src_addr_hi; /* source address (high 32-bit) */
u32 dst_addr_lo; /* destination address (low 32-bit) */
u32 dst_addr_hi; /* destination address (high 32-bit) */
/*
* next descriptor in the single-linked list of descriptors;
* this is the PCIe (bus) address of the next descriptor in the
* root complex memory
*/
u32 next_lo; /* next desc address (low 32-bit) */
u32 next_hi; /* next desc address (high 32-bit) */
} __packed;
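/*
 * Fill sketch (assumption): per the note above, fields are little-endian,
 * so values pass through cpu_to_le32() before the engine reads them. For
 * an H2C descriptor, src is host memory and dst is the card address:
 *
 *	desc->control = cpu_to_le32(DESC_MAGIC);
 *	desc->bytes = cpu_to_le32(len);
 *	desc->src_addr_lo = cpu_to_le32(PCI_DMA_L(host_addr));
 *	desc->src_addr_hi = cpu_to_le32(PCI_DMA_H(host_addr));
 *	desc->dst_addr_lo = cpu_to_le32(PCI_DMA_L(ep_addr));
 *	desc->dst_addr_hi = cpu_to_le32(PCI_DMA_H(ep_addr));
 */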
/* 32 bytes (eight 32-bit words) */
struct xdma_result {
u32 status;
u32 length;
u32 reserved_1[6]; /* padding */
} __packed;
struct sw_desc {
dma_addr_t addr;
unsigned int len;
};
/* Describes a (SG DMA) single transfer for the engine */
struct xdma_transfer {
struct list_head entry; /* queue of non-completed transfers */
struct xdma_desc *desc_virt; /* virt addr of the 1st descriptor */
dma_addr_t desc_bus; /* bus addr of the first descriptor */
int desc_adjacent; /* adjacent descriptors at desc_bus */
int desc_num; /* number of descriptors in transfer */
enum dma_data_direction dir;
wait_queue_head_t wq; /* wait queue for transfer completion */
enum transfer_state state; /* state of the transfer */
unsigned int flags;
#define XFER_FLAG_NEED_UNMAP 0x1
int cyclic; /* flag if transfer is cyclic */
int last_in_request; /* flag if last within request */
unsigned int len;
struct sg_table *sgt;
};
struct xdma_request_cb {
struct sg_table *sgt;
unsigned int total_len;
u64 ep_addr;
struct xdma_transfer xfer;
unsigned int sw_desc_idx;
unsigned int sw_desc_cnt;
struct sw_desc sdesc[];
};
struct xdma_engine {
unsigned long magic; /* structure ID for sanity checks */
struct xdma_dev *xdev; /* parent device */
char name[5]; /* name of this engine */
int version; /* version of this engine */
//dev_t cdevno; /* character device major:minor */
//struct cdev cdev; /* character device (embedded struct) */
/* HW register address offsets */
struct engine_regs *regs; /* Control reg BAR offset */
struct engine_sgdma_regs *sgdma_regs; /* SGDMA reg BAR offset */
u32 bypass_offset; /* Bypass mode BAR offset */
/* Engine state, configuration and flags */
enum shutdown_state shutdown; /* engine shutdown mode */
enum dma_data_direction dir;
int device_open; /* flag if engine node open, ST mode only */
int running; /* flag if the driver started engine */
int non_incr_addr; /* flag if non-incremental addressing used */
int streaming;
int addr_align; /* source/dest alignment in bytes */
int len_granularity; /* transfer length multiple */
int addr_bits; /* HW datapath address width */
int channel; /* engine indices */
int max_extra_adj; /* descriptor prefetch capability */
int desc_dequeued; /* num descriptors of completed transfers */
u32 status; /* last known status of device */
u32 interrupt_enable_mask_value; /* per-engine interrupt mask value (MSI-X mode only) */
/* Transfer list management */
struct list_head transfer_list; /* queue of transfers */
/* Members applicable to AXI-ST C2H (cyclic) transfers */
struct xdma_result *cyclic_result;
dma_addr_t cyclic_result_bus; /* bus addr for transfer */
struct xdma_request_cb *cyclic_req;
struct sg_table cyclic_sgt;
u8 eop_found; /* used only for cyclic(rx:c2h) */
int rx_tail; /* follows the HW */
int rx_head; /* where the SW reads from */
int rx_overrun; /* flag if overrun occurred */
/* for copy from cyclic buffer to user buffer */
unsigned int user_buffer_index;
/* Members associated with polled mode support */
u8 *poll_mode_addr_virt; /* virt addr for descriptor writeback */
dma_addr_t poll_mode_bus; /* bus addr for descriptor writeback */
/* Members associated with interrupt mode support */
wait_queue_head_t shutdown_wq; /* wait queue for shutdown sync */
spinlock_t lock; /* protects concurrent access */
int prev_cpu; /* remember CPU# of (last) locker */
int msix_irq_line; /* MSI-X vector for this engine */
u32 irq_bitmask; /* IRQ bit mask for this engine */
struct work_struct work; /* Work queue for interrupt handling */
spinlock_t desc_lock; /* protects concurrent access */
dma_addr_t desc_bus;
struct xdma_desc *desc;
/* for performance test support */
struct xdma_performance_ioctl *xdma_perf; /* perf test control */
wait_queue_head_t xdma_perf_wq; /* Perf test sync */
};
struct xdma_user_irq {
struct xdma_dev *xdev; /* parent device */
u8 user_idx; /* 0 ~ 15 */
u8 events_irq; /* accumulated IRQs */
spinlock_t events_lock; /* lock to safely update events_irq */
wait_queue_head_t events_wq; /* wait queue to sync waiting threads */
irq_handler_t handler;
void *dev;
};
/* XDMA PCIe device specific book-keeping */
#define XDEV_FLAG_OFFLINE 0x1
struct xdma_dev {
struct list_head list_head;
struct list_head rcu_node;
unsigned long magic; /* structure ID for sanity checks */
struct pci_dev *pdev; /* pci device struct from probe() */
int idx; /* dev index */
const char *mod_name; /* name of module owning the dev */
spinlock_t lock; /* protects concurrent access */
unsigned int flags;
/* PCIe BAR management */
void __iomem *bar[XDMA_BAR_NUM]; /* addresses for mapped BARs */
int user_bar_idx; /* BAR index of user logic */
int config_bar_idx; /* BAR index of XDMA config logic */
int bypass_bar_idx; /* BAR index of XDMA bypass logic */
int regions_in_use; /* flag if dev was in use during probe() */
int got_regions; /* flag if probe() obtained the regions */
int user_max;
int c2h_channel_max;
int h2c_channel_max;
/* Interrupt management */
int irq_count; /* interrupt counter */
int irq_line; /* flag if irq allocated successfully */
int msi_enabled; /* flag if msi was enabled for the device */
int msix_enabled; /* flag if msi-x was enabled for the device */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
struct msix_entry entry[32]; /* msi-x vector/entry table */
#endif
struct xdma_user_irq user_irq[16]; /* user IRQ management */
unsigned int mask_irq_user;
/* XDMA engine management */
int engines_num; /* Total engine count */
u32 mask_irq_h2c;
u32 mask_irq_c2h;
struct xdma_engine engine_h2c[XDMA_CHANNEL_NUM_MAX];
struct xdma_engine engine_c2h[XDMA_CHANNEL_NUM_MAX];
/* SD_Accel specific */
enum dev_capabilities capabilities;
u64 feature_id;
};
static inline int xdma_device_flag_check(struct xdma_dev *xdev, unsigned int f)
{
unsigned long flags;
spin_lock_irqsave(&xdev->lock, flags);
if (xdev->flags & f) {
spin_unlock_irqrestore(&xdev->lock, flags);
return 1;
}
spin_unlock_irqrestore(&xdev->lock, flags);
return 0;
}
static inline int xdma_device_flag_test_n_set(struct xdma_dev *xdev,
unsigned int f)
{
unsigned long flags;
int rv = 0;
spin_lock_irqsave(&xdev->lock, flags);
if (xdev->flags & f) {
spin_unlock_irqrestore(&xdev->lock, flags);
rv = 1;
} else
xdev->flags |= f;
spin_unlock_irqrestore(&xdev->lock, flags);
return rv;
}
static inline void xdma_device_flag_set(struct xdma_dev *xdev, unsigned int f)
{
unsigned long flags;
spin_lock_irqsave(&xdev->lock, flags);
xdev->flags |= f;
spin_unlock_irqrestore(&xdev->lock, flags);
}
static inline void xdma_device_flag_clear(struct xdma_dev *xdev, unsigned int f)
{
unsigned long flags;
spin_lock_irqsave(&xdev->lock, flags);
xdev->flags &= ~f;
spin_unlock_irqrestore(&xdev->lock, flags);
}
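/*
 * Usage sketch: the helpers above serialize flag updates against other
 * xdev->lock users. A typical offline sequence (illustrative):
 *
 *	if (xdma_device_flag_test_n_set(xdev, XDEV_FLAG_OFFLINE))
 *		return;
 *
 * (quiesce the engines; once the device comes back:)
 *
 *	xdma_device_flag_clear(xdev, XDEV_FLAG_OFFLINE);
 */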
void write_register(u32 value, void *iomem);
u32 read_register(void *iomem);
struct xdma_dev *xdev_find_by_pdev(struct pci_dev *pdev);
void xdma_device_offline(struct pci_dev *pdev, void *dev_handle);
void xdma_device_online(struct pci_dev *pdev, void *dev_handle);
#ifdef INTERNAL_TESTING
int xdma_performance_submit(struct xdma_dev *xdev, struct xdma_engine *engine);
struct xdma_transfer *engine_cyclic_stop(struct xdma_engine *engine);
void enable_perf(struct xdma_engine *engine);
void get_perf_stats(struct xdma_engine *engine);
int xdma_cyclic_transfer_setup(struct xdma_engine *engine);
int xdma_cyclic_transfer_teardown(struct xdma_engine *engine);
ssize_t xdma_engine_read_cyclic(struct xdma_engine *, char __user *, size_t,
int);
#endif
int engine_addrmode_set(struct xdma_engine *engine, unsigned long arg);
#endif /* XDMA_LIB_H */