/*
 * Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <metal/errno.h>
#include <string.h>
#include <metal/device.h>
#include <metal/log.h>
#include <metal/dma.h>
#include <metal/atomic.h>

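/*
 * Map a scatter/gather list for DMA.  Apply the memory barrier the
 * requested direction needs, then hand the list to the bus back end.
 * Returns the number of mapped entries on success, -EINVAL on bad
 * arguments, or -ENODEV if the bus provides no DMA map operation.
 */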
int metal_dma_map(struct metal_device *dev,
		  uint32_t dir,
		  struct metal_sg *sg_in,
		  int nents_in,
		  struct metal_sg *sg_out)
{
	int nents_out;

	if (!dev || !sg_in || !sg_out)
		return -EINVAL;
	if (!dev->bus->ops.dev_dma_map)
		return -ENODEV;

	/* Memory barrier before the device touches the buffers. */
	if (dir == METAL_DMA_DEV_R)
		/* The device will read from memory: a write (release)
		   fence makes prior CPU writes visible to it. */
		atomic_thread_fence(memory_order_release);
	else
		/* The device will write, or read and write: apply a
		   full read/write (acquire/release) fence. */
		atomic_thread_fence(memory_order_acq_rel);
	nents_out = dev->bus->ops.dev_dma_map(dev->bus,
			dev, dir, sg_in, nents_in, sg_out);
	return nents_out;
}
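
/*
 * Illustrative sketch, not part of the library (kept under "#if 0"):
 * one way a caller might pair metal_dma_map()/metal_dma_unmap() for a
 * buffer the device writes into.  The function name and the buf/len
 * parameters are assumptions for the example; struct metal_sg and the
 * METAL_DMA_DEV_W direction come from metal/dma.h.
 */
#if 0
static int example_dev_write(struct metal_device *dev, void *buf, int len)
{
	struct metal_sg sg_in = { .virt = buf, .io = NULL, .len = len };
	struct metal_sg sg_out = { 0 };
	int nents;

	nents = metal_dma_map(dev, METAL_DMA_DEV_W, &sg_in, 1, &sg_out);
	if (nents < 0)
		return nents;	/* -EINVAL or -ENODEV, as above */

	/* ... start the transfer and wait for the device to finish ... */

	metal_dma_unmap(dev, METAL_DMA_DEV_W, &sg_out, nents);
	return 0;
}
#endif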
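/*
 * Unmap a DMA scatter/gather list.  Apply the memory barrier that
 * matches the direction, then hand the list back to the bus back end.
 * Returns silently if the arguments are invalid or the bus provides
 * no DMA unmap operation.
 */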
void metal_dma_unmap(struct metal_device *dev,
		     uint32_t dir,
		     struct metal_sg *sg,
		     int nents)
{
	if (!dev || !dev->bus->ops.dev_dma_unmap || !sg)
		return;

	/* Memory barrier before the CPU reclaims the buffers. */
	if (dir == METAL_DMA_DEV_R)
		/* The device was reading from memory: a write (release)
		   fence orders any outstanding CPU writes. */
		atomic_thread_fence(memory_order_release);
	else
		/* The device was writing, or reading and writing: apply a
		   full read/write (acquire/release) fence. */
		atomic_thread_fence(memory_order_acq_rel);

	dev->bus->ops.dev_dma_unmap(dev->bus,
			dev, dir, sg, nents);
}