Diffstat (limited to 'include/linux/dma-buf.h')
-rw-r--r--  include/linux/dma-buf.h  16
1 files changed, 16 insertions, 0 deletions
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3efbfc2145c3..1f78d1594cc7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -61,6 +61,10 @@ struct dma_buf_attachment;
  *		   This Callback must not sleep.
  * @kmap: maps a page from the buffer into kernel address space.
  * @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ *	  mapping needs to be coherent - if the exporter doesn't directly
+ *	  support this, it needs to fake coherency by shooting down any ptes
+ *	  when transitioning away from the cpu domain.
  */
 struct dma_buf_ops {
 	int (*attach)(struct dma_buf *, struct device *,
@@ -92,6 +96,8 @@ struct dma_buf_ops {
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
 	void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
 };
 
 /**
@@ -167,6 +173,9 @@ void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+		 unsigned long);
 #else
 
 static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -248,6 +257,13 @@ static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
 					unsigned long pnum, void *vaddr)
 {
 }
+
+static inline int dma_buf_mmap(struct dma_buf *dmabuf,
+			       struct vm_area_struct *vma,
+			       unsigned long pgoff)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_DMA_SHARED_BUFFER */
 
#endif /* __DMA_BUF_H__ */
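
A minimal, hypothetical sketch of how the new hook could be used (not part of this patch): the exporter below assumes a single physically contiguous, CPU-coherent allocation described by an invented struct my_exporter_buffer, and implements the new .mmap callback with remap_pfn_range(). An exporter that cannot keep the CPU view coherent would additionally have to shoot down these ptes when the buffer leaves the cpu domain, as the kerneldoc added above requires. All my_* names are illustrative, not from the kernel.

/*
 * Illustrative exporter-side sketch only -- not part of this patch.
 */
#include <linux/dma-buf.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct my_exporter_buffer {
	unsigned long pfn;	/* first page frame of the backing storage */
	size_t size;		/* size of the backing storage in bytes */
};

static int my_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct my_exporter_buffer *buf = dmabuf->priv;
	unsigned long vsize = vma->vm_end - vma->vm_start;

	/* reject mappings that would run past the end of the buffer */
	if ((vma->vm_pgoff << PAGE_SHIFT) + vsize > buf->size)
		return -EINVAL;

	/*
	 * Coherent backing storage is assumed here, so the pages can be
	 * mapped directly and left in place.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       buf->pfn + vma->vm_pgoff,
			       vsize, vma->vm_page_prot);
}

static const struct dma_buf_ops my_dmabuf_ops = {
	/* .attach, .detach, .map_dma_buf, .unmap_dma_buf, .release, ... */
	.mmap = my_dmabuf_mmap,
};

On the importer side, a subsystem that already exposes a file with its own mmap handler could simply forward the request through the new dma_buf_mmap() entry point, here at page offset 0 (my_importer_mmap and the private_data lookup are likewise assumptions):

static int my_importer_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf = file->private_data;	/* assumed lookup */

	return dma_buf_mmap(dmabuf, vma, 0);
}

When CONFIG_DMA_SHARED_BUFFER is disabled, the static inline stub added in the last hunk makes the same call return -ENODEV, so importers do not need a config ifdef around it.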