/*
 * RDMA device: Definitions of Resource Manager functions
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef RDMA_RM_H
#define RDMA_RM_H

#include "qapi/error.h"
#include "rdma_backend_defs.h"
#include "rdma_rm_defs.h"

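/*
 * Typical usage (a sketch inferred from the declarations below, not a
 * statement of the implementation): call rdma_rm_init() once during device
 * setup, allocate resources (PD, UC, MR, CQ, QP, SRQ) with rdma_rm_alloc_*(),
 * look them up by handle with rdma_rm_get_*(), release them with the matching
 * rdma_rm_dealloc_*() functions, and tear everything down with rdma_rm_fini().
 */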
int rdma_rm_init(RdmaDeviceResources *dev_res,
                 struct ibv_device_attr *dev_attr);
void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                  const char *ifname);

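/* Protection domain (PD) */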
int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t *pd_handle, uint32_t ctx_handle);
RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle);
void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle);

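/* Memory region (MR) */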
int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint64_t guest_start, uint64_t guest_length,
                     void *host_virt, int access_flags, uint32_t *mr_handle,
                     uint32_t *lkey, uint32_t *rkey);
RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle);
void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle);

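/* User context (UC) */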
int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
                     uint32_t *uc_handle);
RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle);
void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle);

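/* Completion queue (CQ) */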
int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t cqe, uint32_t *cq_handle, void *opaque);
RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle);
void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
                           bool notify);
void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle);

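/* Queue pair (QP) */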
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
                     uint8_t is_srq, uint32_t srq_handle);
RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn);
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,
                      union ibv_gid *dgid, uint32_t dqpn,
                      enum ibv_qp_state qp_state, uint32_t qkey,
                      uint32_t rq_psn, uint32_t sq_psn);
int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t qp_handle, struct ibv_qp_attr *attr,
                     int attr_mask, struct ibv_qp_init_attr *init_attr);
void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle);

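/* Shared receive queue (SRQ) */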
RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle);
int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                      uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
                      uint32_t *srq_handle, void *opaque);
int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
                      struct ibv_srq_attr *srq_attr);
int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
                       struct ibv_srq_attr *srq_attr, int srq_attr_mask);
void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle);

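/* Completion queue element (CQE) contexts */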
int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
                          void *ctx);
void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id);
void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id);

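/* Port GID table */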
int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                    const char *ifname, union ibv_gid *gid, int gid_idx);
int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                    const char *ifname, int gid_idx);
int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res,
                                  RdmaBackendDev *backend_dev, int sgid_idx);
static inline union ibv_gid *rdma_rm_get_gid(RdmaDeviceResources *dev_res,
                                             int sgid_idx)
{
    return &dev_res->port.gid_tbl[sgid_idx].gid;
}
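
/* Dump device counters to the QEMU monitor */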
void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res);

#endif