diff options
| author | Linus Torvalds <[email protected]> | 2022-03-25 02:17:39 +0000 |
|---|---|---|
| committer | Linus Torvalds <[email protected]> | 2022-03-25 02:17:39 +0000 |
| commit | 2dacc1e57b95ebc42ddcbfc26cd74700b341f1df (patch) | |
| tree | cdbc034754bdbf50bd97952586442835781821ba /drivers/infiniband/hw/irdma/user.h | |
| parent | Merge tag 'ceph-for-5.18-rc1' of https://github.com/ceph/ceph-client (diff) | |
| parent | RDMA/nldev: Prevent underflow in nldev_stat_set_counter_dynamic_doit() (diff) | |
| download | kernel-2dacc1e57b95ebc42ddcbfc26cd74700b341f1df.tar.gz kernel-2dacc1e57b95ebc42ddcbfc26cd74700b341f1df.zip | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
- Minor bug fixes in mlx5, mthca, pvrdma, rtrs, mlx4, hfi1, hns
- Minor cleanups: coding style, useless includes and documentation
- Reorganize how multicast processing works in rxe
- Replace a red/black tree with xarray in rxe which improves performance
- DSCP support and HW address handle re-use in irdma
- Simplify the mailbox command handling in hns
- Simplify iser now that FMR is eliminated
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (93 commits)
RDMA/nldev: Prevent underflow in nldev_stat_set_counter_dynamic_doit()
IB/iser: Fix error flow in case of registration failure
IB/iser: Generalize map/unmap dma tasks
IB/iser: Use iser_fr_desc as registration context
IB/iser: Remove iser_reg_data_sg helper function
RDMA/rxe: Use standard names for ref counting
RDMA/rxe: Replace red-black trees by xarrays
RDMA/rxe: Shorten pool names in rxe_pool.c
RDMA/rxe: Move max_elem into rxe_type_info
RDMA/rxe: Replace obj by elem in declaration
RDMA/rxe: Delete _locked() APIs for pool objects
RDMA/rxe: Reverse the sense of RXE_POOL_NO_ALLOC
RDMA/rxe: Replace mr by rkey in responder resources
RDMA/rxe: Fix ref error in rxe_av.c
RDMA/hns: Use the reserved loopback QPs to free MR before destroying MPT
RDMA/irdma: Add support for address handle re-use
RDMA/qib: Fix typos in comments
RDMA/mlx5: Fix memory leak in error flow for subscribe event routine
Revert "RDMA/core: Fix ib_qp_usecnt_dec() called when error"
RDMA/rxe: Remove useless argument for update_state()
...
Diffstat (limited to 'drivers/infiniband/hw/irdma/user.h')
| -rw-r--r-- | drivers/infiniband/hw/irdma/user.h | 62 |
1 files changed, 28 insertions, 34 deletions
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h index 3c811fb88404..ddd0ebbdd7d5 100644 --- a/drivers/infiniband/hw/irdma/user.h +++ b/drivers/infiniband/hw/irdma/user.h @@ -270,29 +270,24 @@ struct irdma_cq_poll_info { bool imm_valid:1; }; -enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); -enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); - -enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, - bool signaled, bool post_sq); -enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp, - struct irdma_post_rq_info *info); +int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq); +int irdma_uk_inline_send(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq); +int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, + bool post_sq); +int irdma_uk_post_receive(struct irdma_qp_uk *qp, + struct irdma_post_rq_info *info); void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp); -enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool inv_stag, bool post_sq); -enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); -enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, bool post_sq); -enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); +int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool inv_stag, bool post_sq); +int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool post_sq); +int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool post_sq); +int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, + bool post_sq); struct irdma_wqe_uk_ops { void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity); @@ -303,16 +298,16 @@ struct irdma_wqe_uk_ops { struct irdma_bind_window *op_info); }; -enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, - struct irdma_cq_poll_info *info); +int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, + struct irdma_cq_poll_info *info); void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, enum irdma_cmpl_notify cq_notify); void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size); void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt); void irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info); -enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp, - struct irdma_qp_uk_init_info *info); +int irdma_uk_qp_init(struct irdma_qp_uk *qp, + struct irdma_qp_uk_init_info *info); struct irdma_sq_uk_wr_trk_info { u64 wrid; u32 wr_len; @@ -413,16 +408,15 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, struct irdma_post_sq_info *info); __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx); void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq); -enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, - bool signaled, bool post_sq); -enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); -enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size); +int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq); +int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); +int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size); void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, u32 inline_data, u8 *shift); -enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, - u32 sq_size, u8 shift, u32 *wqdepth); -enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, - u32 rq_size, u8 shift, u32 *wqdepth); +int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, + u32 *wqdepth); +int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, + u32 *wqdepth); void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta, u32 wqe_idx, bool post_sq); void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx); |
