diff --git a/libnvme/examples/discover-loop.c b/libnvme/examples/discover-loop.c index a41df92b43..f1369e8a0d 100644 --- a/libnvme/examples/discover-loop.c +++ b/libnvme/examples/discover-loop.c @@ -69,7 +69,7 @@ int main() nvme_free_global_ctx(ctx); return 1; } - ret = nvme_host_get(ctx, NULL, NULL, &h); + ret = nvme_get_host(ctx, NULL, NULL, &h); if (ret) { fprintf(stderr, "Failed to allocated memory\n"); return 1; diff --git a/libnvme/libnvme/nvme.i b/libnvme/libnvme/nvme.i index 460500e06b..9f5fb0a751 100644 --- a/libnvme/libnvme/nvme.i +++ b/libnvme/libnvme/nvme.i @@ -38,21 +38,21 @@ PyDict_SetItemString(p, key, val); /* Does NOT steal reference to val .. */ Py_XDECREF(val); /* .. therefore decrement ref. count. */ } - PyObject *hostnqn_from_file() { - char * val = nvme_hostnqn_from_file(); + PyObject *read_hostnqn() { + char * val = nvme_read_hostnqn(); PyObject * obj = PyUnicode_FromString(val); free(val); return obj; } - PyObject *hostid_from_file() { - char * val = nvme_hostid_from_file(); + PyObject *read_hostid() { + char * val = nvme_read_hostid(); PyObject * obj = PyUnicode_FromString(val); free(val); return obj; } %} -PyObject *hostnqn_from_file(); -PyObject *hostid_from_file(); +PyObject *read_hostnqn(); +PyObject *read_hostid(); %exception nvme_ctrl::connect { connect_err = 0; @@ -557,7 +557,7 @@ struct nvme_ns { const char *hostkey = NULL, const char *hostsymname = NULL) { nvme_host_t h; - if (nvme_host_get(ctx, hostnqn, hostid, &h)) + if (nvme_get_host(ctx, hostnqn, hostid, &h)) return NULL; if (hostsymname) nvme_host_set_hostsymname(h, hostsymname); @@ -613,7 +613,7 @@ struct nvme_ns { const char *name = NULL) { struct nvme_subsystem *s; - if (nvme_subsystem_get(ctx, host, name, subsysnqn, &s)) + if (nvme_get_subsystem(ctx, host, name, subsysnqn, &s)) return NULL; return s; @@ -1245,13 +1245,13 @@ struct nvme_ns { PyObject *output; int ret; - ret = nvme_nbft_read(ctx, &nbft, filename); + ret = nvme_read_nbft(ctx, &nbft, filename); if (ret) { 
Py_RETURN_NONE; } output = nbft_to_pydict(nbft); - nvme_nbft_free(ctx, nbft); + nvme_free_nbft(ctx, nbft); return output; } %}; diff --git a/libnvme/src/libnvme.ld b/libnvme/src/libnvme.ld index 6fd9afad14..2ebc94cb86 100644 --- a/libnvme/src/libnvme.ld +++ b/libnvme/src/libnvme.ld @@ -6,7 +6,7 @@ LIBNVME_3 { nvme_close; nvme_create_ctrl; nvme_create_global_ctx; - nvme_ctrl_config_match; + nvme_ctrl_match_config; nvme_ctrl_first_ns; nvme_ctrl_first_path; nvme_ctrl_get_address; @@ -25,9 +25,9 @@ LIBNVME_3 { nvme_ctrl_next_ns; nvme_ctrl_next_path; nvme_ctrl_release_transport_handle; - nvme_ctrl_reset; + nvme_reset_ctrl; nvme_ctrl_set_dhchap_host_key; - nvme_ctrls_filter; + nvme_filter_ctrls; nvme_describe_key_serial; nvme_disconnect_ctrl; nvme_dump_config; @@ -42,7 +42,6 @@ LIBNVME_3 { nvme_free_host; nvme_free_ns; nvme_free_subsystem; - nvme_fw_download_seq; nvme_gen_dhchap_key; nvme_generate_tls_key_identity; nvme_generate_tls_key_identity_compat; @@ -69,26 +68,20 @@ LIBNVME_3 { nvme_get_telemetry_max; nvme_get_uuid_list; nvme_get_version; - nvme_host_get; + nvme_get_host; nvme_host_get_global_ctx; nvme_host_get_ids; nvme_host_is_pdc_enabled; nvme_host_release_fds; nvme_host_set_pdc_enabled; - nvme_hostid_from_file; - nvme_hostid_generate; - nvme_hostnqn_from_file; - nvme_hostnqn_generate; - nvme_hostnqn_generate_from_hostid; + nvme_read_hostid; + nvme_generate_hostid; + nvme_read_hostnqn; + nvme_generate_hostnqn; + nvme_generate_hostnqn_from_hostid; nvme_import_tls_key; nvme_import_tls_key_versioned; - nvme_init_copy_range; - nvme_init_copy_range_f1; - nvme_init_copy_range_f2; - nvme_init_copy_range_f3; nvme_init_ctrl; - nvme_init_ctrl_list; - nvme_init_dsm_range; nvme_insert_tls_key; nvme_insert_tls_key_compat; nvme_insert_tls_key_versioned; @@ -130,11 +123,11 @@ LIBNVME_3 { nvme_mi_submit_exit; nvme_namespace_attach_ctrls; nvme_namespace_detach_ctrls; - nvme_namespace_filter; + nvme_filter_namespace; nvme_namespace_first_path; nvme_namespace_next_path; - 
nvme_nbft_free; - nvme_nbft_read; + nvme_free_nbft; + nvme_read_nbft; nvme_next_host; nvme_next_subsystem; nvme_ns_attach; @@ -152,7 +145,7 @@ LIBNVME_3 { nvme_ns_get_uuid; nvme_ns_identify; nvme_ns_read; - nvme_ns_rescan; + nvme_rescan_ns; nvme_ns_verify; nvme_ns_write; nvme_ns_write_uncorrectable; @@ -162,7 +155,7 @@ LIBNVME_3 { nvme_path_get_ctrl; nvme_path_get_ns; nvme_path_get_queue_depth; - nvme_paths_filter; + nvme_filter_paths; nvme_read_config; nvme_read_key; nvme_refresh_topology; @@ -193,10 +186,10 @@ LIBNVME_3 { nvme_strerror; nvme_submit_admin_passthru; nvme_submit_io_passthru; - nvme_subsys_filter; + nvme_filter_subsys; nvme_subsystem_first_ctrl; nvme_subsystem_first_ns; - nvme_subsystem_get; + nvme_get_subsystem; nvme_subsystem_get_fw_rev; nvme_subsystem_get_host; nvme_subsystem_get_nqn; @@ -205,7 +198,7 @@ LIBNVME_3 { nvme_subsystem_next_ctrl; nvme_subsystem_next_ns; nvme_subsystem_release_fds; - nvme_subsystem_reset; + nvme_reset_subsystem; nvme_transport_handle_get_fd; nvme_transport_handle_get_name; nvme_transport_handle_is_blkdev; @@ -217,9 +210,9 @@ LIBNVME_3 { nvme_transport_handle_set_submit_exit; nvme_unlink_ctrl; nvme_update_key; - nvme_uuid_find; + nvme_find_uuid; nvme_uuid_from_string; - nvme_uuid_random; + nvme_random_uuid; nvme_uuid_to_string; local: *; diff --git a/libnvme/src/meson.build b/libnvme/src/meson.build index 4e7c9b7327..387b09b515 100644 --- a/libnvme/src/meson.build +++ b/libnvme/src/meson.build @@ -46,6 +46,10 @@ if want_fabrics ] endif +if liburing_dep.found() + sources += 'nvme/uring.c' +endif + if json_c_dep.found() sources += 'nvme/json.c' else diff --git a/libnvme/src/nvme/cmds.c b/libnvme/src/nvme/cmds.c index eecc1aaaba..3ef8523bfb 100644 --- a/libnvme/src/nvme/cmds.c +++ b/libnvme/src/nvme/cmds.c @@ -15,32 +15,220 @@ #include "cleanup.h" #include "private.h" -int nvme_fw_download_seq(struct nvme_transport_handle *hdl, bool ish, - __u32 size, __u32 xfer, __u32 offset, void *buf) +static bool force_4k; + 
+__attribute__((constructor)) +static void nvme_init_env(void) +{ + char *val; + + val = getenv("LIBNVME_FORCE_4K"); + if (!val) + return; + if (!strcmp(val, "1") || + !strcasecmp(val, "true") || + !strncasecmp(val, "enable", 6)) + force_4k = true; +} + +int nvme_get_log(struct nvme_transport_handle *hdl, + struct nvme_passthru_cmd *cmd, bool rae, + __u32 xfer_len) +{ + __u64 offset = 0, xfer, data_len = cmd->data_len; + __u64 start = (__u64)cmd->cdw13 << 32 | cmd->cdw12; + __u64 lpo; + void *ptr = (void *)(uintptr_t)cmd->addr; + int ret; + bool _rae; + __u32 numd; + __u16 numdu, numdl; + __u32 cdw10 = cmd->cdw10 & (NVME_VAL(LOG_CDW10_LID) | + NVME_VAL(LOG_CDW10_LSP)); + __u32 cdw11 = cmd->cdw11 & NVME_VAL(LOG_CDW11_LSI); + + if (force_4k) + xfer_len = NVME_LOG_PAGE_PDU_SIZE; + + /* + * 4k is the smallest possible transfer unit, so restricting to 4k + * avoids having to check the MDTS value of the controller. + */ + do { + if (!force_4k) { + xfer = data_len - offset; + if (xfer > xfer_len) + xfer = xfer_len; + } else { + xfer = NVME_LOG_PAGE_PDU_SIZE; + } + + /* + * Always retain regardless of the RAE parameter until the very + * last portion of this log page so the data remains latched + * during the fetch sequence. 
+ */ + lpo = start + offset; + numd = (xfer >> 2) - 1; + numdu = numd >> 16; + numdl = numd & 0xffff; + _rae = offset + xfer < data_len || rae; + + cmd->cdw10 = cdw10 | + NVME_SET(!!_rae, LOG_CDW10_RAE) | + NVME_SET(numdl, LOG_CDW10_NUMDL); + cmd->cdw11 = cdw11 | + NVME_SET(numdu, LOG_CDW11_NUMDU); + cmd->cdw12 = lpo & 0xffffffff; + cmd->cdw13 = lpo >> 32; + cmd->data_len = xfer; + cmd->addr = (__u64)(uintptr_t)ptr; + + if (hdl->uring_enabled) + ret = nvme_submit_admin_passthru_async(hdl, cmd); + else + ret = nvme_submit_admin_passthru(hdl, cmd); + if (ret) + return ret; + + offset += xfer; + ptr += xfer; + } while (offset < data_len); + + if (hdl->uring_enabled) { + ret = nvme_wait_complete_passthru(hdl); + if (ret) + return ret; + } + + return 0; +} + +static int read_ana_chunk(struct nvme_transport_handle *hdl, + enum nvme_log_ana_lsp lsp, bool rae, + __u8 *log, __u8 **read, __u8 *to_read, __u8 *log_end) { struct nvme_passthru_cmd cmd; - void *data = buf; - int err = 0; - if (ish && nvme_transport_handle_is_mi(hdl)) - nvme_init_mi_cmd_flags(&cmd, ish); + if (to_read > log_end) + return -ENOSPC; - while (size > 0) { - __u32 chunk = min(xfer, size); + while (*read < to_read) { + __u32 len = min_t(__u32, log_end - *read, + NVME_LOG_PAGE_PDU_SIZE); + int ret; - err = nvme_init_fw_download(&cmd, data, chunk, offset); - if (err) - break; - err = nvme_submit_admin_passthru(hdl, &cmd); - if (err) - break; + nvme_init_get_log_ana(&cmd, lsp, *read - log, *read, len); + ret = nvme_get_log(hdl, &cmd, rae, NVME_LOG_PAGE_PDU_SIZE); + if (ret) + return ret; - data += chunk; - size -= chunk; - offset += chunk; + *read += len; } + return 0; +} - return err; +static int try_read_ana(struct nvme_transport_handle *hdl, + enum nvme_log_ana_lsp lsp, bool rae, + struct nvme_ana_log *log, __u8 *log_end, + __u8 *read, __u8 **to_read, bool *may_retry) +{ + __u16 ngrps = le16_to_cpu(log->ngrps); + + while (ngrps--) { + __u8 *group = *to_read; + int ret; + __le32 nnsids; + + *to_read += 
sizeof(*log->descs); + ret = read_ana_chunk(hdl, lsp, rae, + (__u8 *)log, &read, *to_read, log_end); + if (ret) { + /* + * If the provided buffer isn't long enough, + * the log page may have changed while reading it + * and the computed length was inaccurate. + * Have the caller check chgcnt and retry. + */ + *may_retry = ret == -ENOSPC; + return ret; + } + + /* + * struct nvme_ana_group_desc has 8-byte alignment + * but the group pointer is only 4-byte aligned. + * Don't dereference the misaligned pointer. + */ + memcpy(&nnsids, + group + offsetof(struct nvme_ana_group_desc, nnsids), + sizeof(nnsids)); + *to_read += le32_to_cpu(nnsids) * sizeof(__le32); + ret = read_ana_chunk(hdl, lsp, rae, + (__u8 *)log, &read, *to_read, log_end); + if (ret) { + *may_retry = ret == -ENOSPC; + return ret; + } + } + + *may_retry = true; + return 0; +} + +int nvme_get_ana_log_atomic(struct nvme_transport_handle *hdl, + bool rae, bool rgo, struct nvme_ana_log *log, __u32 *len, + unsigned int retries) +{ + const enum nvme_log_ana_lsp lsp = + rgo ? 
NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY : 0; + /* Get Log Page can only fetch multiples of dwords */ + __u8 * const log_end = (__u8 *)log + (*len & -4); + __u8 *read = (__u8 *)log; + __u8 *to_read; + int ret; + + if (!retries) + return -EINVAL; + + to_read = (__u8 *)log->descs; + ret = read_ana_chunk(hdl, lsp, rae, + (__u8 *)log, &read, to_read, log_end); + if (ret) + return ret; + + do { + bool may_retry = false; + int saved_ret; + int saved_errno; + __le64 chgcnt; + + saved_ret = try_read_ana(hdl, lsp, rae, log, log_end, + read, &to_read, &may_retry); + /* + * If the log page was read with multiple Get Log Page commands, + * chgcnt must be checked afterwards to ensure atomicity + */ + *len = to_read - (__u8 *)log; + if (*len <= NVME_LOG_PAGE_PDU_SIZE || !may_retry) + return saved_ret; + + saved_errno = errno; + chgcnt = log->chgcnt; + read = (__u8 *)log; + to_read = (__u8 *)log->descs; + ret = read_ana_chunk(hdl, lsp, rae, + (__u8 *)log, &read, to_read, log_end); + if (ret) + return ret; + + if (log->chgcnt == chgcnt) { + /* Log hasn't changed; return try_read_ana() result */ + errno = saved_errno; + return saved_ret; + } + } while (--retries); + + return -EAGAIN; } int nvme_set_etdas(struct nvme_transport_handle *hdl, bool *changed) @@ -331,36 +519,6 @@ int nvme_get_lba_status_log(struct nvme_transport_handle *hdl, bool rae, return 0; } -static int nvme_ns_attachment(struct nvme_transport_handle *hdl, bool ish, - __u32 nsid, __u16 num_ctrls, __u16 *ctrlist, bool attach) -{ - struct nvme_ctrl_list cntlist = { 0 }; - struct nvme_passthru_cmd cmd; - - nvme_init_ctrl_list(&cntlist, num_ctrls, ctrlist); - if (ish && nvme_transport_handle_is_mi(hdl)) - nvme_init_mi_cmd_flags(&cmd, ish); - - if (attach) - nvme_init_ns_attach_ctrls(&cmd, nsid, &cntlist); - else - nvme_init_ns_detach_ctrls(&cmd, nsid, &cntlist); - - return nvme_submit_admin_passthru(hdl, &cmd); -} - -int nvme_namespace_attach_ctrls(struct nvme_transport_handle *hdl, bool ish, - __u32 nsid, __u16 num_ctrls, 
__u16 *ctrlist) -{ - return nvme_ns_attachment(hdl, ish, nsid, num_ctrls, ctrlist, true); -} - -int nvme_namespace_detach_ctrls(struct nvme_transport_handle *hdl, bool ish, - __u32 nsid, __u16 num_ctrls, __u16 *ctrlist) -{ - return nvme_ns_attachment(hdl, ish, nsid, num_ctrls, ctrlist, false); -} - size_t nvme_get_ana_log_len_from_id_ctrl(const struct nvme_id_ctrl *id_ctrl, bool rgo) { @@ -413,104 +571,6 @@ int nvme_get_logical_block_size(struct nvme_transport_handle *hdl, return 0; } -static inline void nvme_init_copy_range_elbt(__u8 *elbt, __u64 eilbrt) -{ - int i; - - for (i = 0; i < 8; i++) - elbt[9 - i] = (eilbrt >> (8 * i)) & 0xff; - elbt[1] = 0; - elbt[0] = 0; -} - -void nvme_init_copy_range(struct nvme_copy_range *copy, __u16 *nlbs, - __u64 *slbas, __u32 *eilbrts, __u32 *elbatms, - __u32 *elbats, __u16 nr) -{ - int i; - - for (i = 0; i < nr; i++) { - copy[i].nlb = cpu_to_le16(nlbs[i]); - copy[i].slba = cpu_to_le64(slbas[i]); - copy[i].eilbrt = cpu_to_le32(eilbrts[i]); - copy[i].elbatm = cpu_to_le16(elbatms[i]); - copy[i].elbat = cpu_to_le16(elbats[i]); - } -} - -void nvme_init_copy_range_f1(struct nvme_copy_range_f1 *copy, __u16 *nlbs, - __u64 *slbas, __u64 *eilbrts, __u32 *elbatms, - __u32 *elbats, __u16 nr) -{ - int i; - - for (i = 0; i < nr; i++) { - copy[i].nlb = cpu_to_le16(nlbs[i]); - copy[i].slba = cpu_to_le64(slbas[i]); - copy[i].elbatm = cpu_to_le16(elbatms[i]); - copy[i].elbat = cpu_to_le16(elbats[i]); - nvme_init_copy_range_elbt(copy[i].elbt, eilbrts[i]); - } -} - -void nvme_init_copy_range_f2(struct nvme_copy_range_f2 *copy, __u32 *snsids, - __u16 *nlbs, __u64 *slbas, __u16 *sopts, - __u32 *eilbrts, __u32 *elbatms, __u32 *elbats, - __u16 nr) -{ - int i; - - for (i = 0; i < nr; i++) { - copy[i].snsid = cpu_to_le32(snsids[i]); - copy[i].nlb = cpu_to_le16(nlbs[i]); - copy[i].slba = cpu_to_le64(slbas[i]); - copy[i].sopt = cpu_to_le16(sopts[i]); - copy[i].eilbrt = cpu_to_le32(eilbrts[i]); - copy[i].elbatm = cpu_to_le16(elbatms[i]); - copy[i].elbat = 
cpu_to_le16(elbats[i]); - } -} - -void nvme_init_copy_range_f3(struct nvme_copy_range_f3 *copy, __u32 *snsids, - __u16 *nlbs, __u64 *slbas, __u16 *sopts, - __u64 *eilbrts, __u32 *elbatms, __u32 *elbats, - __u16 nr) -{ - int i; - - for (i = 0; i < nr; i++) { - copy[i].snsid = cpu_to_le32(snsids[i]); - copy[i].nlb = cpu_to_le16(nlbs[i]); - copy[i].slba = cpu_to_le64(slbas[i]); - copy[i].sopt = cpu_to_le16(sopts[i]); - copy[i].elbatm = cpu_to_le16(elbatms[i]); - copy[i].elbat = cpu_to_le16(elbats[i]); - nvme_init_copy_range_elbt(copy[i].elbt, eilbrts[i]); - } -} - -void nvme_init_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs, - __u32 *llbas, __u64 *slbas, __u16 nr_ranges) -{ - int i; - - for (i = 0; i < nr_ranges; i++) { - dsm[i].cattr = cpu_to_le32(ctx_attrs[i]); - dsm[i].nlb = cpu_to_le32(llbas[i]); - dsm[i].slba = cpu_to_le64(slbas[i]); - } -} - -void nvme_init_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls, - __u16 *ctrlist) -{ - int i; - - cntlist->num = cpu_to_le16(num_ctrls); - for (i = 0; i < num_ctrls; i++) - cntlist->identifier[i] = cpu_to_le16(ctrlist[i]); -} - int nvme_get_feature_length(int fid, __u32 cdw11, enum nvme_data_tfr dir, __u32 *len) { diff --git a/libnvme/src/nvme/cmds.h b/libnvme/src/nvme/cmds.h index 22b1fed236..6396703d9b 100644 --- a/libnvme/src/nvme/cmds.h +++ b/libnvme/src/nvme/cmds.h @@ -890,20 +890,6 @@ nvme_init_zns_identify_ctrl(struct nvme_passthru_cmd *cmd, id, sizeof(*id)); } -/** - * nvme_get_log() - Get log page data - * @hdl: Transport handle - * @cmd: Passthru command - * @rae: Retain asynchronous events - * @xfer_len: Max log transfer size per request to split the total. - * - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. 
- */ -int nvme_get_log(struct nvme_transport_handle *hdl, - struct nvme_passthru_cmd *cmd, bool rae, - __u32 xfer_len); - /** * nvme_init_get_log_lpo() - Initializes passthru command with a * Log Page Offset @@ -1871,31 +1857,6 @@ nvme_init_get_log_zns_changed_zones(struct nvme_passthru_cmd *cmd, log, sizeof(*log)); } -/** - * nvme_get_ana_log_atomic() - Retrieve Asymmetric Namespace Access - * log page atomically - * @hdl: Transport handle - * @rae: Whether to retain asynchronous events - * @rgo: Whether to retrieve ANA groups only (no NSIDs) - * @log: Pointer to a buffer to receive the ANA log page - * @len: Input: the length of the log page buffer. - * Output: the actual length of the ANA log page. - * @retries: The maximum number of times to retry on log page changes - * - * See &struct nvme_ana_log for the definition of the returned structure. - * - * Return: If successful, returns 0 and sets *len to the actual log page length. - * If unsuccessful, returns the nvme command status if a response was received - * (see &enum nvme_status_field) or -1 with errno set otherwise. - * Sets errno = EINVAL if retries == 0. - * Sets errno = EAGAIN if unable to read the log page atomically - * because chgcnt changed during each of the retries attempts. - * Sets errno = ENOSPC if the full log page does not fit in the provided buffer. - */ -int -nvme_get_ana_log_atomic(struct nvme_transport_handle *hdl, bool rae, bool rgo, - struct nvme_ana_log *log, __u32 *len, unsigned int retries); - /** * nvme_init_set_features() - Initialize passthru command for * Set Features @@ -3972,24 +3933,21 @@ nvme_init_virtual_mgmt(struct nvme_passthru_cmd *cmd, } /** - * nvme_flush() - Send an nvme flush command - * @hdl: Transport handle + * nvme_init_flush() - Initialize passthru command for Flush command + * @cmd: Passthru command to use * @nsid: Namespace identifier * * The Flush command requests that the contents of volatile write cache be made * non-volatile. 
* - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. + * Initializes the passthru command buffer for the Flush command. */ -static inline int nvme_flush(struct nvme_transport_handle *hdl, __u32 nsid) +static inline void +nvme_init_flush(struct nvme_passthru_cmd *cmd, __u32 nsid) { - struct nvme_passthru_cmd cmd = {}; - - cmd.opcode = nvme_cmd_flush; - cmd.nsid = nsid; - - return nvme_submit_io_passthru(hdl, &cmd); + memset(cmd, 0, sizeof(*cmd)); + cmd->opcode = nvme_cmd_flush; + cmd->nsid = nsid; } /** @@ -5216,1781 +5174,278 @@ nvme_init_lm_get_features_ctrl_data_queue(struct nvme_passthru_cmd *cmd, } /** - * nvme_identify() - Submit a generic Identify command - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID (if applicable to the requested CNS). - * @csi: Command Set Identifier. - * @cns: Identify Controller or Namespace Structure (CNS) value, - * specifying the type of data to be returned. - * @data: Pointer to the buffer where the identification data will - * be stored. - * @len: Length of the data buffer in bytes. - * - * The generic wrapper for submitting an Identify command, allowing the host - * to specify any combination of Identify parameters. + * nvme_init_mi_cmd_flags() - Initialize command flags for NVMe-MI + * @cmd: Passthru command to use + * @ish: Ignore Shutdown (for NVMe-MI command) * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
+ * Initializes the passthru command flags */ -static inline int -nvme_identify(struct nvme_transport_handle *hdl, __u32 nsid, enum nvme_csi csi, - enum nvme_identify_cns cns, void *data, __u32 len) +static inline void +nvme_init_mi_cmd_flags(struct nvme_passthru_cmd *cmd, bool ish) { - struct nvme_passthru_cmd cmd; - - nvme_init_identify(&cmd, nsid, csi, cns, data, len); - - return nvme_submit_admin_passthru(hdl, &cmd); + cmd->flags = NVME_FIELD_ENCODE(ish, + NVME_MI_ADMIN_CFLAGS_ISH_SHIFT, + NVME_MI_ADMIN_CFLAGS_ISH_MASK); } + /** - * nvme_identify_ctrl() - Submit an Identify Controller command - * @hdl: Transport handle for the controller. - * @id: Pointer to the buffer (&struct nvme_id_ctrl) where the - * controller identification data will be stored upon - * successful completion. - * - * Submits the Identify Controller command to retrieve the controller's - * capabilities and configuration data. + * nvme_init_ctrl_list() - Initialize an nvme_ctrl_list structure from an array. + * @cntlist: The controller list structure to initialize + * @num_ctrls: The number of controllers in the array, &ctrlist. + * @ctrlist: An array of controller identifiers in CPU native endian. * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * This is intended to be used with any command that takes a controller list + * argument. See nvme_ns_attach_ctrls() and nvme_ns_detach(). 
 */ -static inline int -nvme_identify_ctrl(struct nvme_transport_handle *hdl, - struct nvme_id_ctrl *id) +static inline void +nvme_init_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls, + __u16 *ctrlist) { - struct nvme_passthru_cmd cmd; - - nvme_init_identify_ctrl(&cmd, id); + int i; - return nvme_submit_admin_passthru(hdl, &cmd); + cntlist->num = htole16(num_ctrls); + for (i = 0; i < num_ctrls; i++) + cntlist->identifier[i] = htole16(ctrlist[i]); } /** - * nvme_identify_active_ns_list() - Submit an Identify Active Namespace - * List command - * @hdl: Transport handle for the controller. - * @nsid: The Namespace ID to query - * @ns_list: Pointer to the buffer (&struct nvme_ns_list) where the - * active namespace list will be stored. + * nvme_init_dsm_range() - Constructs a data set range structure + * @dsm: DSM range array + * @ctx_attrs: Array of context attributes + * @llbas: Array of length in logical blocks + * @slbas: Array of starting logical blocks + * @nr_ranges: The size of the dsm arrays * - * Submits the Identify command to retrieve a list of active Namespace IDs. + * Each array must be the same size, holding 'nr_ranges' entries. This is + * intended to be used when constructing a payload for nvme_dsm(). * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * This helper only initializes the @dsm array in place; it issues no + * command and has no return value. 
 */ -static inline int -nvme_identify_active_ns_list(struct nvme_transport_handle *hdl, - __u32 nsid, struct nvme_ns_list *ns_list) +static inline void +nvme_init_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs, + __u32 *llbas, __u64 *slbas, __u16 nr_ranges) { - struct nvme_passthru_cmd cmd; - - nvme_init_identify_active_ns_list(&cmd, nsid, ns_list); + int i; - return nvme_submit_admin_passthru(hdl, &cmd); + for (i = 0; i < nr_ranges; i++) { + dsm[i].cattr = htole32(ctx_attrs[i]); + dsm[i].nlb = htole32(llbas[i]); + dsm[i].slba = htole64(slbas[i]); + } } /** - * nvme_identify_ns() - Submit an Identify Namespace command - * @hdl: Transport handle for the controller. - * @nsid: The Namespace ID to identify. - * @ns: Pointer to the buffer (&struct nvme_id_ns) where the namespace - * identification data will be stored. - * - * Submits the Identify command to retrieve the Namespace Identification - * data structure for a specified namespace. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * nvme_init_copy_range_elbt() - Constructs a copy range elbt structure + * @elbt: Output buffer for the expected logical block tag bytes + * @eilbrt: Expected initial logical block reference tag */ - -static inline int -nvme_identify_ns(struct nvme_transport_handle *hdl, - __u32 nsid, struct nvme_id_ns *ns) +static inline void +nvme_init_copy_range_elbt(__u8 *elbt, __u64 eilbrt) { - struct nvme_passthru_cmd cmd; + int i; - nvme_init_identify_ns(&cmd, nsid, ns); - - return nvme_submit_admin_passthru(hdl, &cmd); + for (i = 0; i < 8; i++) + elbt[9 - i] = (eilbrt >> (8 * i)) & 0xff; + elbt[1] = 0; + elbt[0] = 0; } /** - * nvme_identify_csi_ns() - Submit a CSI-specific Identify Namespace command - * @hdl: Transport handle for the controller. - * @nsid: The Namespace ID to identify. - * @csi: The Command Set Identifier - * @uidx: The UUID Index for the command. 
- * @id_ns: Pointer to the buffer (@struct nvme_nvm_id_ns) where the - * CSI-specific namespace identification data will be stored. - * - * Submits the Identify command to retrieve Namespace Identification data - * specific to a Command Set Identifier (CSI). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * nvme_init_copy_range() - Constructs a copy range structure + * @copy: Copy range array + * @nlbs: Number of logical blocks + * @slbas: Starting LBA + * @eilbrts: Expected initial logical block reference tag + * @elbatms: Expected logical block application tag mask + * @elbats: Expected logical block application tag + * @nr: Number of descriptors to construct */ -static inline int -nvme_identify_csi_ns(struct nvme_transport_handle *hdl, __u32 nsid, - enum nvme_csi csi, __u8 uidx, struct nvme_nvm_id_ns *id_ns) +static inline void +nvme_init_copy_range(struct nvme_copy_range *copy, __u16 *nlbs, + __u64 *slbas, __u32 *eilbrts, __u32 *elbatms, + __u32 *elbats, __u16 nr) { - struct nvme_passthru_cmd cmd; - - nvme_init_identify_csi_ns(&cmd, nsid, csi, uidx, id_ns); + int i; - return nvme_submit_admin_passthru(hdl, &cmd); + for (i = 0; i < nr; i++) { + copy[i].nlb = htole16(nlbs[i]); + copy[i].slba = htole64(slbas[i]); + copy[i].eilbrt = htole32(eilbrts[i]); + copy[i].elbatm = htole16(elbatms[i]); + copy[i].elbat = htole16(elbats[i]); + } } /** - * nvme_identify_uuid_list() - Submit an Identify UUID List command - * @hdl: Transport handle for the controller. - * @uuid_list: Pointer to the buffer (&struct nvme_id_uuid_list) where the - * UUID list will be stored. - * - * Submits the Identify command to retrieve a list of UUIDs associated - * with the controller. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
+ * nvme_init_copy_range_f1() - Constructs a copy range f1 structure + * @copy: Copy range array + * @nlbs: Number of logical blocks + * @slbas: Starting LBA + * @eilbrts: Expected initial logical block reference tag + * @elbatms: Expected logical block application tag mask + * @elbats: Expected logical block application tag + * @nr: Number of descriptors to construct */ -static inline int -nvme_identify_uuid_list(struct nvme_transport_handle *hdl, - struct nvme_id_uuid_list *uuid_list) +static inline void +nvme_init_copy_range_f1(struct nvme_copy_range_f1 *copy, __u16 *nlbs, + __u64 *slbas, __u64 *eilbrts, __u32 *elbatms, + __u32 *elbats, __u16 nr) { - struct nvme_passthru_cmd cmd; + int i; - nvme_init_identify_uuid_list(&cmd, uuid_list); - - return nvme_submit_admin_passthru(hdl, &cmd); + for (i = 0; i < nr; i++) { + copy[i].nlb = htole16(nlbs[i]); + copy[i].slba = htole64(slbas[i]); + copy[i].elbatm = htole16(elbatms[i]); + copy[i].elbat = htole16(elbats[i]); + nvme_init_copy_range_elbt(copy[i].elbt, eilbrts[i]); + } } /** - * nvme_identify_csi_ns_user_data_format() - Submit an Identify CSI Namespace - * User Data Format command - * @hdl: Transport handle for the controller. - * @csi: Command Set Identifier. - * @fidx: Format Index, specifying which format entry to return. - * @uidx: The UUID Index for the command. - * @data: Pointer to the buffer where the format data will be stored. - * - * Submits the Identify command to retrieve a CSI-specific Namespace User - * Data Format data structure. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
+ * nvme_init_copy_range_f2() - Constructs a copy range f2 structure + * @copy: Copy range array + * @snsids: Source namespace identifier + * @nlbs: Number of logical blocks + * @slbas: Starting LBA + * @sopts: Source options + * @eilbrts: Expected initial logical block reference tag + * @elbatms: Expected logical block application tag mask + * @elbats: Expected logical block application tag + * @nr: Number of descriptors to construct */ -static inline int -nvme_identify_csi_ns_user_data_format(struct nvme_transport_handle *hdl, - enum nvme_csi csi, __u16 fidx, __u8 uidx, void *data) +static inline void +nvme_init_copy_range_f2(struct nvme_copy_range_f2 *copy, + __u32 *snsids, __u16 *nlbs, __u64 *slbas, __u16 *sopts, + __u32 *eilbrts, __u32 *elbatms, __u32 *elbats, + __u16 nr) { - struct nvme_passthru_cmd cmd; - - nvme_init_identify_csi_ns_user_data_format(&cmd, csi, fidx, uidx, data); + int i; - return nvme_submit_admin_passthru(hdl, &cmd); + for (i = 0; i < nr; i++) { + copy[i].snsid = htole32(snsids[i]); + copy[i].nlb = htole16(nlbs[i]); + copy[i].slba = htole64(slbas[i]); + copy[i].sopt = htole16(sopts[i]); + copy[i].eilbrt = htole32(eilbrts[i]); + copy[i].elbatm = htole16(elbatms[i]); + copy[i].elbat = htole16(elbats[i]); + } } /** - * nvme_identify_ns_granularity() - Submit an Identify Namespace Granularity - * List command - * @hdl: Transport handle for the controller. - * @gr_list: Pointer to the buffer (&struct nvme_id_ns_granularity_list) - * where the granularity list will be stored. - * - * Submits the Identify command to retrieve the Namespace Granularity List. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
+ * nvme_init_copy_range_f3() - Constructs a copy range f3 structure + * @copy: Copy range array + * @snsids: Source namespace identifier + * @nlbs: Number of logical blocks + * @slbas: Starting LBA + * @sopts: Source options + * @eilbrts: Expected initial logical block reference tag + * @elbatms: Expected logical block application tag mask + * @elbats: Expected logical block application tag + * @nr: Number of descriptors to construct */ -static inline int -nvme_identify_ns_granularity(struct nvme_transport_handle *hdl, - struct nvme_id_ns_granularity_list *gr_list) +static inline void +nvme_init_copy_range_f3(struct nvme_copy_range_f3 *copy, __u32 *snsids, + __u16 *nlbs, __u64 *slbas, __u16 *sopts, + __u64 *eilbrts, __u32 *elbatms, __u32 *elbats, + __u16 nr) { - struct nvme_passthru_cmd cmd; + int i; - nvme_init_identify_ns_granularity(&cmd, gr_list); - - return nvme_submit_admin_passthru(hdl, &cmd); + for (i = 0; i < nr; i++) { + copy[i].snsid = htole32(snsids[i]); + copy[i].nlb = htole16(nlbs[i]); + copy[i].slba = htole64(slbas[i]); + copy[i].sopt = htole16(sopts[i]); + copy[i].elbatm = htole16(elbatms[i]); + copy[i].elbat = htole16(elbats[i]); + nvme_init_copy_range_elbt(copy[i].elbt, eilbrts[i]); + } } /** - * nvme_identify_ns_descs_list() - Submit an Identify Namespace ID Descriptor - * List command - * @hdl: Transport handle for the controller. - * @nsid: The Namespace ID to query. - * @descs: Pointer to the buffer (&struct nvme_ns_id_desc) where the - * descriptor list will be stored. - * - * Submits the Identify command to retrieve the Namespace ID Descriptor List - * for a specified namespace. + * nvme_get_log() - Get log page data + * @hdl: Transport handle + * @cmd: Passthru command + * @rae: Retain asynchronous events + * @xfer_len: Max log transfer size per request to split the total. * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
+ * Return: 0 on success, the nvme command status if a response was + * received (see &enum nvme_status_field) or a negative error otherwise. */ -static inline int -nvme_identify_ns_descs_list(struct nvme_transport_handle *hdl, - __u32 nsid, struct nvme_ns_id_desc *descs) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_identify_ns_descs_list(&cmd, nsid, descs); - - return nvme_submit_admin_passthru(hdl, &cmd); -} +int nvme_get_log(struct nvme_transport_handle *hdl, + struct nvme_passthru_cmd *cmd, bool rae, + __u32 xfer_len); /** - * nvme_zns_identify_ns() - Submit a ZNS-specific Identify Namespace command - * @hdl: Transport handle for the controller. - * @nsid: The Namespace ID to identify. - * @data: Pointer to the buffer (&struct nvme_zns_id_ns) where the ZNS - * namespace identification data will be stored. - * - * Submits the Identify command to retrieve the Zoned Namespace (ZNS) - * specific identification data structure for a specified namespace. + * nvme_set_etdas() - Set the Extended Telemetry Data Area 4 Supported bit + * @hdl: Transport handle + * @changed: boolean to indicate whether or not the host + * behavior support feature had been changed * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. */ -static inline int -nvme_zns_identify_ns(struct nvme_transport_handle *hdl, - __u32 nsid, struct nvme_zns_id_ns *data) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_zns_identify_ns(&cmd, nsid, data); - - return nvme_submit_admin_passthru(hdl, &cmd); -} +int nvme_set_etdas(struct nvme_transport_handle *hdl, bool *changed); /** - * nvme_get_log_simple() - Retrieve a log page using default parameters - * @hdl: Transport handle for the controller. - * @lid: Log Identifier, specifying the log page to retrieve - * (@enum nvme_cmd_get_log_lid). 
- * @data: Pointer to the buffer where the log page data will be stored. - * @len: Length of the data buffer in bytes. - * - * Submits the Get Log Page command using the common settings: - * NVME\_NSID\_ALL, Retain Asynchronous Event (RAE) set to false, - * and assuming the NVM Command Set. + * nvme_clear_etdas() - Clear the Extended Telemetry Data Area 4 Supported bit + * @hdl: Transport handle + * @changed: boolean to indicate whether or not the host + * behavior support feature had been changed * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * Return: The nvme command status if a response was received (see + * &enum nvme_status_field) or -1 with errno set otherwise. */ -static inline int -nvme_get_log_simple(struct nvme_transport_handle *hdl, - enum nvme_cmd_get_log_lid lid, void *data, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log(&cmd, NVME_NSID_ALL, lid, NVME_CSI_NVM, data, len); - - return nvme_get_log(hdl, &cmd, false, NVME_LOG_PAGE_PDU_SIZE); -} +int nvme_clear_etdas(struct nvme_transport_handle *hdl, bool *changed); /** - * nvme_get_log_supported_log_pages() - Retrieve the Supported Log Pages - * Log Page - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer (@struct nvme_supported_log_pages) where - * the log page data will be stored. - * - * Submits the Get Log Page command specifically for the Supported Log Pages - * Log. + * nvme_get_uuid_list() - Returns the uuid list (if supported) + * @hdl: Transport handle + * @uuid_list: UUID list returned by identify UUID * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
*/ -static inline int -nvme_get_log_supported_log_pages(struct nvme_transport_handle *hdl, - struct nvme_supported_log_pages *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log(&cmd, NVME_NSID_ALL, NVME_LOG_LID_SUPPORTED_LOG_PAGES, - NVME_CSI_NVM, log, sizeof(*log)); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - +int nvme_get_uuid_list(struct nvme_transport_handle *hdl, + struct nvme_id_uuid_list *uuid_list); /** - * nvme_get_log_error() - Retrieve the Error Information Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for (usually NVME_NSID_ALL). - * @nr_entries: The maximum number of error log entries to retrieve. - * @err_log: Pointer to the buffer (array of @struct nvme_error_log_page) - * where the log page data will be stored. - * - * This log page describes extended error information for a command that - * completed with error, or may report an error that is not specific to a - * particular command. The total size requested is determined by - * @nr_entries * sizeof(@struct nvme_error_log_page). + * nvme_get_telemetry_max() - Get telemetry limits + * @hdl: Transport handle + * @da: On success return max supported data area + * @max_data_tx: On success set to max transfer chunk supported by + * the controller * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. + * Return: 0 on success, the nvme command status if a response was + * received (see &enum nvme_status_field) or a negative error otherwise. 
*/ -static inline int -nvme_get_log_error(struct nvme_transport_handle *hdl, __u32 nsid, - unsigned int nr_entries, struct nvme_error_log_page *err_log) -{ - struct nvme_passthru_cmd cmd; - size_t len = sizeof(*err_log) * nr_entries; - - nvme_init_get_log(&cmd, nsid, NVME_LOG_LID_ERROR, - NVME_CSI_NVM, err_log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} +int nvme_get_telemetry_max(struct nvme_transport_handle *hdl, + enum nvme_telemetry_da *da, size_t *max_data_tx); /** - * nvme_get_log_fw_slot() - Retrieve the Firmware Slot Information Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for (use NVME_NSID_ALL). - * @fw_log: Pointer to the buffer (@struct nvme_firmware_slot) where the log - * page data will be stored. - * - * This log page describes the firmware revision stored in each firmware slot - * supported. The firmware revision is indicated as an ASCII string. The log - * page also indicates the active slot number. + * nvme_get_telemetry_log() - Get specified telemetry log + * @hdl: Transport handle + * @create: Generate new host initiated telemetry capture + * @ctrl: Get controller Initiated log + * @rae: Retain asynchronous events + * @max_data_tx: Set the max data transfer size to be used retrieving telemetry. + * @da: Log page data area, valid values: &enum nvme_telemetry_da. + * @log: On success, set to the value of the allocated and retrieved log. + * @size: Ptr to the telemetry log size, so it can be returned * - * This command is typically issued for the controller scope, thus using - * NVME_NSID_ALL. + * The total size allocated can be calculated as: + * (nvme_telemetry_log da size + 1) * NVME_LOG_TELEM_BLOCK_SIZE. * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
*/ -static inline int -nvme_get_log_fw_slot(struct nvme_transport_handle *hdl, __u32 nsid, - struct nvme_firmware_slot *fw_log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log(&cmd, nsid, NVME_LOG_LID_FW_SLOT, - NVME_CSI_NVM, fw_log, sizeof(*fw_log)); - - return nvme_get_log(hdl, &cmd, false, sizeof(*fw_log)); -} - -/** - * nvme_get_log_changed_ns_list() - Retrieve the Namespace Change Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for (use NVME_NSID_ALL). - * @ns_log: Pointer to the buffer (@struct nvme_ns_list) where the log - * page data will be stored. - * - * This log page describes namespaces attached to this controller that have - * changed since the last time the namespace was identified, been added, or - * deleted. - * - * This command is typically issued for the controller scope, thus using - * NVME_NSID_ALL. The Retain Asynchronous Event (RAE) is true to retain - * asynchronous events associated with the log page - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_changed_ns_list(struct nvme_transport_handle *hdl, __u32 nsid, - struct nvme_ns_list *ns_log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log(&cmd, nsid, NVME_LOG_LID_CHANGED_NS, - NVME_CSI_NVM, ns_log, sizeof(*ns_log)); - - return nvme_get_log(hdl, &cmd, true, sizeof(*ns_log)); -} - -/** - * nvme_get_log_cmd_effects() - Retrieve the Command Effects Log Page - * @hdl: Transport handle for the controller. - * @csi: Command Set Identifier for the requested log page. - * @effects_log:Pointer to the buffer (@struct nvme_cmd_effects_log) where the - * log page data will be stored. - * - * This log page describes the commands that the controller supports and the - * effects of those commands on the state of the NVM subsystem. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. 
This command is typically issued for the controller - * scope, thus using NVME_NSID_ALL. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_cmd_effects(struct nvme_transport_handle *hdl, - enum nvme_csi csi, struct nvme_cmd_effects_log *effects_log) -{ - struct nvme_passthru_cmd cmd; - size_t len = sizeof(*effects_log); - - nvme_init_get_log_cmd_effects(&cmd, csi, effects_log); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_device_self_test() - Retrieve the Device Self-Test Log Page - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer (@struct nvme_self_test_log) where the log - * page data will be stored. - * - * This log page indicates the status of an in-progress self-test and the - * percent complete of that operation, and the results of the previous 20 - * self-test operations. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_ALL. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_device_self_test(struct nvme_transport_handle *hdl, - struct nvme_self_test_log *log) -{ - struct nvme_passthru_cmd cmd; - size_t len = sizeof(*log); - - nvme_init_get_log(&cmd, NVME_NSID_ALL, NVME_LOG_LID_DEVICE_SELF_TEST, - NVME_CSI_NVM, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_create_telemetry_host_mcda() - Create the Host Initiated - * Telemetry Log - * @hdl: Transport handle for the controller. - * @mcda: Maximum Created Data Area. Specifies the maximum amount of data - * that may be returned by the controller. - * @log: Pointer to the buffer (@struct nvme_telemetry_log) where the log - * page data will be stored. 
- * - * Submits the Get Log Page command to initiate the creation of a Host Initiated - * Telemetry Log. It sets the Log Identifier (LID) to Telemetry Host and - * includes the Maximum Created Data Area (MCDA) in the Log Specific Parameter - * (LSP) field along with the Create bit. - * - * It automatically sets Retain Asynchronous Event (RAE) to false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_create_telemetry_host_mcda(struct nvme_transport_handle *hdl, - enum nvme_telemetry_da mcda, struct nvme_telemetry_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_create_telemetry_host_mcda(&cmd, mcda, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_create_telemetry_host() - Create the Host Initiated Telemetry - * Log (Controller Determined Size) - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer (@struct nvme_telemetry_log) where the log - * page data will be stored. - * - * Submits the Get Log Page command to initiate the creation of a Host Initiated - * Telemetry Log. This is a convenience wrapper that automatically uses the - * Controller Determined size for the Maximum Created Data Area (MCDA). - * - * It automatically sets Retain Asynchronous Event (RAE) to false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_create_telemetry_host(struct nvme_transport_handle *hdl, - struct nvme_telemetry_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_create_telemetry_host(&cmd, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_telemetry_host() - Retrieve the Host-Initiated - * Telemetry Log Page (Retain) - * @hdl: Transport handle for the controller. - * @lpo: Offset (in bytes) into the telemetry data to start the - * retrieval. 
- * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command to retrieve a previously captured - * Host-Initiated Telemetry Log, starting at a specified offset (@lpo). The Log - * Specific Parameter (LSP) field is set to indicate the capture should be - * retained (not deleted after read). - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous Event - * (RAE) to false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_telemetry_host(struct nvme_transport_handle *hdl, - __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_telemetry_host(&cmd, lpo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_telemetry_ctrl() - Retrieve the Controller-Initiated - * Telemetry Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @lpo: Offset (in bytes) into the telemetry data to start the - * retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Controller-Initiated - * Telemetry Log, allowing retrieval of data starting at a specified offset - * (@lpo). - * - * It automatically sets the Log Identifier (LID). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_telemetry_ctrl(struct nvme_transport_handle *hdl, bool rae, - __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_telemetry_ctrl(&cmd, lpo, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_endurance_group() - Retrieve the Endurance Group Log Page - * @hdl: Transport handle for the controller. 
- * @endgid: Starting Endurance Group Identifier (ENDGID) to return in - * the list. - * @log: Pointer to the buffer (@struct nvme_endurance_group_log) where - * the log page data will be stored. - * - * This log page indicates if an Endurance Group Event has occurred for a - * particular Endurance Group. The ENDGID is placed in the Log Specific - * Identifier (LSI) field of the Get Log Page command. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_endurance_group(struct nvme_transport_handle *hdl, - __u16 endgid, struct nvme_endurance_group_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_endurance_group(&cmd, endgid, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_predictable_lat_nvmset() - Retrieve the Predictable Latency - * Per NVM Set Log Page - * @hdl: Transport handle for the controller. - * @nvmsetid: The NVM Set Identifier (NVMSETID) for which to retrieve the log. - * @log: Pointer to the buffer (@struct nvme_nvmset_predictable_lat_log) - * where the log page data will be stored. - * - * Submits the Get Log Page command specifically for the Predictable Latency Per - * NVM Set Log. The NVMSETID is placed in the Log Specific Identifier (LSI) - * field of the command. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_get_log_predictable_lat_nvmset(struct nvme_transport_handle *hdl, - __u16 nvmsetid, struct nvme_nvmset_predictable_lat_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_predictable_lat_nvmset(&cmd, nvmsetid, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_predictable_lat_event() - Retrieve the Predictable Latency Event - * Aggregate Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Predictable Latency - * Event Aggregate Log, allowing retrieval of data starting at a specified - * offset (@lpo). - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_PREDICTABLE_LAT_AGG. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_predictable_lat_event(struct nvme_transport_handle *hdl, - bool rae, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_predictable_lat_event(&cmd, lpo, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_fdp_configurations() - Retrieve the Flexible Data Placement - * (FDP) Configurations Log Page - * @hdl: Transport handle for the controller. - * @egid: Endurance Group Identifier (EGID) to return in the - * list (used in LSI). - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the FDP Configurations Log. - * The EGID is placed in the Log Specific Identifier (LSI) field. 
- * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_fdp_configurations(struct nvme_transport_handle *hdl, - __u16 egid, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_fdp_configurations(&cmd, egid, lpo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_reclaim_unit_handle_usage() - Retrieve the FDP Reclaim Unit - * Handle (RUH) Usage Log Page - * @hdl: Transport handle for the controller. - * @egid: Endurance Group Identifier (EGID) (used in LSI). - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the FDP Reclaim Unit Handle - * Usage Log. The EGID is placed in the Log Specific Identifier (LSI) field. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_reclaim_unit_handle_usage(struct nvme_transport_handle *hdl, - __u16 egid, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_reclaim_unit_handle_usage(&cmd, egid, lpo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_fdp_stats() - Retrieve the Flexible Data Placement (FDP) - * Statistics Log Page - * @hdl: Transport handle for the controller. - * @egid: Endurance Group Identifier (EGID) (used in LSI). 
- * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the FDP Statistics Log. - * The EGID is placed in the Log Specific Identifier (LSI) field. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_fdp_stats(struct nvme_transport_handle *hdl, - __u16 egid, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_fdp_stats(&cmd, egid, lpo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_fdp_events() - Retrieve the Flexible Data Placement (FDP) - * Events Log Page - * @hdl: Transport handle for the controller. - * @egid: Endurance Group Identifier (EGID) (used in LSI). - * @host_events:Whether to report host-initiated events (true) or - * controller-initiated events (false). - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the FDP Events Log. - * The EGID is placed in the Log Specific Identifier (LSI) field, and the - * @host_events flag is used to set the Log Specific Parameter (LSP) field. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_get_log_fdp_events(struct nvme_transport_handle *hdl, - __u16 egid, bool host_events, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_fdp_events(&cmd, egid, host_events, lpo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_ana() - Retrieve the Asymmetric Namespace Access (ANA) Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @lsp: Log specific parameter, see &enum nvme_get_log_ana_lsp. - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * This log consists of a header describing the log and descriptors containing - * the ANA information for groups that contain namespaces attached to the - * controller. The @lsp parameter is placed in the Log Specific Parameter field - * of the command. - * - * See &struct nvme_ana_log for the definition of the returned structure. - * - * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_ANA. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_ana(struct nvme_transport_handle *hdl, bool rae, - enum nvme_log_ana_lsp lsp, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_ana(&cmd, lsp, lpo, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_ana_groups() - Retrieve the Asymmetric Namespace Access (ANA) - * Groups Only Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @log: Pointer to the buffer (@struct nvme_ana_log) where the log page - * data will be stored. - * @len: Length of the buffer provided in @log. 
- * - * This function retrieves only the ANA Group Descriptors by setting the Log - * Specific Parameter (LSP) field to NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY. It is a - * convenience wrapper around nvme_get_log_ana, using a Log Page Offset (LPO) of - * 0. - * - * See &struct nvme_ana_log for the definition of the returned structure. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_ana_groups(struct nvme_transport_handle *hdl, bool rae, - struct nvme_ana_log *log, __u32 len) -{ - return nvme_get_log_ana(hdl, rae, NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY, - 0, log, len); -} - -/** - * nvme_get_log_lba_status() - Retrieve the LBA Status Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the LBA Status Log. - * - * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_LBA_STATUS. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_lba_status(struct nvme_transport_handle *hdl, - bool rae, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_lba_status(&cmd, lpo, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_endurance_grp_evt() - Retrieve the Endurance Group Event - * Aggregate Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. 
- * - * Submits the Get Log Page command specifically for the Endurance Group Event - * Aggregate Log, allowing retrieval of data starting at a specified offset - * (@lpo). - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_ENDURANCE_GRP_EVT. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_endurance_grp_evt(struct nvme_transport_handle *hdl, - bool rae, __u64 lpo, void *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_endurance_grp_evt(&cmd, lpo, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_fid_supported_effects() - Retrieve the Feature Identifiers - * Supported and Effects Log Page - * @hdl: Transport handle for the controller. - * @csi: Command set identifier, see &enum nvme_csi for known values - * @log: Pointer to the buffer (@struct nvme_fid_supported_effects_log) - * where the log page data will be stored. - * - * Submits the Get Log Page command specifically for the Feature Identifiers - * Supported and Effects Log. It automatically sets the Log Identifier (LID). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_fid_supported_effects(struct nvme_transport_handle *hdl, - enum nvme_csi csi, struct nvme_fid_supported_effects_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_fid_supported_effects(&cmd, csi, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_mi_cmd_supported_effects() - Retrieve the Management Interface - * (MI) Commands Supported and Effects Log Page - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer - * (@struct nvme_mi_cmd_supported_effects_log) where the log page - * data will be stored. - * - * Submits the Get Log Page command specifically for the MI Commands Supported - * and Effects Log. 
It automatically sets the Log Identifier (LID). This command - * is typically issued with a namespace ID of 0xFFFFFFFF (NVME_NSID_NONE). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_mi_cmd_supported_effects(struct nvme_transport_handle *hdl, - struct nvme_mi_cmd_supported_effects_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_mi_cmd_supported_effects(&cmd, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_boot_partition() - Retrieve the Boot Partition Log Page - * @hdl: Transport handle for the controller. - * @lsp: The Log Specific Parameter (LSP) field for this Log - * Identifier (LID). - * @part: Pointer to the buffer (@struct nvme_boot_partition) where - * the log page data will be stored. - * @len: Length of the buffer provided in @part. - * - * Submits the Get Log Page command specifically for the Boot Partition Log. - * The LSP field is set based on the @lsp parameter. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_BOOT_PARTITION. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_boot_partition(struct nvme_transport_handle *hdl, - __u8 lsp, struct nvme_boot_partition *part, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_boot_partition(&cmd, lsp, part, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_rotational_media_info() - Retrieve the Rotational Media - * Information Log Page - * @hdl: Transport handle for the controller. - * @endgid: The Endurance Group Identifier (ENDGID) to retrieve the - * log for (used in LSI). - * @log: Pointer to the buffer (@struct nvme_rotational_media_info_log) - * where the log page data will be stored. - * @len: Length of the buffer provided in @log. 
- * - * Submits the Get Log Page command specifically for the Rotational Media - * Information Log. The ENDGID is placed in the Log Specific Identifier (LSI) - * field of the command. - * - * It automatically sets the Log Identifier (LID) and Retain Asynchronous - * Event (RAE) to false. This command is typically issued for the controller - * scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_rotational_media_info(struct nvme_transport_handle *hdl, - __u16 endgid, struct nvme_rotational_media_info_log *log, - __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_rotational_media_info(&cmd, endgid, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_dispersed_ns_participating_nss() - Retrieve the Dispersed - * Namespace Participating NVM Subsystems Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for. - * @log: Pointer to the buffer - * (@struct nvme_dispersed_ns_participating_nss_log) - * where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Dispersed Namespace - * Participating NVM Subsystems Log. It automatically sets the Log Identifier - * (LID) and Retain Asynchronous Event (RAE) to false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_get_log_dispersed_ns_participating_nss(struct nvme_transport_handle *hdl, - __u32 nsid, struct nvme_dispersed_ns_participating_nss_log *log, - __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_dispersed_ns_participating_nss(&cmd, nsid, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_mgmt_addr_list() - Retrieve the Management Address List Log Page - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer (@struct nvme_mgmt_addr_list_log) where - * the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Management Address List - Log. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_MGMT_ADDR_LIST, Retain Asynchronous Event (RAE) to false, and - * uses NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_mgmt_addr_list(struct nvme_transport_handle *hdl, - struct nvme_mgmt_addr_list_log *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_mgmt_addr_list(&cmd, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_phy_rx_eom() - Retrieve the Physical Interface Receiver Eye - * Opening Measurement Log Page - * @hdl: Transport handle for the controller. - * @lsp: Log Specific Parameter (LSP), which controls the action - * and measurement quality. - * @controller: Target Controller ID (used in LSI). - * @log: Pointer to the buffer (@struct nvme_phy_rx_eom_log) where - * the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Physical Interface - * Receiver Eye Opening Measurement Log. The Controller ID is placed in the - * Log Specific Identifier (LSI) field. 
- * - * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_PHY_RX_EOM, - * and Retain Asynchronous Event (RAE) to false. This command is typically - * issued for the controller scope, thus using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_phy_rx_eom(struct nvme_transport_handle *hdl, - __u8 lsp, __u16 controller, struct nvme_phy_rx_eom_log *log, - __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_phy_rx_eom(&cmd, lsp, controller, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_reachability_groups() - Retrieve the Reachability Groups - * Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for. - * @rgo: Return Groups Only. Set to true to return only the Reachability - * Group Descriptors. - * @log: Pointer to the buffer (@struct nvme_reachability_groups_log) - * where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Reachability Groups - * Log. The @rgo parameter is placed in the Log Specific Parameter (LSP) field. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_REACHABILITY_GROUPS. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_reachability_groups(struct nvme_transport_handle *hdl, - __u32 nsid, bool rgo, struct nvme_reachability_groups_log *log, - __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_reachability_groups(&cmd, rgo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_reachability_associations() - Retrieve the Reachability - * Associations Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @rao: Return Associations Only. 
Set to true to return only the - * Reachability Association Descriptors. - * @log: Pointer to the buffer - * (@struct nvme_reachability_associations_log) where the log - * page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Reachability - * Associations Log. The @rao parameter is placed in the Log Specific Parameter - * (LSP) field. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_REACHABILITY_ASSOCIATIONS. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_reachability_associations(struct nvme_transport_handle *hdl, - bool rae, bool rao, - struct nvme_reachability_associations_log *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_reachability_associations(&cmd, rao, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_changed_alloc_ns_list() - Retrieve the Changed Allocated - * Namespace List Log Page - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer (@struct nvme_ns_list) where the log page - * data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Changed Allocated - * Namespace List Log. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_CHANGED_ALLOC_NS_LIST. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_changed_alloc_ns_list(struct nvme_transport_handle *hdl, - struct nvme_ns_list *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_changed_ns(&cmd, log); - - return nvme_get_log(hdl, &cmd, true, len); -} - -/** - * nvme_get_log_discovery() - Retrieve the Discovery Log Page - * @hdl: Transport handle for the controller. 
- * @lpo: Offset (in bytes) into the log page data to start the retrieval. - * @log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Discovery Log. - * Supported only by NVMe-oF Discovery controllers, returning discovery records. - * - * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_DISCOVERY. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_discovery(struct nvme_transport_handle *hdl, - __u64 lpo, __u32 len, void *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_discovery(&cmd, lpo, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_host_discovery() - Retrieve the Host Discovery Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @allhoste: All Host Entries. Set to true to report all host entries. - * @log: Pointer to the buffer (@struct nvme_host_discover_log) - * where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Host Discovery Log. - * The @allhoste parameter is placed in the Log Specific Parameter (LSP) field. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_HOST_DISCOVERY. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_get_log_host_discovery(struct nvme_transport_handle *hdl, - bool rae, bool allhoste, - struct nvme_host_discover_log *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_host_discovery(&cmd, allhoste, log, len); - - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_ave_discovery() - Retrieve the Asynchronous Event - * Group (AVE) Discovery Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @log: Pointer to the buffer (@struct nvme_ave_discover_log) where - * the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Asynchronous Event - * Group (AVE) Discovery Log. It automatically sets the Log Identifier (LID). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_ave_discovery(struct nvme_transport_handle *hdl, - bool rae, struct nvme_ave_discover_log *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_ave_discovery(&cmd, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_pull_model_ddc_req() - Retrieve the Pull Model DDC Request - * Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @log: Pointer to the buffer (@struct nvme_pull_model_ddc_req_log) - * where the log page data will be stored. - * @len: Length of the buffer provided in @log. - * - * Submits the Get Log Page command specifically for the Pull Model DDC Request - * Log. It automatically sets the Log Identifier (LID). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_get_log_pull_model_ddc_req(struct nvme_transport_handle *hdl, - bool rae, struct nvme_pull_model_ddc_req_log *log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_pull_model_ddc_req(&cmd, log, len); - - return nvme_get_log(hdl, &cmd, rae, len); -} - -/** - * nvme_get_log_media_unit_stat() - Retrieve the Media Unit Status Log Page - * @hdl: Transport handle for the controller. - * @domid: The Domain Identifier (DOMID) selection, if supported - * (used in LSI). - * @mus: Pointer to the buffer (@struct nvme_media_unit_stat_log) - * where the log page data will be stored. - * - * Submits the Get Log Page command specifically for the Media Unit Status Log. - * The DOMID is placed in the Log Specific Identifier (LSI) field of the - * command. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_MEDIA_UNIT_STATUS, and Retain Asynchronous Event (RAE) to false. - * This command is typically issued for the controller scope, thus using - * NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_media_unit_stat(struct nvme_transport_handle *hdl, - __u16 domid, struct nvme_media_unit_stat_log *mus) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_media_unit_stat(&cmd, domid, mus); - - return nvme_get_log(hdl, &cmd, false, sizeof(*mus)); -} - -/** - * nvme_get_log_support_cap_config_list() - Retrieve the Supported Capacity - * Configuration List Log Page - * @hdl: Transport handle for the controller. - * @domid: The Domain Identifier (DOMID) selection, if - * supported (used in LSI). - * @cap: Pointer to the buffer - * (@struct nvme_supported_cap_config_list_log) where the log - * page data will be stored. - * - * Submits the Get Log Page command specifically for the Supported Capacity - * Configuration List Log. The DOMID is placed in the Log Specific Identifier - * (LSI) field of the command. 
- * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_SUPPORTED_CAP_CONFIG_LIST, and Retain Asynchronous Event (RAE) - * to false. This command is typically issued for the controller scope, thus - * using NVME_NSID_NONE. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_support_cap_config_list(struct nvme_transport_handle *hdl, - __u16 domid, struct nvme_supported_cap_config_list_log *cap) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_support_cap_config_list(&cmd, domid, cap); - - return nvme_get_log(hdl, &cmd, false, sizeof(*cap)); -} - -/** - * nvme_get_log_reservation() - Retrieve the Reservation Notification Log Page - * @hdl: Transport handle for the controller. - * @log: Pointer to the buffer (@struct nvme_resv_notification_log) - * where the log page data will be stored. - * - * Submits the Get Log Page command specifically for the Reservation - * Notification Log. It automatically sets the Log Identifier (LID). - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_reservation(struct nvme_transport_handle *hdl, - struct nvme_resv_notification_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_reservation(&cmd, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_sanitize() - Retrieve the Sanitize Status Log Page - * @hdl: Transport handle for the controller. - * @rae: Retain asynchronous events - * @log: Pointer to the buffer (@struct nvme_sanitize_log_page) - * where the log page data will be stored. - * - * Submits the Get Log Page command specifically for the Sanitize Status Log. - * The log page reports sanitize operation time estimates and information about - * the most recent sanitize operation. - * - * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_SANITIZE. 
- * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_sanitize(struct nvme_transport_handle *hdl, - bool rae, struct nvme_sanitize_log_page *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_sanitize(&cmd, log); - - return nvme_get_log(hdl, &cmd, rae, sizeof(*log)); -} - -/** - * nvme_get_log_zns_changed_zones() - Retrieve the ZNS Changed Zones Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for. - * @rae: Retain asynchronous events - * @log: Pointer to the buffer (@struct nvme_zns_changed_zone_log) - * where the log page data will be stored. - * - * Submits the Get Log Page command specifically for the ZNS Changed Zones Log. - * This log lists zones that have changed state due to an exceptional event. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_ZNS_CHANGED_ZONES. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_zns_changed_zones(struct nvme_transport_handle *hdl, - __u32 nsid, bool rae, struct nvme_zns_changed_zone_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_zns_changed_zones(&cmd, nsid, log); - - return nvme_get_log(hdl, &cmd, rae, sizeof(*log)); -} - -/** - * nvme_get_log_persistent_event() - Retrieve the Persistent Event Log Page - * @hdl: Transport handle for the controller. - * @action: Action the controller should take during processing this - * command, see &enum nvme_pevent_log_action (used in LSP). - * @pevent_log: Pointer to the buffer where the log page data will be stored. - * @len: Length of the buffer provided in @pevent_log. - * - * Submits the Get Log Page command specifically for the Persistent Event Log. - * The @action parameter is placed in the Log Specific Parameter (LSP) field. 
- * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_PERSISTENT_EVENT and Retain Asynchronous Event (RAE) to false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_persistent_event(struct nvme_transport_handle *hdl, - enum nvme_pevent_log_action action, void *pevent_log, __u32 len) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_persistent_event(&cmd, action, pevent_log, len); - - /* - * Call the generic log execution function. - * The data length is determined by the 'len' parameter. - */ - return nvme_get_log(hdl, &cmd, false, len); -} - -/** - * nvme_get_log_lockdown() - Retrieve the Command and Feature Lockdown Log Page - * @hdl: Transport handle for the controller. - * @cnscp: Contents and Scope (CNSCP) of Command and Feature - * Identifier Lists (used in LSP). - * @log: Pointer to the buffer (@struct nvme_lockdown_log) where the log - * page data will be stored. - * - * Submits the Get Log Page command specifically for the Command and Feature - * Lockdown Log. The @cnscp parameter is placed in the Log Specific Parameter - * (LSP) field. - * - * It automatically sets the Log Identifier (LID) to - * NVME_LOG_LID_CMD_AND_FEAT_LOCKDOWN and Retain Asynchronous Event (RAE) to - * false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_lockdown(struct nvme_transport_handle *hdl, - __u8 cnscp, struct nvme_lockdown_log *log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_lockdown(&cmd, cnscp, log); - - return nvme_get_log(hdl, &cmd, false, sizeof(*log)); -} - -/** - * nvme_get_log_smart() - Retrieve the SMART / Health Information Log Page - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to request the log for. - * @smart_log: Pointer to the buffer (@struct nvme_smart_log) where the log - * page data will be stored. 
- * - * Submits the Get Log Page command specifically for the SMART / Health - * Information Log. It automatically sets the Log Identifier (LID) and - * Retain Asynchronous Event (RAE) to false. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_log_smart(struct nvme_transport_handle *hdl, - __u32 nsid, struct nvme_smart_log *smart_log) -{ - struct nvme_passthru_cmd cmd; - - nvme_init_get_log_smart(&cmd, nsid, smart_log); - - return nvme_get_log(hdl, &cmd, false, NVME_LOG_PAGE_PDU_SIZE); -} - -/** - * nvme_set_features() - Submit a generic Set Features command - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID sto apply the feature to. - * @fid: Feature Identifier (FID) to be set. - * @sv: Save Value (SV): If true, the feature value persists - * across power states. - * @cdw11: Command Dword 11 parameter (feature-specific). - * @cdw12: Command Dword 12 parameter (feature-specific). - * @cdw13: Command Dword 13 parameter (feature-specific). - * @uidx: UUID Index (UIDX) for the command, encoded into cdw14 - * @cdw15: Command Dword 15 parameter (feature-specific). - * @data: Pointer to the data buffer to transfer (if applicable). - * @len: Length of the data buffer in bytes. - * @result: The command completion result (CQE dword0) on success. - * - * Submits the Set Features command, allowing all standard command - * fields (cdw11-cdw15) and data buffer fields to be specified directly. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_set_features(struct nvme_transport_handle *hdl, __u32 nsid, __u8 fid, - bool sv, __u32 cdw11, __u32 cdw12, __u32 cdw13, __u8 uidx, - __u32 cdw15, void *data, __u32 len, __u64 *result) -{ - struct nvme_passthru_cmd cmd; - int err; - - nvme_init_set_features(&cmd, fid, sv); - cmd.nsid = nsid; - cmd.cdw11 = cdw11; - cmd.cdw12 = cdw12; - cmd.cdw13 = cdw13; - cmd.cdw14 = NVME_FIELD_ENCODE(uidx, - NVME_SET_FEATURES_CDW14_UUID_SHIFT, - NVME_SET_FEATURES_CDW14_UUID_MASK); - cmd.cdw15 = cdw15; - cmd.data_len = len; - cmd.addr = (__u64)(uintptr_t)data; - - err = nvme_submit_admin_passthru(hdl, &cmd); - if (result) - *result = cmd.result; - return err; -} - -/** - * nvme_set_features_simple() - Submit a Set Features command using only cdw11 - * @hdl: Transport handle for the controller. - * @nsid: Namespace ID to apply the feature to. - * @fid: Feature Identifier (FID) to be set. - * @sv: Save Value (SV): If true, the feature value persists across - * power states. - * @cdw11: Command Dword 11 parameter (feature-specific value). - * @result: The command completion result (CQE dword0) on success. - * - * Submits the Set Features command for features that only require - * parameters in cdw11. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_set_features_simple(struct nvme_transport_handle *hdl, - __u32 nsid, __u8 fid, bool sv, __u32 cdw11, __u64 *result) -{ - struct nvme_passthru_cmd cmd; - int err; - - nvme_init_set_features(&cmd, fid, sv); - cmd.nsid = nsid; - cmd.cdw11 = cdw11; - - err = nvme_submit_admin_passthru(hdl, &cmd); - if (result) - *result = cmd.result; - return err; -} - -/** - * nvme_get_features() - Submit a Get Features command - * @hdl: Transport handle for the controller. 
- * @nsid: Namespace ID, if applicable - * @fid: Feature identifier, see &enum nvme_features_id - * @sel: Select which type of attribute to return, - * see &enum nvme_get_features_sel - * @cdw11: Feature specific command dword11 field - * @uidx: UUID Index for differentiating vendor specific encoding - * @data: User address of feature data, if applicable - * @len: Length of feature data, if applicable, in bytes - * @result: The command completion result (CQE dword0) on success. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. - */ -static inline int -nvme_get_features(struct nvme_transport_handle *hdl, __u32 nsid, - __u8 fid, enum nvme_get_features_sel sel, - __u32 cdw11, __u8 uidx, void *data, - __u32 len, __u64 *result) -{ - struct nvme_passthru_cmd cmd; - int err; - - nvme_init_get_features(&cmd, fid, sel); - - cmd.nsid = nsid; - cmd.cdw11 = cdw11; - cmd.cdw14 = NVME_FIELD_ENCODE(uidx, - NVME_GET_FEATURES_CDW14_UUID_SHIFT, - NVME_GET_FEATURES_CDW14_UUID_MASK); - cmd.data_len = len; - cmd.addr = (__u64)(uintptr_t)data; - - err = nvme_submit_admin_passthru(hdl, &cmd); - if (result) - *result = cmd.result; - return err; -} - -/** - * nvme_get_features_simple() - Submit a simple Get Features command - * @hdl: Transport handle for the controller. - * @fid: Feature Identifier (FID) to be retrieved. - * @sel: Select (SEL), specifying which feature value - * to return (&struct nvme_get_features_sel). - * @result: The command completion result (CQE dword0) on success. - * - * Submits the Get Features command for features that only require parameters in - * the CQE dword0 and do not need any parameters in cdw11 through cdw15. - * - * Return: 0 on success, the NVMe command status on error, or a negative - * errno otherwise. 
- */ -static inline int -nvme_get_features_simple(struct nvme_transport_handle *hdl, __u8 fid, - enum nvme_get_features_sel sel, __u64 *result) -{ - struct nvme_passthru_cmd cmd; - int err; - - nvme_init_get_features(&cmd, fid, sel); - - err = nvme_submit_admin_passthru(hdl, &cmd); - if (result) - *result = cmd.result; - return err; -} - -/** - * nvme_init_mi_cmd_flags() - Initialize command flags for NVMe-MI - * @cmd: Passthru command to use - * @ish: Ignore Shutdown (for NVMe-MI command) - * - * Initializes the passthru command flags - */ -static inline void -nvme_init_mi_cmd_flags(struct nvme_passthru_cmd *cmd, bool ish) -{ - cmd->flags = NVME_FIELD_ENCODE(ish, - NVME_MI_ADMIN_CFLAGS_ISH_SHIFT, - NVME_MI_ADMIN_CFLAGS_ISH_MASK); -} - -/** - * nvme_fw_download_seq() - Firmware download sequence - * @hdl: Transport handle - * @ish: Ignore Shutdown (for NVMe-MI command) - * @size: Total size of the firmware image to transfer - * @xfer: Maximum size to send with each partial transfer - * @offset: Starting offset to send with this firmware download - * @buf: Address of buffer containing all or part of the firmware image. - * - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. - */ -int nvme_fw_download_seq(struct nvme_transport_handle *hdl, bool ish, - __u32 size, __u32 xfer, __u32 offset, void *buf); - -/** - * nvme_set_etdas() - Set the Extended Telemetry Data Area 4 Supported bit - * @hdl: Transport handle - * @changed: boolean to indicate whether or not the host - * behavior support feature had been changed - * - * Return: The nvme command status if a response was received (see - * &enum nvme_status_field) or -1 with errno set otherwise. 
- */ -int nvme_set_etdas(struct nvme_transport_handle *hdl, bool *changed); - -/** - * nvme_clear_etdas() - Clear the Extended Telemetry Data Area 4 Supported bit - * @hdl: Transport handle - * @changed: boolean to indicate whether or not the host - * behavior support feature had been changed - * - * Return: The nvme command status if a response was received (see - * &enum nvme_status_field) or -1 with errno set otherwise. - */ -int nvme_clear_etdas(struct nvme_transport_handle *hdl, bool *changed); - -/** - * nvme_get_uuid_list - Returns the uuid list (if supported) - * @hdl: Transport handle - * @uuid_list: UUID list returned by identify UUID - * - * Return: The nvme command status if a response was received (see - * &enum nvme_status_field) or -1 with errno set otherwise. - */ -int nvme_get_uuid_list(struct nvme_transport_handle *hdl, - struct nvme_id_uuid_list *uuid_list); - -/** - * nvme_get_telemetry_max() - Get telemetry limits - * @hdl: Transport handle - * @da: On success return max supported data area - * @max_data_tx: On success set to max transfer chunk supported by - * the controller - * - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. - */ -int nvme_get_telemetry_max(struct nvme_transport_handle *hdl, - enum nvme_telemetry_da *da, size_t *max_data_tx); - -/** - * nvme_get_telemetry_log() - Get specified telemetry log - * @hdl: Transport handle - * @create: Generate new host initated telemetry capture - * @ctrl: Get controller Initiated log - * @rae: Retain asynchronous events - * @max_data_tx: Set the max data transfer size to be used retrieving telemetry. - * @da: Log page data area, valid values: &enum nvme_telemetry_da. - * @log: On success, set to the value of the allocated and retrieved log. 
- * @size: Ptr to the telemetry log size, so it can be returned - * - * The total size allocated can be calculated as: - * (nvme_telemetry_log da size + 1) * NVME_LOG_TELEM_BLOCK_SIZE. - * - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. - */ -int nvme_get_telemetry_log(struct nvme_transport_handle *hdl, bool create, - bool ctrl, bool rae, size_t max_data_tx, - enum nvme_telemetry_da da, struct nvme_telemetry_log **log, - size_t *size); +int nvme_get_telemetry_log(struct nvme_transport_handle *hdl, bool create, + bool ctrl, bool rae, size_t max_data_tx, + enum nvme_telemetry_da da, struct nvme_telemetry_log **log, + size_t *size); /** * nvme_get_ctrl_telemetry() - Get controller telemetry log @@ -7054,6 +5509,31 @@ int nvme_get_new_host_telemetry(struct nvme_transport_handle *hdl, size_t nvme_get_ana_log_len_from_id_ctrl(const struct nvme_id_ctrl *id_ctrl, bool rgo); +/** + * nvme_get_ana_log_atomic() - Retrieve Asymmetric Namespace Access + * log page atomically + * @hdl: Transport handle + * @rae: Whether to retain asynchronous events + * @rgo: Whether to retrieve ANA groups only (no NSIDs) + * @log: Pointer to a buffer to receive the ANA log page + * @len: Input: the length of the log page buffer. + * Output: the actual length of the ANA log page. + * @retries: The maximum number of times to retry on log page changes + * + * See &struct nvme_ana_log for the definition of the returned structure. + * + * Return: If successful, returns 0 and sets *len to the actual log page length. + * If unsuccessful, returns the nvme command status if a response was received + * (see &enum nvme_status_field) or -1 with errno set otherwise. + * Sets errno = EINVAL if retries == 0. + * Sets errno = EAGAIN if unable to read the log page atomically + * because chgcnt changed during each of the retries attempts. + * Sets errno = ENOSPC if the full log page does not fit in the provided buffer. 
+ */ +int +nvme_get_ana_log_atomic(struct nvme_transport_handle *hdl, bool rae, bool rgo, + struct nvme_ana_log *log, __u32 *len, unsigned int retries); + /** * nvme_get_ana_log_len() - Retrieve size of the current ANA log * @hdl: Transport handle @@ -7088,125 +5568,6 @@ int nvme_get_logical_block_size(struct nvme_transport_handle *hdl, __u32 nsid, int nvme_get_lba_status_log(struct nvme_transport_handle *hdl, bool rae, struct nvme_lba_status_log **log); -/** - * nvme_namespace_attach_ctrls() - Attach namespace to controller(s) - * @hdl: Transport handle - * @ish: Ignore Shutdown (for NVMe-MI command) - * @nsid: Namespace ID to attach - * @num_ctrls: Number of controllers in ctrlist - * @ctrlist: List of controller IDs to perform the attach action - * - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. - */ -int nvme_namespace_attach_ctrls(struct nvme_transport_handle *hdl, bool ish, - __u32 nsid, __u16 num_ctrls, __u16 *ctrlist); - -/** - * nvme_namespace_detach_ctrls() - Detach namespace from controller(s) - * @hdl: Transport handle - * @ish: Ignore Shutdown (for NVMe-MI command) - * @nsid: Namespace ID to detach - * @num_ctrls: Number of controllers in ctrlist - * @ctrlist: List of controller IDs to perform the detach action - * - * Return: 0 on success, the nvme command status if a response was - * received (see &enum nvme_status_field) or a negative error otherwise. - */ -int nvme_namespace_detach_ctrls(struct nvme_transport_handle *hdl, bool ish, - __u32 nsid, __u16 num_ctrls, __u16 *ctrlist); - -/** - * nvme_init_ctrl_list() - Initialize an nvme_ctrl_list structure from an array. - * @cntlist: The controller list structure to initialize - * @num_ctrls: The number of controllers in the array, &ctrlist. - * @ctrlist: An array of controller identifiers in CPU native endian. - * - * This is intended to be used with any command that takes a controller list - * argument. 
See nvme_ns_attach_ctrls() and nvme_ns_detach(). - */ -void nvme_init_ctrl_list(struct nvme_ctrl_list *cntlist, __u16 num_ctrls, - __u16 *ctrlist); - -/** - * nvme_init_dsm_range() - Constructs a data set range structure - * @dsm: DSM range array - * @ctx_attrs: Array of context attributes - * @llbas: Array of length in logical blocks - * @slbas: Array of starting logical blocks - * @nr_ranges: The size of the dsm arrays - * - * Each array must be the same size of size 'nr_ranges'. This is intended to be - * used with constructing a payload for nvme_dsm(). - * - * Return: The nvme command status if a response was received or -errno - * otherwise. - */ -void nvme_init_dsm_range(struct nvme_dsm_range *dsm, __u32 *ctx_attrs, - __u32 *llbas, __u64 *slbas, __u16 nr_ranges); - -/** - * nvme_init_copy_range() - Constructs a copy range structure - * @copy: Copy range array - * @nlbs: Number of logical blocks - * @slbas: Starting LBA - * @eilbrts: Expected initial logical block reference tag - * @elbatms: Expected logical block application tag mask - * @elbats: Expected logical block application tag - * @nr: Number of descriptors to construct - */ -void nvme_init_copy_range(struct nvme_copy_range *copy, __u16 *nlbs, - __u64 *slbas, __u32 *eilbrts, __u32 *elbatms, - __u32 *elbats, __u16 nr); - -/** - * nvme_init_copy_range_f1() - Constructs a copy range f1 structure - * @copy: Copy range array - * @nlbs: Number of logical blocks - * @slbas: Starting LBA - * @eilbrts: Expected initial logical block reference tag - * @elbatms: Expected logical block application tag mask - * @elbats: Expected logical block application tag - * @nr: Number of descriptors to construct - */ -void nvme_init_copy_range_f1(struct nvme_copy_range_f1 *copy, __u16 *nlbs, - __u64 *slbas, __u64 *eilbrts, __u32 *elbatms, - __u32 *elbats, __u16 nr); - -/** - * nvme_init_copy_range_f2() - Constructs a copy range f2 structure - * @copy: Copy range array - * @snsids: Source namespace identifier - * @nlbs: 
Number of logical blocks - * @slbas: Starting LBA - * @sopts: Source options - * @eilbrts: Expected initial logical block reference tag - * @elbatms: Expected logical block application tag mask - * @elbats: Expected logical block application tag - * @nr: Number of descriptors to construct - */ -void nvme_init_copy_range_f2(struct nvme_copy_range_f2 *copy, __u32 *snsids, - __u16 *nlbs, __u64 *slbas, __u16 *sopts, - __u32 *eilbrts, __u32 *elbatms, __u32 *elbats, - __u16 nr); - -/** - * nvme_init_copy_range_f3() - Constructs a copy range f3 structure - * @copy: Copy range array - * @snsids: Source namespace identifier - * @nlbs: Number of logical blocks - * @slbas: Starting LBA - * @sopts: Source options - * @eilbrts: Expected initial logical block reference tag - * @elbatms: Expected logical block application tag mask - * @elbats: Expected logical block application tag - * @nr: Number of descriptors to construct - */ -void nvme_init_copy_range_f3(struct nvme_copy_range_f3 *copy, __u32 *snsids, - __u16 *nlbs, __u64 *slbas, __u16 *sopts, - __u64 *eilbrts, __u32 *elbatms, __u32 *elbats, - __u16 nr); - /** * nvme_get_feature_length() - Retrieve the command payload length for a * specific feature identifier diff --git a/libnvme/src/nvme/fabrics.c b/libnvme/src/nvme/fabrics.c index fe0fb064ca..83431b23af 100644 --- a/libnvme/src/nvme/fabrics.c +++ b/libnvme/src/nvme/fabrics.c @@ -2425,9 +2425,9 @@ int nvmf_config_modify(struct nvme_global_ctx *ctx, struct nvme_ctrl *c; if (!fctx->hostnqn) - fctx->hostnqn = hnqn = nvme_hostnqn_from_file(); + fctx->hostnqn = hnqn = nvme_read_hostnqn(); if (!fctx->hostid && hnqn) - fctx->hostid = hid = nvme_hostid_from_file(); + fctx->hostid = hid = nvme_read_hostid(); h = nvme_lookup_host(ctx, fctx->hostnqn, fctx->hostid); if (!h) { @@ -2489,7 +2489,7 @@ int nvmf_nbft_read_files(struct nvme_global_ctx *ctx, char *path, snprintf(filename, sizeof(filename), "%s/%s", path, dent[i]->d_name); - ret = nvme_nbft_read(ctx, &nbft, filename); + ret = 
nvme_read_nbft(ctx, &nbft, filename); if (!ret) { struct nbft_file_entry *new; @@ -2516,7 +2516,7 @@ void nvmf_nbft_free(struct nvme_global_ctx *ctx, struct nbft_file_entry *head) while (head) { struct nbft_file_entry *next = head->next; - nvme_nbft_free(ctx, head->nbft); + nvme_free_nbft(ctx, head->nbft); free(head); head = next; @@ -3041,7 +3041,7 @@ int nvmf_discovery(struct nvme_global_ctx *ctx, struct nvmf_context *fctx, ret = nvme_scan_ctrl(ctx, fctx->device, &c); if (!ret) { /* Check if device matches command-line options */ - if (!nvme_ctrl_config_match(c, fctx->transport, + if (!nvme_ctrl_match_config(c, fctx->transport, fctx->traddr, fctx->trsvcid, fctx->subsysnqn, fctx->host_traddr, fctx->host_iface)) { diff --git a/libnvme/src/nvme/filters.c b/libnvme/src/nvme/filters.c index de6915ed86..a3fb4cbfc6 100644 --- a/libnvme/src/nvme/filters.c +++ b/libnvme/src/nvme/filters.c @@ -14,7 +14,7 @@ #include "private.h" -int nvme_namespace_filter(const struct dirent *d) +int nvme_filter_namespace(const struct dirent *d) { int i, n; @@ -28,7 +28,7 @@ int nvme_namespace_filter(const struct dirent *d) return 0; } -int nvme_paths_filter(const struct dirent *d) +int nvme_filter_paths(const struct dirent *d) { int i, c, n; @@ -42,7 +42,7 @@ int nvme_paths_filter(const struct dirent *d) return 0; } -int nvme_ctrls_filter(const struct dirent *d) +int nvme_filter_ctrls(const struct dirent *d) { int i, c, n; @@ -61,7 +61,7 @@ int nvme_ctrls_filter(const struct dirent *d) return 0; } -int nvme_subsys_filter(const struct dirent *d) +int nvme_filter_subsys(const struct dirent *d) { int i; @@ -80,7 +80,7 @@ int nvme_scan_subsystems(struct dirent ***subsys) const char *dir = nvme_subsys_sysfs_dir(); int ret; - ret = scandir(dir, subsys, nvme_subsys_filter, alphasort); + ret = scandir(dir, subsys, nvme_filter_subsys, alphasort); if (ret < 0) return -errno; @@ -92,7 +92,7 @@ int nvme_scan_subsystem_namespaces(nvme_subsystem_t s, struct dirent ***ns) int ret; ret = 
scandir(nvme_subsystem_get_sysfs_dir(s), ns, - nvme_namespace_filter, alphasort); + nvme_filter_namespace, alphasort); if (ret < 0) return -errno; @@ -104,7 +104,7 @@ int nvme_scan_ctrls(struct dirent ***ctrls) const char *dir = nvme_ctrl_sysfs_dir(); int ret; - ret = scandir(dir, ctrls, nvme_ctrls_filter, alphasort); + ret = scandir(dir, ctrls, nvme_filter_ctrls, alphasort); if (ret < 0) return -errno; @@ -116,7 +116,7 @@ int nvme_scan_ctrl_namespace_paths(nvme_ctrl_t c, struct dirent ***paths) int ret; ret = scandir(nvme_ctrl_get_sysfs_dir(c), paths, - nvme_paths_filter, alphasort); + nvme_filter_paths, alphasort); if (ret < 0) return -errno; @@ -128,7 +128,7 @@ int nvme_scan_ctrl_namespaces(nvme_ctrl_t c, struct dirent ***ns) int ret; ret = scandir(nvme_ctrl_get_sysfs_dir(c), ns, - nvme_namespace_filter, alphasort); + nvme_filter_namespace, alphasort); if (ret < 0) return -errno; @@ -140,7 +140,7 @@ int nvme_scan_ns_head_paths(nvme_ns_head_t head, struct dirent ***paths) int ret; ret = scandir(nvme_ns_head_get_sysfs_dir(head), paths, - nvme_paths_filter, alphasort); + nvme_filter_paths, alphasort); if (ret < 0) return -errno; diff --git a/libnvme/src/nvme/filters.h b/libnvme/src/nvme/filters.h index 44f26ab2b3..05fd02a6f6 100644 --- a/libnvme/src/nvme/filters.h +++ b/libnvme/src/nvme/filters.h @@ -20,36 +20,36 @@ */ /** - * nvme_namespace_filter() - Filter for namespaces + * nvme_filter_namespace() - Filter for namespaces * @d: dirent to check * * Return: 1 if @d matches, 0 otherwise */ -int nvme_namespace_filter(const struct dirent *d); +int nvme_filter_namespace(const struct dirent *d); /** - * nvme_paths_filter() - Filter for paths + * nvme_filter_paths() - Filter for paths * @d: dirent to check * * Return: 1 if @d matches, 0 otherwise */ -int nvme_paths_filter(const struct dirent *d); +int nvme_filter_paths(const struct dirent *d); /** - * nvme_ctrls_filter() - Filter for controllers + * nvme_filter_ctrls() - Filter for controllers * @d: dirent to check * * 
Return: 1 if @d matches, 0 otherwise */ -int nvme_ctrls_filter(const struct dirent *d); +int nvme_filter_ctrls(const struct dirent *d); /** - * nvme_subsys_filter() - Filter for subsystems + * nvme_filter_subsys() - Filter for subsystems * @d: dirent to check * * Return: 1 if @d matches, 0 otherwise */ -int nvme_subsys_filter(const struct dirent *d); +int nvme_filter_subsys(const struct dirent *d); /** * nvme_scan_subsystems() - Scan for subsystems diff --git a/libnvme/src/nvme/ioctl.c b/libnvme/src/nvme/ioctl.c index 324bf7c569..a742cd6304 100644 --- a/libnvme/src/nvme/ioctl.c +++ b/libnvme/src/nvme/ioctl.c @@ -16,10 +16,6 @@ #include #include -#ifdef CONFIG_LIBURING -#include -#endif - #include #include #include @@ -41,7 +37,7 @@ static int nvme_verify_chr(struct nvme_transport_handle *hdl) return 0; } -int nvme_subsystem_reset(struct nvme_transport_handle *hdl) +int nvme_reset_subsystem(struct nvme_transport_handle *hdl) { int ret; @@ -55,7 +51,7 @@ int nvme_subsystem_reset(struct nvme_transport_handle *hdl) return ret; } -int nvme_ctrl_reset(struct nvme_transport_handle *hdl) +int nvme_reset_ctrl(struct nvme_transport_handle *hdl) { int ret; @@ -69,7 +65,7 @@ int nvme_ctrl_reset(struct nvme_transport_handle *hdl) return ret; } -int nvme_ns_rescan(struct nvme_transport_handle *hdl) +int nvme_rescan_ns(struct nvme_transport_handle *hdl) { int ret; @@ -202,338 +198,3 @@ int nvme_submit_admin_passthru(struct nvme_transport_handle *hdl, return -ENOTSUP; } - -static bool force_4k; - -__attribute__((constructor)) -static void nvme_init_env(void) -{ - char *val; - - val = getenv("LIBNVME_FORCE_4K"); - if (!val) - return; - if (!strcmp(val, "1") || - !strcasecmp(val, "true") || - !strncasecmp(val, "enable", 6)) - force_4k = true; -} - -#ifdef CONFIG_LIBURING -enum { - IO_URING_NOT_AVAILABLE, - IO_URING_AVAILABLE, -} io_uring_kernel_support = IO_URING_NOT_AVAILABLE; - -/* - * gcc specific attribute, call automatically on the library loading. 
- * if IORING_OP_URING_CMD is not supported, fallback to ioctl interface. - * - * The uring API expects the command of type struct nvme_passthru_cmd64. - */ -__attribute__((constructor)) -static void nvme_uring_cmd_probe() -{ - struct io_uring_probe *probe = io_uring_get_probe(); - - if (!probe) - return; - - if (!io_uring_opcode_supported(probe, IORING_OP_URING_CMD)) - return; - - io_uring_kernel_support = IO_URING_AVAILABLE; -} - -static int nvme_uring_cmd_setup(struct io_uring *ring) -{ - if (io_uring_queue_init(NVME_URING_ENTRIES, ring, - IORING_SETUP_SQE128 | IORING_SETUP_CQE32)) - return -errno; - return 0; -} - -static void nvme_uring_cmd_exit(struct io_uring *ring) -{ - io_uring_queue_exit(ring); -} - -static int nvme_uring_cmd_admin_passthru_async(struct nvme_transport_handle *hdl, - struct io_uring *ring, struct nvme_passthru_cmd *cmd) -{ - struct io_uring_sqe *sqe; - int ret; - - sqe = io_uring_get_sqe(ring); - if (!sqe) - return -1; - - memcpy(&sqe->cmd, cmd, sizeof(*cmd)); - - sqe->fd = hdl->fd; - sqe->opcode = IORING_OP_URING_CMD; - sqe->cmd_op = NVME_URING_CMD_ADMIN; - - ret = io_uring_submit(ring); - if (ret < 0) - return -errno; - - return 0; -} - -static int nvme_uring_cmd_wait_complete(struct io_uring *ring, int n) -{ - struct io_uring_cqe *cqe; - int ret, i; - - for (i = 0; i < n; i++) { - ret = io_uring_wait_cqe(ring, &cqe); - if (ret < 0) - return -errno; - io_uring_cqe_seen(ring, cqe); - } - - return 0; -} - -static bool nvme_uring_is_usable(struct nvme_transport_handle *hdl) -{ - struct stat st; - - if (io_uring_kernel_support != IO_URING_AVAILABLE || - hdl->type != NVME_TRANSPORT_HANDLE_TYPE_DIRECT || - fstat(hdl->fd, &st) || !S_ISCHR(st.st_mode)) - return false; - - return true; -} -#endif /* CONFIG_LIBURING */ - -int nvme_get_log(struct nvme_transport_handle *hdl, - struct nvme_passthru_cmd *cmd, bool rae, - __u32 xfer_len) -{ - __u64 offset = 0, xfer, data_len = cmd->data_len; - __u64 start = (__u64)cmd->cdw13 << 32 | cmd->cdw12; - __u64 
lpo; - void *ptr = (void *)(uintptr_t)cmd->addr; - int ret; - bool _rae; - __u32 numd; - __u16 numdu, numdl; - __u32 cdw10 = cmd->cdw10 & (NVME_VAL(LOG_CDW10_LID) | - NVME_VAL(LOG_CDW10_LSP)); - __u32 cdw11 = cmd->cdw11 & NVME_VAL(LOG_CDW11_LSI); -#ifdef CONFIG_LIBURING - bool use_uring = nvme_uring_is_usable(hdl); - struct io_uring ring; - int n = 0; - - if (use_uring) { - ret = nvme_uring_cmd_setup(&ring); - if (ret) - return ret; - } -#endif /* CONFIG_LIBURING */ - - if (force_4k) - xfer_len = NVME_LOG_PAGE_PDU_SIZE; - - /* - * 4k is the smallest possible transfer unit, so restricting to 4k - * avoids having to check the MDTS value of the controller. - */ - do { - if (!force_4k) { - xfer = data_len - offset; - if (xfer > xfer_len) - xfer = xfer_len; - } else { - xfer = NVME_LOG_PAGE_PDU_SIZE; - } - - /* - * Always retain regardless of the RAE parameter until the very - * last portion of this log page so the data remains latched - * during the fetch sequence. - */ - lpo = start + offset; - numd = (xfer >> 2) - 1; - numdu = numd >> 16; - numdl = numd & 0xffff; - _rae = offset + xfer < data_len || rae; - - cmd->cdw10 = cdw10 | - NVME_SET(!!_rae, LOG_CDW10_RAE) | - NVME_SET(numdl, LOG_CDW10_NUMDL); - cmd->cdw11 = cdw11 | - NVME_SET(numdu, LOG_CDW11_NUMDU); - cmd->cdw12 = lpo & 0xffffffff; - cmd->cdw13 = lpo >> 32; - cmd->data_len = xfer; - cmd->addr = (__u64)(uintptr_t)ptr; - -#ifdef CONFIG_LIBURING - if (use_uring) { - if (n >= NVME_URING_ENTRIES) { - ret = nvme_uring_cmd_wait_complete(&ring, n); - if (ret) - goto uring_exit; - n = 0; - } - n += 1; - ret = nvme_uring_cmd_admin_passthru_async(hdl, - &ring, cmd); - if (ret) - goto uring_exit; - } else { - ret = nvme_submit_admin_passthru(hdl, cmd); - if (ret) - return ret; - } -#else /* CONFIG_LIBURING */ - ret = nvme_submit_admin_passthru(hdl, cmd); -#endif /* CONFIG_LIBURING */ - if (ret) - return ret; - - offset += xfer; - ptr += xfer; - } while (offset < data_len); - -#ifdef CONFIG_LIBURING - if (use_uring) { - 
ret = nvme_uring_cmd_wait_complete(&ring, n); -uring_exit: - nvme_uring_cmd_exit(&ring); - if (ret) - return ret; - } -#endif /* CONFIG_LIBURING */ - - return 0; -} - -static int read_ana_chunk(struct nvme_transport_handle *hdl, enum nvme_log_ana_lsp lsp, bool rae, - __u8 *log, __u8 **read, __u8 *to_read, __u8 *log_end) -{ - struct nvme_passthru_cmd cmd; - - if (to_read > log_end) - return -ENOSPC; - - while (*read < to_read) { - __u32 len = min_t(__u32, log_end - *read, NVME_LOG_PAGE_PDU_SIZE); - int ret; - - nvme_init_get_log_ana(&cmd, lsp, *read - log, *read, len); - ret = nvme_get_log(hdl, &cmd, rae, NVME_LOG_PAGE_PDU_SIZE); - if (ret) - return ret; - - *read += len; - } - return 0; -} - -static int try_read_ana(struct nvme_transport_handle *hdl, enum nvme_log_ana_lsp lsp, bool rae, - struct nvme_ana_log *log, __u8 *log_end, - __u8 *read, __u8 **to_read, bool *may_retry) -{ - __u16 ngrps = le16_to_cpu(log->ngrps); - - while (ngrps--) { - __u8 *group = *to_read; - int ret; - __le32 nnsids; - - *to_read += sizeof(*log->descs); - ret = read_ana_chunk(hdl, lsp, rae, - (__u8 *)log, &read, *to_read, log_end); - if (ret) { - /* - * If the provided buffer isn't long enough, - * the log page may have changed while reading it - * and the computed length was inaccurate. - * Have the caller check chgcnt and retry. - */ - *may_retry = ret == -ENOSPC; - return ret; - } - - /* - * struct nvme_ana_group_desc has 8-byte alignment - * but the group pointer is only 4-byte aligned. - * Don't dereference the misaligned pointer. 
- */ - memcpy(&nnsids, - group + offsetof(struct nvme_ana_group_desc, nnsids), - sizeof(nnsids)); - *to_read += le32_to_cpu(nnsids) * sizeof(__le32); - ret = read_ana_chunk(hdl, lsp, rae, - (__u8 *)log, &read, *to_read, log_end); - if (ret) { - *may_retry = ret == -ENOSPC; - return ret; - } - } - - *may_retry = true; - return 0; -} - -int nvme_get_ana_log_atomic(struct nvme_transport_handle *hdl, bool rae, bool rgo, - struct nvme_ana_log *log, __u32 *len, - unsigned int retries) -{ - const enum nvme_log_ana_lsp lsp = - rgo ? NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY : 0; - /* Get Log Page can only fetch multiples of dwords */ - __u8 * const log_end = (__u8 *)log + (*len & -4); - __u8 *read = (__u8 *)log; - __u8 *to_read; - int ret; - - if (!retries) - return -EINVAL; - - to_read = (__u8 *)log->descs; - ret = read_ana_chunk(hdl, lsp, rae, - (__u8 *)log, &read, to_read, log_end); - if (ret) - return ret; - - do { - bool may_retry = false; - int saved_ret; - int saved_errno; - __le64 chgcnt; - - saved_ret = try_read_ana(hdl, lsp, rae, log, log_end, - read, &to_read, &may_retry); - /* - * If the log page was read with multiple Get Log Page commands, - * chgcnt must be checked afterwards to ensure atomicity - */ - *len = to_read - (__u8 *)log; - if (*len <= NVME_LOG_PAGE_PDU_SIZE || !may_retry) - return saved_ret; - - saved_errno = errno; - chgcnt = log->chgcnt; - read = (__u8 *)log; - to_read = (__u8 *)log->descs; - ret = read_ana_chunk(hdl, lsp, rae, - (__u8 *)log, &read, to_read, log_end); - if (ret) - return ret; - - if (log->chgcnt == chgcnt) { - /* Log hasn't changed; return try_read_ana() result */ - errno = saved_errno; - return saved_ret; - } - } while (--retries); - - return -EAGAIN; -} diff --git a/libnvme/src/nvme/ioctl.h b/libnvme/src/nvme/ioctl.h index 7c002d1852..197c94cd4b 100644 --- a/libnvme/src/nvme/ioctl.h +++ b/libnvme/src/nvme/ioctl.h @@ -26,11 +26,6 @@ */ #define NVME_LOG_PAGE_PDU_SIZE 4096 -/* - * should not exceed CAP.MQES, 16 is rational for most ssd - 
*/ -#define NVME_URING_ENTRIES 16 - /** * nvme_submit_admin_passthru() - Submit an nvme passthrough admin command * @hdl: Transport handle @@ -58,7 +53,7 @@ int nvme_submit_io_passthru(struct nvme_transport_handle *hdl, struct nvme_passthru_cmd *cmd); /** - * nvme_subsystem_reset() - Initiate a subsystem reset + * nvme_reset_subsystem() - Initiate a subsystem reset * @hdl: Transport handle * * This should only be sent to controller handles, not to namespaces. @@ -66,27 +61,27 @@ int nvme_submit_io_passthru(struct nvme_transport_handle *hdl, * Return: Zero if a subsystem reset was initiated or -1 with errno set * otherwise. */ -int nvme_subsystem_reset(struct nvme_transport_handle *hdl); +int nvme_reset_subsystem(struct nvme_transport_handle *hdl); /** - * nvme_ctrl_reset() - Initiate a controller reset + * nvme_reset_ctrl() - Initiate a controller reset * @hdl: Transport handle * * This should only be sent to controller handles, not to namespaces. * * Return: 0 if a reset was initiated or -1 with errno set otherwise. */ -int nvme_ctrl_reset(struct nvme_transport_handle *hdl); +int nvme_reset_ctrl(struct nvme_transport_handle *hdl); /** - * nvme_ns_rescan() - Initiate a controller rescan + * nvme_rescan_ns() - Initiate a controller rescan * @hdl: Transport handle * * This should only be sent to controller handles, not to namespaces. * * Return: 0 if a rescan was initiated or -1 with errno set otherwise. 
*/ -int nvme_ns_rescan(struct nvme_transport_handle *hdl); +int nvme_rescan_ns(struct nvme_transport_handle *hdl); /** * nvme_get_nsid() - Retrieve the NSID from a namespace file descriptor diff --git a/libnvme/src/nvme/lib.c b/libnvme/src/nvme/lib.c index 856c2226da..c9fdd2db64 100644 --- a/libnvme/src/nvme/lib.c +++ b/libnvme/src/nvme/lib.c @@ -80,6 +80,7 @@ void nvme_free_global_ctx(struct nvme_global_ctx *ctx) nvme_mi_close(ep); free(ctx->config_file); free(ctx->application); + nvme_close_uring(ctx); free(ctx); } @@ -155,6 +156,11 @@ static int __nvme_transport_handle_open_direct( if (c) { if (!S_ISCHR(hdl->stat.st_mode)) return -EINVAL; + ret = __nvme_transport_handle_open_uring(hdl); + if (ret && ret != -ENOTSUP) { + close(hdl->fd); + return ret; + } } else if (!S_ISBLK(hdl->stat.st_mode)) { return -EINVAL; } diff --git a/libnvme/src/nvme/linux.c b/libnvme/src/nvme/linux.c index 80ce135ca1..6f58507af9 100644 --- a/libnvme/src/nvme/linux.c +++ b/libnvme/src/nvme/linux.c @@ -1723,7 +1723,7 @@ static int uuid_from_dmi(char *system_uuid) return ret; } -char *nvme_hostid_generate() +char *nvme_generate_hostid(void) { int ret; char uuid_str[NVME_UUID_LEN_STRING]; @@ -1733,7 +1733,7 @@ char *nvme_hostid_generate() if (ret < 0) ret = uuid_from_device_tree(uuid_str); if (ret < 0) { - if (nvme_uuid_random(uuid) < 0) + if (nvme_random_uuid(uuid) < 0) memset(uuid, 0, NVME_UUID_LEN); nvme_uuid_to_string(uuid, uuid_str); } @@ -1741,14 +1741,14 @@ char *nvme_hostid_generate() return strdup(uuid_str); } -char *nvme_hostnqn_generate_from_hostid(char *hostid) +char *nvme_generate_hostnqn_from_hostid(char *hostid) { char *hid = NULL; char *hostnqn; int ret; if (!hostid) - hostid = hid = nvme_hostid_generate(); + hostid = hid = nvme_generate_hostid(); ret = asprintf(&hostnqn, "nqn.2014-08.org.nvmexpress:uuid:%s", hostid); free(hid); @@ -1756,9 +1756,9 @@ char *nvme_hostnqn_generate_from_hostid(char *hostid) return (ret < 0) ? 
NULL : hostnqn; } -char *nvme_hostnqn_generate() +char *nvme_generate_hostnqn(void) { - return nvme_hostnqn_generate_from_hostid(NULL); + return nvme_generate_hostnqn_from_hostid(NULL); } static char *nvmf_read_file(const char *f, int len) @@ -1779,7 +1779,7 @@ static char *nvmf_read_file(const char *f, int len) return strndup(buf, strcspn(buf, "\n")); } -char *nvme_hostnqn_from_file() +char *nvme_read_hostnqn(void) { char *hostnqn = getenv("LIBNVME_HOSTNQN"); @@ -1792,7 +1792,7 @@ char *nvme_hostnqn_from_file() return nvmf_read_file(NVMF_HOSTNQN_FILE, NVMF_NQN_SIZE); } -char *nvme_hostid_from_file() +char *nvme_read_hostid(void) { char *hostid = getenv("LIBNVME_HOSTID"); diff --git a/libnvme/src/nvme/linux.h b/libnvme/src/nvme/linux.h index 6b9da6ae17..2816d4bef8 100644 --- a/libnvme/src/nvme/linux.h +++ b/libnvme/src/nvme/linux.h @@ -395,14 +395,14 @@ int nvme_import_tls_key_versioned(struct nvme_global_ctx *ctx, unsigned char *hmac, size_t *key_len, unsigned char **key); /** - * nvme_hostnqn_generate() - Generate a machine specific host nqn + * nvme_generate_hostnqn() - Generate a machine specific host nqn * Returns: An nvm namespace qualified name string based on the machine * identifier, or NULL if not successful. */ -char *nvme_hostnqn_generate(); +char *nvme_generate_hostnqn(void); /** - * nvme_hostnqn_generate_from_hostid() - Generate a host nqn from + * nvme_generate_hostnqn_from_hostid() - Generate a host nqn from * host identifier * @hostid: Host identifier * @@ -412,18 +412,18 @@ char *nvme_hostnqn_generate(); * Return: On success, an NVMe Qualified Name for host identification. This * name is based on the given host identifier. On failure, NULL. 
*/ -char *nvme_hostnqn_generate_from_hostid(char *hostid); +char *nvme_generate_hostnqn_from_hostid(char *hostid); /** - * nvme_hostid_generate() - Generate a machine specific host identifier + * nvme_generate_hostid() - Generate a machine specific host identifier * * Return: On success, an identifier string based on the machine identifier to * be used as NVMe Host Identifier, or NULL on failure. */ -char *nvme_hostid_generate(); +char *nvme_generate_hostid(void); /** - * nvme_hostnqn_from_file() - Reads the host nvm qualified name from the config + * nvme_read_hostnqn() - Reads the host nvm qualified name from the config * default location * * Retrieve the qualified name from the config file located in $SYSCONFDIR/nvme. @@ -432,10 +432,10 @@ char *nvme_hostid_generate(); * Return: The host nqn, or NULL if unsuccessful. If found, the caller * is responsible to free the string. */ -char *nvme_hostnqn_from_file(); +char *nvme_read_hostnqn(void); /** - * nvme_hostid_from_file() - Reads the host identifier from the config default + * nvme_read_hostid() - Reads the host identifier from the config default * location * * Retrieve the host idenditifer from the config file located in @@ -444,4 +444,4 @@ char *nvme_hostnqn_from_file(); * Return: The host identifier, or NULL if unsuccessful. If found, the caller * is responsible to free the string. 
*/ -char *nvme_hostid_from_file(); +char *nvme_read_hostid(void); diff --git a/libnvme/src/nvme/nbft.c b/libnvme/src/nvme/nbft.c index 9dd0fb676b..2baa4d4e52 100644 --- a/libnvme/src/nvme/nbft.c +++ b/libnvme/src/nvme/nbft.c @@ -711,7 +711,7 @@ static int parse_raw_nbft(struct nvme_global_ctx *ctx, struct nbft_info *nbft) return 0; } -void nvme_nbft_free(struct nvme_global_ctx *ctx, struct nbft_info *nbft) +void nvme_free_nbft(struct nvme_global_ctx *ctx, struct nbft_info *nbft) { struct nbft_info_hfi **hfi; struct nbft_info_security **sec; @@ -737,7 +737,7 @@ void nvme_nbft_free(struct nvme_global_ctx *ctx, struct nbft_info *nbft) free(nbft); } -int nvme_nbft_read(struct nvme_global_ctx *ctx, struct nbft_info **nbft, +int nvme_read_nbft(struct nvme_global_ctx *ctx, struct nbft_info **nbft, const char *filename) { __u8 *raw_nbft = NULL; @@ -801,7 +801,7 @@ int nvme_nbft_read(struct nvme_global_ctx *ctx, struct nbft_info **nbft, if (parse_raw_nbft(ctx, *nbft)) { nvme_msg(ctx, LOG_ERR, "Failed to parse %s\n", filename); - nvme_nbft_free(ctx, *nbft); + nvme_free_nbft(ctx, *nbft); return -EINVAL; } return 0; diff --git a/libnvme/src/nvme/nbft.h b/libnvme/src/nvme/nbft.h index 16d1606c7a..48d60ffb1b 100644 --- a/libnvme/src/nvme/nbft.h +++ b/libnvme/src/nvme/nbft.h @@ -1234,26 +1234,26 @@ struct nbft_info { }; /** - * nvme_nbft_read() - Read and parse contents of an ACPI NBFT table + * nvme_read_nbft() - Read and parse contents of an ACPI NBFT table * * @ctx: struct nvme_global_ctx object * @nbft: Parsed NBFT table data. * @filename: Filename of the raw NBFT table to read. * * Read and parse the specified NBFT file into a struct nbft_info. - * Free with nvme_nbft_free(). + * Free with nvme_free_nbft(). * * Return: 0 on success, errno otherwise. 
*/ -int nvme_nbft_read(struct nvme_global_ctx *ctx, struct nbft_info **nbft, +int nvme_read_nbft(struct nvme_global_ctx *ctx, struct nbft_info **nbft, const char *filename); /** - * nvme_nbft_free() - Free the struct nbft_info and its contents + * nvme_free_nbft() - Free the struct nbft_info and its contents * @ctx: struct nvme_global_ctx object * @nbft: Parsed NBFT table data. */ -void nvme_nbft_free(struct nvme_global_ctx *ctx, struct nbft_info *nbft); +void nvme_free_nbft(struct nvme_global_ctx *ctx, struct nbft_info *nbft); /** * struct nbft_file_entry - Linked list entry for NBFT files diff --git a/libnvme/src/nvme/private.h b/libnvme/src/nvme/private.h index a85c5c177d..a1168389a5 100644 --- a/libnvme/src/nvme/private.h +++ b/libnvme/src/nvme/private.h @@ -7,6 +7,7 @@ */ #pragma once +#include #include #include @@ -117,6 +118,7 @@ struct nvme_transport_handle { struct stat stat; bool ioctl_admin64; bool ioctl_io64; + bool uring_enabled; /* mi */ struct nvme_mi_ep *ep; @@ -278,6 +280,12 @@ struct nvme_fabric_options { bool trsvcid; }; +enum nvme_io_uring_state { + NVME_IO_URING_STATE_UNKNOWN = 0, + NVME_IO_URING_STATE_NOT_AVAILABLE, + NVME_IO_URING_STATE_AVAILABLE, +}; + struct nvme_global_ctx { char *config_file; char *application; @@ -290,6 +298,12 @@ struct nvme_global_ctx { bool dry_run; struct nvme_fabric_options *options; struct ifaddrs *ifaddrs_cache; /* init with nvme_getifaddrs() */ + + enum nvme_io_uring_state uring_state; +#ifdef CONFIG_LIBURING + int ring_cmds; + struct io_uring *ring; +#endif }; struct nvmf_discovery_ctx { @@ -780,3 +794,40 @@ void nvme_ns_release_transport_handle(nvme_ns_t n); */ int nvme_mi_admin_admin_passthru(struct nvme_transport_handle *hdl, struct nvme_passthru_cmd *cmd); + +#ifdef CONFIG_LIBURING +int nvme_open_uring(struct nvme_global_ctx *ctx); +void nvme_close_uring(struct nvme_global_ctx *ctx); +int __nvme_transport_handle_open_uring(struct nvme_transport_handle *hdl); +int nvme_submit_admin_passthru_async(struct 
nvme_transport_handle *hdl, + struct nvme_passthru_cmd *cmd); +int nvme_wait_complete_passthru(struct nvme_transport_handle *hdl); +#else +static inline int +nvme_open_uring(struct nvme_global_ctx *ctx) +{ + return -ENOTSUP; +} +static inline void +nvme_close_uring(struct nvme_global_ctx *ctx) +{ +} +static inline int +__nvme_transport_handle_open_uring(struct nvme_transport_handle *hdl) +{ + hdl->ctx->uring_state = NVME_IO_URING_STATE_NOT_AVAILABLE; + return -ENOTSUP; +} +static inline int +nvme_submit_admin_passthru_async(struct nvme_transport_handle *hdl, + struct nvme_passthru_cmd *cmd) +{ + return -ENOTSUP; +} +static inline int +nvme_wait_complete_passthru(struct nvme_transport_handle *hdl) +{ + return -ENOTSUP; +} +#endif + diff --git a/libnvme/src/nvme/tree.c b/libnvme/src/nvme/tree.c index fc207aea5c..7c04099405 100644 --- a/libnvme/src/nvme/tree.c +++ b/libnvme/src/nvme/tree.c @@ -144,9 +144,9 @@ int nvme_host_get_ids(struct nvme_global_ctx *ctx, /* /etc/nvme/hostid and/or /etc/nvme/hostnqn */ if (!hid) - hid = nvme_hostid_from_file(); + hid = nvme_read_hostid(); if (!hnqn) - hnqn = nvme_hostnqn_from_file(); + hnqn = nvme_read_hostnqn(); /* incomplete configuration, thus derive hostid from hostnqn */ if (!hid && hnqn) @@ -157,7 +157,7 @@ int nvme_host_get_ids(struct nvme_global_ctx *ctx, * fails generate one */ if (!hid) { - hid = nvme_hostid_generate(); + hid = nvme_generate_hostid(); if (!hid) return -ENOMEM; @@ -167,7 +167,7 @@ int nvme_host_get_ids(struct nvme_global_ctx *ctx, /* incomplete configuration, thus derive hostnqn from hostid */ if (!hnqn) { - hnqn = nvme_hostnqn_generate_from_hostid(hid); + hnqn = nvme_generate_hostnqn_from_hostid(hid); if (!hnqn) return -ENOMEM; } @@ -188,7 +188,7 @@ int nvme_host_get_ids(struct nvme_global_ctx *ctx, return 0; } -int nvme_host_get(struct nvme_global_ctx *ctx, const char *hostnqn, +int nvme_get_host(struct nvme_global_ctx *ctx, const char *hostnqn, const char *hostid, nvme_host_t *host) { _cleanup_free_ 
char *hnqn = NULL; @@ -592,7 +592,7 @@ struct nvme_subsystem *nvme_lookup_subsystem(struct nvme_host *h, return nvme_alloc_subsystem(h, name, subsysnqn); } -int nvme_subsystem_get(struct nvme_global_ctx *ctx, +int nvme_get_subsystem(struct nvme_global_ctx *ctx, struct nvme_host *h, const char *name, const char *subsysnqn, struct nvme_subsystem **subsys) { @@ -779,7 +779,7 @@ static int nvme_scan_subsystem(struct nvme_global_ctx *ctx, const char *name) */ nvme_msg(ctx, LOG_DEBUG, "creating detached subsystem '%s'\n", name); - ret = nvme_host_get(ctx, NULL, NULL, &h); + ret = nvme_get_host(ctx, NULL, NULL, &h); if (ret) return ret; s = nvme_alloc_subsystem(h, name, subsysnqn); @@ -1525,7 +1525,7 @@ nvme_ctrl_t __nvme_lookup_ctrl(nvme_subsystem_t s, const char *transport, return matching_c; } -bool nvme_ctrl_config_match(struct nvme_ctrl *c, const char *transport, +bool nvme_ctrl_match_config(struct nvme_ctrl *c, const char *transport, const char *traddr, const char *trsvcid, const char *subsysnqn, const char *host_traddr, const char *host_iface) @@ -1981,7 +1981,7 @@ int nvme_scan_ctrl(struct nvme_global_ctx *ctx, const char *name, hostnqn = nvme_get_attr(path, "hostnqn"); hostid = nvme_get_attr(path, "hostid"); - ret = nvme_host_get(ctx, hostnqn, hostid, &h); + ret = nvme_get_host(ctx, hostnqn, hostid, &h); if (ret) return ret; @@ -2294,13 +2294,15 @@ int nvme_ns_compare(nvme_ns_t n, void *buf, off_t offset, size_t count) int nvme_ns_flush(nvme_ns_t n) { struct nvme_transport_handle *hdl; + struct nvme_passthru_cmd cmd; int err; err = nvme_ns_get_transport_handle(n, &hdl); if (err) return err; - return nvme_flush(hdl, nvme_ns_get_nsid(n)); + nvme_init_flush(&cmd, nvme_ns_get_nsid(n)); + return nvme_submit_io_passthru(hdl, &cmd); } static int nvme_strtou64(const char *str, void *res) diff --git a/libnvme/src/nvme/tree.h b/libnvme/src/nvme/tree.h index 3ced214d1d..d553e97c97 100644 --- a/libnvme/src/nvme/tree.h +++ b/libnvme/src/nvme/tree.h @@ -114,7 +114,7 @@ void 
nvme_host_set_pdc_enabled(nvme_host_t h, bool enabled); bool nvme_host_is_pdc_enabled(nvme_host_t h, bool fallback); /** - * nvme_host_get() - Returns a host object + * nvme_get_host() - Returns a host object * @ctx: struct nvme_global_ctx object * @hostnqn: Host NQN (optional) * @hostid: Host ID (optional) @@ -125,7 +125,7 @@ bool nvme_host_is_pdc_enabled(nvme_host_t h, bool fallback); * * Return: 0 on success or negative error code otherwise */ -int nvme_host_get(struct nvme_global_ctx *ctx, const char *hostnqn, +int nvme_get_host(struct nvme_global_ctx *ctx, const char *hostnqn, const char *hostid, nvme_host_t *h); /** @@ -181,7 +181,7 @@ nvme_subsystem_t nvme_first_subsystem(nvme_host_t h); nvme_subsystem_t nvme_next_subsystem(nvme_host_t h, nvme_subsystem_t s); /** - * nvme_subsystem_get() - Returns nvme_subsystem_t object + * nvme_get_subsystem() - Returns nvme_subsystem_t object * @ctx: struct nvme_global_ctx object * @h: &nvme_host_t object * @name: Name of the subsystem (may be NULL) @@ -192,7 +192,7 @@ nvme_subsystem_t nvme_next_subsystem(nvme_host_t h, nvme_subsystem_t s); * and @subsysnqn or create one if not found. * */ -int nvme_subsystem_get(struct nvme_global_ctx *ctx, +int nvme_get_subsystem(struct nvme_global_ctx *ctx, struct nvme_host *h, const char *name, const char *subsysnqn, struct nvme_subsystem **s); @@ -281,7 +281,7 @@ nvme_path_t nvme_namespace_first_path(nvme_ns_t ns); nvme_path_t nvme_namespace_next_path(nvme_ns_t ns, nvme_path_t p); /** - * nvme_ctrl_config_match() - Check if ctrl @c matches config params + * nvme_ctrl_match_config() - Check if ctrl @c matches config params * @c: An existing controller instance * @transport: Transport name * @traddr: Transport address @@ -296,7 +296,7 @@ nvme_path_t nvme_namespace_next_path(nvme_ns_t ns, nvme_path_t p); * * Return: true if there's a match, false otherwise. 
*/ -bool nvme_ctrl_config_match(struct nvme_ctrl *c, const char *transport, +bool nvme_ctrl_match_config(struct nvme_ctrl *c, const char *transport, const char *traddr, const char *trsvcid, const char *subsysnqn, const char *host_traddr, const char *host_iface); diff --git a/libnvme/src/nvme/uring.c b/libnvme/src/nvme/uring.c new file mode 100644 index 0000000000..7eb4bfa2ba --- /dev/null +++ b/libnvme/src/nvme/uring.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch + * Chaitanya Kulkarni + * Daniel Wagner + */ +#include + +#include + +#include "private.h" + +/* + * should not exceed CAP.MQES, 16 is rational for most ssd + */ +#define NVME_URING_ENTRIES 16 + +int nvme_open_uring(struct nvme_global_ctx *ctx) +{ + struct io_uring_probe *probe; + struct io_uring *ring; + + probe = io_uring_get_probe(); + if (!probe) + return -ENOTSUP; + + if (!io_uring_opcode_supported(probe, IORING_OP_URING_CMD)) + return -ENOTSUP; + + ring = calloc(1, sizeof(*ring)); + if (!ring) + return -ENOMEM; + + if (io_uring_queue_init(NVME_URING_ENTRIES, ring, + IORING_SETUP_SQE128 | IORING_SETUP_CQE32)) { + free(ring); + return -errno; + } + + ctx->ring = ring; + return 0; +} + +void nvme_close_uring(struct nvme_global_ctx *ctx) +{ + if (!ctx->ring) + return; + + io_uring_queue_exit(ctx->ring); + free(ctx->ring); +} + +int __nvme_transport_handle_open_uring(struct nvme_transport_handle *hdl) +{ + int err; + + switch (hdl->ctx->uring_state) { + case NVME_IO_URING_STATE_NOT_AVAILABLE: + return -ENOTSUP; + case NVME_IO_URING_STATE_AVAILABLE: + goto uring_enabled; + case NVME_IO_URING_STATE_UNKNOWN: + break; + } + + err = nvme_open_uring(hdl->ctx); + if (err) + return err; + +uring_enabled: + hdl->uring_enabled = true; + + return 0; +} + +static int nvme_submit_uring_cmd(struct io_uring *ring, int fd, + struct nvme_passthru_cmd *cmd) +{ + 
struct io_uring_sqe *sqe; + int ret; + + sqe = io_uring_get_sqe(ring); + if (!sqe) + return -1; + + memcpy(&sqe->cmd, cmd, sizeof(*cmd)); + + sqe->fd = fd; + sqe->opcode = IORING_OP_URING_CMD; + sqe->cmd_op = NVME_URING_CMD_ADMIN; + + ret = io_uring_submit(ring); + if (ret < 0) + return -errno; + + return 0; +} + +int nvme_wait_complete_passthru(struct nvme_transport_handle *hdl) +{ + struct io_uring_cqe *cqe; + struct io_uring *ring; + int err; + + ring = hdl->ctx->ring; + + for (int i = 0; i < hdl->ctx->ring_cmds; i++) { + err = io_uring_wait_cqe(ring, &cqe); + if (err < 0) + return -errno; + io_uring_cqe_seen(ring, cqe); + } + + hdl->ctx->ring_cmds = 0; + return 0; +} + +int nvme_submit_admin_passthru_async(struct nvme_transport_handle *hdl, + struct nvme_passthru_cmd *cmd) +{ + int err; + + if (hdl->ctx->ring_cmds >= NVME_URING_ENTRIES) { + err = nvme_wait_complete_passthru(hdl); + if (err) + return err; + } + + err = nvme_submit_uring_cmd(hdl->ctx->ring, hdl->fd, cmd); + if (err) + return err; + + hdl->ctx->ring_cmds += 1; + return 0; +} diff --git a/libnvme/src/nvme/util.c b/libnvme/src/nvme/util.c index d60667d337..842a52027f 100644 --- a/libnvme/src/nvme/util.c +++ b/libnvme/src/nvme/util.c @@ -749,7 +749,7 @@ int nvme_uuid_from_string(const char *str, unsigned char uuid[NVME_UUID_LEN]) } -int nvme_uuid_random(unsigned char uuid[NVME_UUID_LEN]) +int nvme_random_uuid(unsigned char uuid[NVME_UUID_LEN]) { _cleanup_fd_ int f = -1; ssize_t n; @@ -774,7 +774,8 @@ int nvme_uuid_random(unsigned char uuid[NVME_UUID_LEN]) return 0; } -int nvme_uuid_find(struct nvme_id_uuid_list *uuid_list, const unsigned char uuid[NVME_UUID_LEN]) +int nvme_find_uuid(struct nvme_id_uuid_list *uuid_list, + const unsigned char uuid[NVME_UUID_LEN]) { const unsigned char uuid_end[NVME_UUID_LEN] = {0}; diff --git a/libnvme/src/nvme/util.h b/libnvme/src/nvme/util.h index 0a9e2625e2..eb75a133bb 100644 --- a/libnvme/src/nvme/util.h +++ b/libnvme/src/nvme/util.h @@ -199,7 +199,7 @@ int 
nvme_uuid_to_string(unsigned char uuid[NVME_UUID_LEN], char *str); int nvme_uuid_from_string(const char *str, unsigned char uuid[NVME_UUID_LEN]); /** - * nvme_uuid_random - Generate random UUID + * nvme_random_uuid - Generate random UUID * @uuid: Generated random UUID * * Generate random number according @@ -207,16 +207,18 @@ int nvme_uuid_from_string(const char *str, unsigned char uuid[NVME_UUID_LEN]); * * Return: Returns error code if generating of random number fails. */ -int nvme_uuid_random(unsigned char uuid[NVME_UUID_LEN]); +int nvme_random_uuid(unsigned char uuid[NVME_UUID_LEN]); /** - * nvme_uuid_find - Find UUID position on UUID list + * nvme_find_uuid - Find UUID position on UUID list * @uuid_list: UUID list returned by identify UUID * @uuid: Binary encoded input UUID * - * Return: The array position where given UUID is present, or -1 on failure with errno set. + * Return: The array position where given UUID is present, or -1 on failure + * with errno set. */ -int nvme_uuid_find(struct nvme_id_uuid_list *uuid_list, const unsigned char uuid[NVME_UUID_LEN]); +int nvme_find_uuid(struct nvme_id_uuid_list *uuid_list, + const unsigned char uuid[NVME_UUID_LEN]); /** * nvme_basename - Return the final path component (the one after the last '/') diff --git a/libnvme/test/ioctl/misc.c b/libnvme/test/ioctl/misc.c index 66994d45dc..28f640bb29 100644 --- a/libnvme/test/ioctl/misc.c +++ b/libnvme/test/ioctl/misc.c @@ -742,11 +742,12 @@ static void test_flush(void) .opcode = nvme_cmd_flush, .nsid = TEST_NSID, }; - + struct nvme_passthru_cmd cmd; int err; set_mock_io_cmds(&mock_io_cmd, 1); - err = nvme_flush(test_hdl, TEST_NSID); + nvme_init_flush(&cmd, TEST_NSID); + err = nvme_submit_io_passthru(test_hdl, &cmd); end_mock_cmds(); check(err == 0, "returned error %d", err); } diff --git a/libnvme/test/nbft/nbft-dump.c b/libnvme/test/nbft/nbft-dump.c index d170cf1ec9..2b14bf5869 100644 --- a/libnvme/test/nbft/nbft-dump.c +++ b/libnvme/test/nbft/nbft-dump.c @@ -118,7 +118,7 
@@ int main(int argc, char **argv) return 1; } - if (nvme_nbft_read(ctx, &table, argv[1]) != 0) { + if (nvme_read_nbft(ctx, &table, argv[1]) != 0) { fprintf(stderr, "Error parsing the NBFT table %s: %m\n", argv[1]); nvme_free_global_ctx(ctx); @@ -127,7 +127,7 @@ int main(int argc, char **argv) print_nbft(table); - nvme_nbft_free(ctx, table); + nvme_free_nbft(ctx, table); nvme_free_global_ctx(ctx); return 0; } diff --git a/libnvme/test/tree.c b/libnvme/test/tree.c index db10f2af0f..076c10c485 100644 --- a/libnvme/test/tree.c +++ b/libnvme/test/tree.c @@ -131,14 +131,14 @@ static struct nvme_global_ctx *create_tree() ctx = nvme_create_global_ctx(stdout, LOG_DEBUG); assert(ctx); - nvme_host_get(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); + nvme_get_host(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); assert(h); printf(" ctrls created:\n"); for (int i = 0; i < ARRAY_SIZE(test_data); i++) { struct test_data *d = &test_data[i]; - assert(!nvme_subsystem_get(ctx, h, d->subsysname, + assert(!nvme_get_subsystem(ctx, h, d->subsysname, d->subsysnqn, &d->s)); assert(d->s); d->c = nvme_lookup_ctrl(d->s, d->transport, d->traddr, @@ -236,7 +236,7 @@ static bool ctrl_lookups(struct nvme_global_ctx *ctx) bool pass = true; h = nvme_first_host(ctx); - nvme_subsystem_get(ctx, h, DEFAULT_SUBSYSNAME, DEFAULT_SUBSYSNQN, &s); + nvme_get_subsystem(ctx, h, DEFAULT_SUBSYSNAME, DEFAULT_SUBSYSNQN, &s); printf(" lookup controller:\n"); for (int i = 0; i < ARRAY_SIZE(test_data); i++) { @@ -288,10 +288,10 @@ static bool test_src_addr() ctx = nvme_create_global_ctx(stdout, LOG_DEBUG); assert(ctx); - nvme_host_get(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); + nvme_get_host(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); assert(h); - nvme_subsystem_get(ctx, h, DEFAULT_SUBSYSNAME, DEFAULT_SUBSYSNQN, &s); + nvme_get_subsystem(ctx, h, DEFAULT_SUBSYSNAME, DEFAULT_SUBSYSNQN, &s); assert(s); c = nvme_lookup_ctrl(s, "tcp", "192.168.56.1", NULL, NULL, "8009", NULL); @@ -464,10 +464,10 @@ static bool ctrl_match(const 
char *tag, ctx = nvme_create_global_ctx(stdout, LOG_INFO); assert(ctx); - nvme_host_get(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); + nvme_get_host(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); assert(h); - assert(!nvme_subsystem_get(ctx, h, DEFAULT_SUBSYSNAME, + assert(!nvme_get_subsystem(ctx, h, DEFAULT_SUBSYSNAME, reference->subsysnqn ? reference->subsysnqn : DEFAULT_SUBSYSNQN, &s)); @@ -1080,10 +1080,10 @@ static bool ctrl_config_match(const char *tag, ctx = nvme_create_global_ctx(stdout, LOG_INFO); assert(ctx); - nvme_host_get(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); + nvme_get_host(ctx, DEFAULT_HOSTNQN, DEFAULT_HOSTID, &h); assert(h); - assert(!nvme_subsystem_get(ctx, h, DEFAULT_SUBSYSNAME, + assert(!nvme_get_subsystem(ctx, h, DEFAULT_SUBSYSNAME, reference->subsysnqn ? reference->subsysnqn : DEFAULT_SUBSYSNQN, &s)); @@ -1098,9 +1098,10 @@ static bool ctrl_config_match(const char *tag, reference_ctrl->address = (char *)reference->address; } - match = nvme_ctrl_config_match(reference_ctrl, candidate->transport, candidate->traddr, - candidate->trsvcid, candidate->subsysnqn, - candidate->host_traddr, candidate->host_iface); + match = nvme_ctrl_match_config(reference_ctrl, + candidate->transport, candidate->traddr, + candidate->trsvcid, candidate->subsysnqn, + candidate->host_traddr, candidate->host_iface); if (should_match) { if (!match) { diff --git a/libnvme/test/uuid.c b/libnvme/test/uuid.c index 9a2bbc659c..3b557969c4 100644 --- a/libnvme/test/uuid.c +++ b/libnvme/test/uuid.c @@ -85,9 +85,9 @@ static void random_uuid_test(void) unsigned char uuid1[NVME_UUID_LEN], uuid2[NVME_UUID_LEN]; char str1[NVME_UUID_LEN_STRING], str2[NVME_UUID_LEN_STRING]; - if (nvme_uuid_random(uuid1) || nvme_uuid_random(uuid2)) { + if (nvme_random_uuid(uuid1) || nvme_random_uuid(uuid2)) { test_rc = 1; - printf("ERROR: nvme_uuid_random() failed\n"); + printf("ERROR: nvme_random_uuid() failed\n"); return; } diff --git a/meson.build b/meson.build index 1c1d87351c..503809a65f 100644 --- 
a/meson.build +++ b/meson.build @@ -472,15 +472,16 @@ if want_nvme subdir('util') # declares: util_sources sources = [ - 'nvme.c', + 'libnvme-wrap.c', + 'logging.c', + 'nvme-cmds.c', 'nvme-models.c', - 'nvme-print.c', - 'nvme-print-stdout.c', 'nvme-print-binary.c', + 'nvme-print-stdout.c', + 'nvme-print.c', 'nvme-rpmb.c', + 'nvme.c', 'plugin.c', - 'libnvme-wrap.c', - 'logging.c', ] if want_fabrics sources += 'fabrics.c' diff --git a/nvme-cmds.c b/nvme-cmds.c new file mode 100644 index 0000000000..11148379d0 --- /dev/null +++ b/nvme-cmds.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * This file is part of libnvme. + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch + * Chaitanya Kulkarni + */ + +#include + +#include "nvme-cmds.h" + +static int nvme_ns_attachment(struct nvme_transport_handle *hdl, bool ish, + __u32 nsid, __u16 num_ctrls, __u16 *ctrlist, bool attach) +{ + struct nvme_ctrl_list cntlist = { 0 }; + struct nvme_passthru_cmd cmd; + + nvme_init_ctrl_list(&cntlist, num_ctrls, ctrlist); + if (ish && nvme_transport_handle_is_mi(hdl)) + nvme_init_mi_cmd_flags(&cmd, ish); + + if (attach) + nvme_init_ns_attach_ctrls(&cmd, nsid, &cntlist); + else + nvme_init_ns_detach_ctrls(&cmd, nsid, &cntlist); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +int nvme_namespace_attach_ctrls(struct nvme_transport_handle *hdl, bool ish, + __u32 nsid, __u16 num_ctrls, __u16 *ctrlist) +{ + return nvme_ns_attachment(hdl, ish, nsid, num_ctrls, ctrlist, true); +} + +int nvme_namespace_detach_ctrls(struct nvme_transport_handle *hdl, bool ish, + __u32 nsid, __u16 num_ctrls, __u16 *ctrlist) +{ + return nvme_ns_attachment(hdl, ish, nsid, num_ctrls, ctrlist, false); +} + + diff --git a/nvme-cmds.h b/nvme-cmds.h new file mode 100644 index 0000000000..97cd8ba578 --- /dev/null +++ b/nvme-cmds.h @@ -0,0 +1,1749 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * This file is part of libnvme. 
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * Authors: Keith Busch + * Chaitanya Kulkarni + * Daniel Wagner + */ + +#ifndef NVME_CMDS +#define NVME_CMDS + +#include +#include +#include + +/** + * nvme_flush() - Send an nvme flush command + * @hdl: Transport handle + * @nsid: Namespace identifier + * + * The Flush command requests that the contents of volatile write cache be made + * non-volatile. + * + * Return: 0 on success, the nvme command status if a response was + * received (see &enum nvme_status_field) or a negative error otherwise. + */ +static inline int nvme_flush(struct nvme_transport_handle *hdl, __u32 nsid) +{ + struct nvme_passthru_cmd cmd = {}; + + cmd.opcode = nvme_cmd_flush; + cmd.nsid = nsid; + + return nvme_submit_io_passthru(hdl, &cmd); +} + +/** + * nvme_identify() - Submit a generic Identify command + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID (if applicable to the requested CNS). + * @csi: Command Set Identifier. + * @cns: Identify Controller or Namespace Structure (CNS) value, + * specifying the type of data to be returned. + * @data: Pointer to the buffer where the identification data will + * be stored. + * @len: Length of the data buffer in bytes. + * + * The generic wrapper for submitting an Identify command, allowing the host + * to specify any combination of Identify parameters. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_identify(struct nvme_transport_handle *hdl, __u32 nsid, enum nvme_csi csi, + enum nvme_identify_cns cns, void *data, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify(&cmd, nsid, csi, cns, data, len); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_ctrl() - Submit an Identify Controller command + * @hdl: Transport handle for the controller. 
+ * @id: Pointer to the buffer (&struct nvme_id_ctrl) where the + * controller identification data will be stored upon + * successful completion. + * + * Submits the Identify Controller command to retrieve the controller's + * capabilities and configuration data. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_identify_ctrl(struct nvme_transport_handle *hdl, + struct nvme_id_ctrl *id) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_ctrl(&cmd, id); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_active_ns_list() - Submit an Identify Active Namespace + * List command + * @hdl: Transport handle for the controller. + * @nsid: The Namespace ID to query + * @ns_list: Pointer to the buffer (&struct nvme_ns_list) where the + * active namespace list will be stored. + * + * Submits the Identify command to retrieve a list of active Namespace IDs. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_identify_active_ns_list(struct nvme_transport_handle *hdl, + __u32 nsid, struct nvme_ns_list *ns_list) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_active_ns_list(&cmd, nsid, ns_list); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_ns() - Submit an Identify Namespace command + * @hdl: Transport handle for the controller. + * @nsid: The Namespace ID to identify. + * @ns: Pointer to the buffer (&struct nvme_id_ns) where the namespace + * identification data will be stored. + * + * Submits the Identify command to retrieve the Namespace Identification + * data structure for a specified namespace. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ + +static inline int +nvme_identify_ns(struct nvme_transport_handle *hdl, + __u32 nsid, struct nvme_id_ns *ns) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_ns(&cmd, nsid, ns); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_csi_ns() - Submit a CSI-specific Identify Namespace command + * @hdl: Transport handle for the controller. + * @nsid: The Namespace ID to identify. + * @csi: The Command Set Identifier + * @uidx: The UUID Index for the command. + * @id_ns: Pointer to the buffer (@struct nvme_nvm_id_ns) where the + * CSI-specific namespace identification data will be stored. + * + * Submits the Identify command to retrieve Namespace Identification data + * specific to a Command Set Identifier (CSI). + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_identify_csi_ns(struct nvme_transport_handle *hdl, __u32 nsid, + enum nvme_csi csi, __u8 uidx, struct nvme_nvm_id_ns *id_ns) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_csi_ns(&cmd, nsid, csi, uidx, id_ns); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_uuid_list() - Submit an Identify UUID List command + * @hdl: Transport handle for the controller. + * @uuid_list: Pointer to the buffer (&struct nvme_id_uuid_list) where the + * UUID list will be stored. + * + * Submits the Identify command to retrieve a list of UUIDs associated + * with the controller. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ +static inline int +nvme_identify_uuid_list(struct nvme_transport_handle *hdl, + struct nvme_id_uuid_list *uuid_list) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_uuid_list(&cmd, uuid_list); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_csi_ns_user_data_format() - Submit an Identify CSI Namespace + * User Data Format command + * @hdl: Transport handle for the controller. + * @csi: Command Set Identifier. + * @fidx: Format Index, specifying which format entry to return. + * @uidx: The UUID Index for the command. + * @data: Pointer to the buffer where the format data will be stored. + * + * Submits the Identify command to retrieve a CSI-specific Namespace User + * Data Format data structure. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_identify_csi_ns_user_data_format(struct nvme_transport_handle *hdl, + enum nvme_csi csi, __u16 fidx, __u8 uidx, void *data) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_csi_ns_user_data_format(&cmd, csi, fidx, uidx, data); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_ns_granularity() - Submit an Identify Namespace Granularity + * List command + * @hdl: Transport handle for the controller. + * @gr_list: Pointer to the buffer (&struct nvme_id_ns_granularity_list) + * where the granularity list will be stored. + * + * Submits the Identify command to retrieve the Namespace Granularity List. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ +static inline int +nvme_identify_ns_granularity(struct nvme_transport_handle *hdl, + struct nvme_id_ns_granularity_list *gr_list) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_ns_granularity(&cmd, gr_list); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_identify_ns_descs_list() - Submit an Identify Namespace ID Descriptor + * List command + * @hdl: Transport handle for the controller. + * @nsid: The Namespace ID to query. + * @descs: Pointer to the buffer (&struct nvme_ns_id_desc) where the + * descriptor list will be stored. + * + * Submits the Identify command to retrieve the Namespace ID Descriptor List + * for a specified namespace. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_identify_ns_descs_list(struct nvme_transport_handle *hdl, + __u32 nsid, struct nvme_ns_id_desc *descs) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_identify_ns_descs_list(&cmd, nsid, descs); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_zns_identify_ns() - Submit a ZNS-specific Identify Namespace command + * @hdl: Transport handle for the controller. + * @nsid: The Namespace ID to identify. + * @data: Pointer to the buffer (&struct nvme_zns_id_ns) where the ZNS + * namespace identification data will be stored. + * + * Submits the Identify command to retrieve the Zoned Namespace (ZNS) + * specific identification data structure for a specified namespace. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_zns_identify_ns(struct nvme_transport_handle *hdl, + __u32 nsid, struct nvme_zns_id_ns *data) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_zns_identify_ns(&cmd, nsid, data); + + return nvme_submit_admin_passthru(hdl, &cmd); +} + +/** + * nvme_get_log_simple() - Retrieve a log page using default parameters + * @hdl: Transport handle for the controller. 
+ * @lid: Log Identifier, specifying the log page to retrieve + * (@enum nvme_cmd_get_log_lid). + * @data: Pointer to the buffer where the log page data will be stored. + * @len: Length of the data buffer in bytes. + * + * Submits the Get Log Page command using the common settings: + * NVME\_NSID\_ALL, Retain Asynchronous Event (RAE) set to false, + * and assuming the NVM Command Set. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_simple(struct nvme_transport_handle *hdl, + enum nvme_cmd_get_log_lid lid, void *data, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log(&cmd, NVME_NSID_ALL, lid, NVME_CSI_NVM, data, len); + + return nvme_get_log(hdl, &cmd, false, NVME_LOG_PAGE_PDU_SIZE); +} + +/** + * nvme_get_log_supported_log_pages() - Retrieve the Supported Log Pages + * Log Page + * @hdl: Transport handle for the controller. + * @log: Pointer to the buffer (@struct nvme_supported_log_pages) where + * the log page data will be stored. + * + * Submits the Get Log Page command specifically for the Supported Log Pages + * Log. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_supported_log_pages(struct nvme_transport_handle *hdl, + struct nvme_supported_log_pages *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log(&cmd, NVME_NSID_ALL, NVME_LOG_LID_SUPPORTED_LOG_PAGES, + NVME_CSI_NVM, log, sizeof(*log)); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + + +/** + * nvme_get_log_error() - Retrieve the Error Information Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for (usually NVME_NSID_ALL). + * @nr_entries: The maximum number of error log entries to retrieve. + * @err_log: Pointer to the buffer (array of @struct nvme_error_log_page) + * where the log page data will be stored. 
+ * + * This log page describes extended error information for a command that + * completed with error, or may report an error that is not specific to a + * particular command. The total size requested is determined by + * @nr_entries * sizeof(@struct nvme_error_log_page). + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_error(struct nvme_transport_handle *hdl, __u32 nsid, + unsigned int nr_entries, struct nvme_error_log_page *err_log) +{ + struct nvme_passthru_cmd cmd; + size_t len = sizeof(*err_log) * nr_entries; + + nvme_init_get_log(&cmd, nsid, NVME_LOG_LID_ERROR, + NVME_CSI_NVM, err_log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_fw_slot() - Retrieve the Firmware Slot Information Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for (use NVME_NSID_ALL). + * @fw_log: Pointer to the buffer (@struct nvme_firmware_slot) where the log + * page data will be stored. + * + * This log page describes the firmware revision stored in each firmware slot + * supported. The firmware revision is indicated as an ASCII string. The log + * page also indicates the active slot number. + * + * This command is typically issued for the controller scope, thus using + * NVME_NSID_ALL. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_fw_slot(struct nvme_transport_handle *hdl, __u32 nsid, + struct nvme_firmware_slot *fw_log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log(&cmd, nsid, NVME_LOG_LID_FW_SLOT, + NVME_CSI_NVM, fw_log, sizeof(*fw_log)); + + return nvme_get_log(hdl, &cmd, false, sizeof(*fw_log)); +} + +/** + * nvme_get_log_changed_ns_list() - Retrieve the Namespace Change Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for (use NVME_NSID_ALL). 
+ * @ns_log: Pointer to the buffer (@struct nvme_ns_list) where the log + * page data will be stored. + * + * This log page describes namespaces attached to this controller that have + * changed since the last time the namespace was identified, been added, or + * deleted. + * + * This command is typically issued for the controller scope, thus using + * NVME_NSID_ALL. The Retain Asynchronous Event (RAE) is true to retain + * asynchronous events associated with the log page + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_changed_ns_list(struct nvme_transport_handle *hdl, __u32 nsid, + struct nvme_ns_list *ns_log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log(&cmd, nsid, NVME_LOG_LID_CHANGED_NS, + NVME_CSI_NVM, ns_log, sizeof(*ns_log)); + + return nvme_get_log(hdl, &cmd, true, sizeof(*ns_log)); +} + +/** + * nvme_get_log_cmd_effects() - Retrieve the Command Effects Log Page + * @hdl: Transport handle for the controller. + * @csi: Command Set Identifier for the requested log page. + * @effects_log:Pointer to the buffer (@struct nvme_cmd_effects_log) where the + * log page data will be stored. + * + * This log page describes the commands that the controller supports and the + * effects of those commands on the state of the NVM subsystem. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_ALL. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ +static inline int +nvme_get_log_cmd_effects(struct nvme_transport_handle *hdl, + enum nvme_csi csi, struct nvme_cmd_effects_log *effects_log) +{ + struct nvme_passthru_cmd cmd; + size_t len = sizeof(*effects_log); + + nvme_init_get_log_cmd_effects(&cmd, csi, effects_log); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_device_self_test() - Retrieve the Device Self-Test Log Page + * @hdl: Transport handle for the controller. + * @log: Pointer to the buffer (@struct nvme_self_test_log) where the log + * page data will be stored. + * + * This log page indicates the status of an in-progress self-test and the + * percent complete of that operation, and the results of the previous 20 + * self-test operations. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_ALL. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_device_self_test(struct nvme_transport_handle *hdl, + struct nvme_self_test_log *log) +{ + struct nvme_passthru_cmd cmd; + size_t len = sizeof(*log); + + nvme_init_get_log(&cmd, NVME_NSID_ALL, NVME_LOG_LID_DEVICE_SELF_TEST, + NVME_CSI_NVM, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_create_telemetry_host_mcda() - Create the Host Initiated + * Telemetry Log + * @hdl: Transport handle for the controller. + * @mcda: Maximum Created Data Area. Specifies the maximum amount of data + * that may be returned by the controller. + * @log: Pointer to the buffer (@struct nvme_telemetry_log) where the log + * page data will be stored. + * + * Submits the Get Log Page command to initiate the creation of a Host Initiated + * Telemetry Log. 
It sets the Log Identifier (LID) to Telemetry Host and + * includes the Maximum Created Data Area (MCDA) in the Log Specific Parameter + * (LSP) field along with the Create bit. + * + * It automatically sets Retain Asynchronous Event (RAE) to false. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_create_telemetry_host_mcda(struct nvme_transport_handle *hdl, + enum nvme_telemetry_da mcda, struct nvme_telemetry_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_create_telemetry_host_mcda(&cmd, mcda, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_create_telemetry_host() - Create the Host Initiated Telemetry + * Log (Controller Determined Size) + * @hdl: Transport handle for the controller. + * @log: Pointer to the buffer (@struct nvme_telemetry_log) where the log + * page data will be stored. + * + * Submits the Get Log Page command to initiate the creation of a Host Initiated + * Telemetry Log. This is a convenience wrapper that automatically uses the + * Controller Determined size for the Maximum Created Data Area (MCDA). + * + * It automatically sets Retain Asynchronous Event (RAE) to false. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_create_telemetry_host(struct nvme_transport_handle *hdl, + struct nvme_telemetry_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_create_telemetry_host(&cmd, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_telemetry_host() - Retrieve the Host-Initiated + * Telemetry Log Page (Retain) + * @hdl: Transport handle for the controller. + * @lpo: Offset (in bytes) into the telemetry data to start the + * retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. 
+ * + * Submits the Get Log Page command to retrieve a previously captured + * Host-Initiated Telemetry Log, starting at a specified offset (@lpo). The Log + * Specific Parameter (LSP) field is set to indicate the capture should be + * retained (not deleted after read). + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous Event + * (RAE) to false. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_telemetry_host(struct nvme_transport_handle *hdl, + __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_telemetry_host(&cmd, lpo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_telemetry_ctrl() - Retrieve the Controller-Initiated + * Telemetry Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @lpo: Offset (in bytes) into the telemetry data to start the + * retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Controller-Initiated + * Telemetry Log, allowing retrieval of data starting at a specified offset + * (@lpo). + * + * It automatically sets the Log Identifier (LID). + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_telemetry_ctrl(struct nvme_transport_handle *hdl, bool rae, + __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_telemetry_ctrl(&cmd, lpo, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_endurance_group() - Retrieve the Endurance Group Log Page + * @hdl: Transport handle for the controller. + * @endgid: Starting Endurance Group Identifier (ENDGID) to return in + * the list. 
+ * @log: Pointer to the buffer (@struct nvme_endurance_group_log) where + * the log page data will be stored. + * + * This log page indicates if an Endurance Group Event has occurred for a + * particular Endurance Group. The ENDGID is placed in the Log Specific + * Identifier (LSI) field of the Get Log Page command. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_endurance_group(struct nvme_transport_handle *hdl, + __u16 endgid, struct nvme_endurance_group_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_endurance_group(&cmd, endgid, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_predictable_lat_nvmset() - Retrieve the Predictable Latency + * Per NVM Set Log Page + * @hdl: Transport handle for the controller. + * @nvmsetid: The NVM Set Identifier (NVMSETID) for which to retrieve the log. + * @log: Pointer to the buffer (@struct nvme_nvmset_predictable_lat_log) + * where the log page data will be stored. + * + * Submits the Get Log Page command specifically for the Predictable Latency Per + * NVM Set Log. The NVMSETID is placed in the Log Specific Identifier (LSI) + * field of the command. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ +static inline int +nvme_get_log_predictable_lat_nvmset(struct nvme_transport_handle *hdl, + __u16 nvmsetid, struct nvme_nvmset_predictable_lat_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_predictable_lat_nvmset(&cmd, nvmsetid, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_predictable_lat_event() - Retrieve the Predictable Latency Event + * Aggregate Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Predictable Latency + * Event Aggregate Log, allowing retrieval of data starting at a specified + * offset (@lpo). + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_PREDICTABLE_LAT_AGG. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_predictable_lat_event(struct nvme_transport_handle *hdl, + bool rae, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_predictable_lat_event(&cmd, lpo, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_fdp_configurations() - Retrieve the Flexible Data Placement + * (FDP) Configurations Log Page + * @hdl: Transport handle for the controller. + * @egid: Endurance Group Identifier (EGID) to return in the + * list (used in LSI). + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the FDP Configurations Log. + * The EGID is placed in the Log Specific Identifier (LSI) field. 
+ * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_fdp_configurations(struct nvme_transport_handle *hdl, + __u16 egid, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_fdp_configurations(&cmd, egid, lpo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_reclaim_unit_handle_usage() - Retrieve the FDP Reclaim Unit + * Handle (RUH) Usage Log Page + * @hdl: Transport handle for the controller. + * @egid: Endurance Group Identifier (EGID) (used in LSI). + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the FDP Reclaim Unit Handle + * Usage Log. The EGID is placed in the Log Specific Identifier (LSI) field. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_reclaim_unit_handle_usage(struct nvme_transport_handle *hdl, + __u16 egid, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_reclaim_unit_handle_usage(&cmd, egid, lpo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_fdp_stats() - Retrieve the Flexible Data Placement (FDP) + * Statistics Log Page + * @hdl: Transport handle for the controller. + * @egid: Endurance Group Identifier (EGID) (used in LSI). 
+ * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the FDP Statistics Log. + * The EGID is placed in the Log Specific Identifier (LSI) field. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_fdp_stats(struct nvme_transport_handle *hdl, + __u16 egid, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_fdp_stats(&cmd, egid, lpo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_fdp_events() - Retrieve the Flexible Data Placement (FDP) + * Events Log Page + * @hdl: Transport handle for the controller. + * @egid: Endurance Group Identifier (EGID) (used in LSI). + * @host_events:Whether to report host-initiated events (true) or + * controller-initiated events (false). + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the FDP Events Log. + * The EGID is placed in the Log Specific Identifier (LSI) field, and the + * @host_events flag is used to set the Log Specific Parameter (LSP) field. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ +static inline int +nvme_get_log_fdp_events(struct nvme_transport_handle *hdl, + __u16 egid, bool host_events, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_fdp_events(&cmd, egid, host_events, lpo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_ana() - Retrieve the Asymmetric Namespace Access (ANA) Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @lsp: Log specific parameter, see &enum nvme_get_log_ana_lsp. + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * This log consists of a header describing the log and descriptors containing + * the ANA information for groups that contain namespaces attached to the + * controller. The @lsp parameter is placed in the Log Specific Parameter field + * of the command. + * + * See &struct nvme_ana_log for the definition of the returned structure. + * + * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_ANA. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_ana(struct nvme_transport_handle *hdl, bool rae, + enum nvme_log_ana_lsp lsp, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_ana(&cmd, lsp, lpo, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_ana_groups() - Retrieve the Asymmetric Namespace Access (ANA) + * Groups Only Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @log: Pointer to the buffer (@struct nvme_ana_log) where the log page + * data will be stored. + * @len: Length of the buffer provided in @log. 
+ * + * This function retrieves only the ANA Group Descriptors by setting the Log + * Specific Parameter (LSP) field to NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY. It is a + * convenience wrapper around nvme_get_log_ana, using a Log Page Offset (LPO) of + * 0. + * + * See &struct nvme_ana_log for the definition of the returned structure. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_ana_groups(struct nvme_transport_handle *hdl, bool rae, + struct nvme_ana_log *log, __u32 len) +{ + return nvme_get_log_ana(hdl, rae, NVME_LOG_ANA_LSP_RGO_GROUPS_ONLY, + 0, log, len); +} + +/** + * nvme_get_log_lba_status() - Retrieve the LBA Status Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the LBA Status Log. + * + * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_LBA_STATUS. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_lba_status(struct nvme_transport_handle *hdl, + bool rae, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_lba_status(&cmd, lpo, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_endurance_grp_evt() - Retrieve the Endurance Group Event + * Aggregate Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. 
+ * + * Submits the Get Log Page command specifically for the Endurance Group Event + * Aggregate Log, allowing retrieval of data starting at a specified offset + * (@lpo). + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_ENDURANCE_GRP_EVT. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_endurance_grp_evt(struct nvme_transport_handle *hdl, + bool rae, __u64 lpo, void *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_endurance_grp_evt(&cmd, lpo, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_fid_supported_effects() - Retrieve the Feature Identifiers + * Supported and Effects Log Page + * @hdl: Transport handle for the controller. + * @csi: Command set identifier, see &enum nvme_csi for known values + * @log: Pointer to the buffer (@struct nvme_fid_supported_effects_log) + * where the log page data will be stored. + * + * Submits the Get Log Page command specifically for the Feature Identifiers + * Supported and Effects Log. It automatically sets the Log Identifier (LID). + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_fid_supported_effects(struct nvme_transport_handle *hdl, + enum nvme_csi csi, struct nvme_fid_supported_effects_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_fid_supported_effects(&cmd, csi, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_mi_cmd_supported_effects() - Retrieve the Management Interface + * (MI) Commands Supported and Effects Log Page + * @hdl: Transport handle for the controller. + * @log: Pointer to the buffer + * (@struct nvme_mi_cmd_supported_effects_log) where the log page + * data will be stored. + * + * Submits the Get Log Page command specifically for the MI Commands Supported + * and Effects Log. 
It automatically sets the Log Identifier (LID). This command
+ * is typically issued with a namespace ID of NVME_NSID_NONE.
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */
+static inline int
+nvme_get_log_mi_cmd_supported_effects(struct nvme_transport_handle *hdl,
+		struct nvme_mi_cmd_supported_effects_log *log)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_mi_cmd_supported_effects(&cmd, log);
+
+	return nvme_get_log(hdl, &cmd, false, sizeof(*log));
+}
+
+/**
+ * nvme_get_log_boot_partition() - Retrieve the Boot Partition Log Page
+ * @hdl:	Transport handle for the controller.
+ * @lsp:	The Log Specific Parameter (LSP) field for this Log
+ *		Identifier (LID).
+ * @part:	Pointer to the buffer (@struct nvme_boot_partition) where
+ *		the log page data will be stored.
+ * @len:	Length of the buffer provided in @part.
+ *
+ * Submits the Get Log Page command specifically for the Boot Partition Log.
+ * The LSP field is set based on the @lsp parameter.
+ *
+ * It automatically sets the Log Identifier (LID) to
+ * NVME_LOG_LID_BOOT_PARTITION.
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */
+static inline int
+nvme_get_log_boot_partition(struct nvme_transport_handle *hdl,
+		__u8 lsp, struct nvme_boot_partition *part, __u32 len)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_boot_partition(&cmd, lsp, part, len);
+
+	return nvme_get_log(hdl, &cmd, false, len);
+}
+
+/**
+ * nvme_get_log_rotational_media_info() - Retrieve the Rotational Media
+ *					  Information Log Page
+ * @hdl:	Transport handle for the controller.
+ * @endgid:	The Endurance Group Identifier (ENDGID) to retrieve the
+ *		log for (used in LSI).
+ * @log:	Pointer to the buffer (@struct nvme_rotational_media_info_log)
+ *		where the log page data will be stored.
+ * @len:	Length of the buffer provided in @log.
+ * + * Submits the Get Log Page command specifically for the Rotational Media + * Information Log. The ENDGID is placed in the Log Specific Identifier (LSI) + * field of the command. + * + * It automatically sets the Log Identifier (LID) and Retain Asynchronous + * Event (RAE) to false. This command is typically issued for the controller + * scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_rotational_media_info(struct nvme_transport_handle *hdl, + __u16 endgid, struct nvme_rotational_media_info_log *log, + __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_rotational_media_info(&cmd, endgid, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_dispersed_ns_participating_nss() - Retrieve the Dispersed + * Namespace Participating NVM Subsystems Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for. + * @log: Pointer to the buffer + * (@struct nvme_dispersed_ns_participating_nss_log) + * where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Dispersed Namespace + * Participating NVM Subsystems Log. It automatically sets the Log Identifier + * (LID) and Retain Asynchronous Event (RAE) to false. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */
+static inline int
+nvme_get_log_dispersed_ns_participating_nss(struct nvme_transport_handle *hdl,
+		__u32 nsid, struct nvme_dispersed_ns_participating_nss_log *log,
+		__u32 len)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_dispersed_ns_participating_nss(&cmd, nsid, log, len);
+
+	return nvme_get_log(hdl, &cmd, false, len);
+}
+
+/**
+ * nvme_get_log_mgmt_addr_list() - Retrieve the Management Address List Log Page
+ * @hdl:	Transport handle for the controller.
+ * @log:	Pointer to the buffer (@struct nvme_mgmt_addr_list_log) where
+ *		the log page data will be stored.
+ * @len:	Length of the buffer provided in @log.
+ *
+ * Submits the Get Log Page command specifically for the Management Address List
+ * Log.
+ *
+ * It automatically sets the Log Identifier (LID) to
+ * NVME_LOG_LID_MGMT_ADDR_LIST, Retain Asynchronous Event (RAE) to false, and
+ * uses NVME_NSID_NONE.
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */
+static inline int
+nvme_get_log_mgmt_addr_list(struct nvme_transport_handle *hdl,
+		struct nvme_mgmt_addr_list_log *log, __u32 len)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_mgmt_addr_list(&cmd, log, len);
+
+	return nvme_get_log(hdl, &cmd, false, len);
+}
+
+/**
+ * nvme_get_log_phy_rx_eom() - Retrieve the Physical Interface Receiver Eye
+ *			       Opening Measurement Log Page
+ * @hdl:	Transport handle for the controller.
+ * @lsp:	Log Specific Parameter (LSP), which controls the action
+ *		and measurement quality.
+ * @controller:	Target Controller ID (used in LSI).
+ * @log:	Pointer to the buffer (@struct nvme_phy_rx_eom_log) where
+ *		the log page data will be stored.
+ * @len:	Length of the buffer provided in @log.
+ *
+ * Submits the Get Log Page command specifically for the Physical Interface
+ * Receiver Eye Opening Measurement Log. The Controller ID is placed in the
+ * Log Specific Identifier (LSI) field.
+ * + * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_PHY_RX_EOM, + * and Retain Asynchronous Event (RAE) to false. This command is typically + * issued for the controller scope, thus using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_phy_rx_eom(struct nvme_transport_handle *hdl, + __u8 lsp, __u16 controller, struct nvme_phy_rx_eom_log *log, + __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_phy_rx_eom(&cmd, lsp, controller, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_reachability_groups() - Retrieve the Reachability Groups + * Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for. + * @rgo: Return Groups Only. Set to true to return only the Reachability + * Group Descriptors. + * @log: Pointer to the buffer (@struct nvme_reachability_groups_log) + * where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Reachability Groups + * Log. The @rgo parameter is placed in the Log Specific Parameter (LSP) field. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_REACHABILITY_GROUPS. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_reachability_groups(struct nvme_transport_handle *hdl, + __u32 nsid, bool rgo, struct nvme_reachability_groups_log *log, + __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_reachability_groups(&cmd, rgo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_reachability_associations() - Retrieve the Reachability + * Associations Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @rao: Return Associations Only. 
Set to true to return only the + * Reachability Association Descriptors. + * @log: Pointer to the buffer + * (@struct nvme_reachability_associations_log) where the log + * page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Reachability + * Associations Log. The @rao parameter is placed in the Log Specific Parameter + * (LSP) field. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_REACHABILITY_ASSOCIATIONS. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_reachability_associations(struct nvme_transport_handle *hdl, + bool rae, bool rao, + struct nvme_reachability_associations_log *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_reachability_associations(&cmd, rao, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_changed_alloc_ns_list() - Retrieve the Changed Allocated + * Namespace List Log Page + * @hdl: Transport handle for the controller. + * @log: Pointer to the buffer (@struct nvme_ns_list) where the log page + * data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Changed Allocated + * Namespace List Log. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_CHANGED_ALLOC_NS_LIST. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_changed_alloc_ns_list(struct nvme_transport_handle *hdl, + struct nvme_ns_list *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_changed_ns(&cmd, log); + + return nvme_get_log(hdl, &cmd, true, len); +} + +/** + * nvme_get_log_discovery() - Retrieve the Discovery Log Page + * @hdl: Transport handle for the controller. 
+ * @lpo: Offset (in bytes) into the log page data to start the retrieval. + * @log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Discovery Log. + * Supported only by NVMe-oF Discovery controllers, returning discovery records. + * + * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_DISCOVERY. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_discovery(struct nvme_transport_handle *hdl, + __u64 lpo, __u32 len, void *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_discovery(&cmd, lpo, log, len); + + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_host_discovery() - Retrieve the Host Discovery Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @allhoste: All Host Entries. Set to true to report all host entries. + * @log: Pointer to the buffer (@struct nvme_host_discover_log) + * where the log page data will be stored. + * @len: Length of the buffer provided in @log. + * + * Submits the Get Log Page command specifically for the Host Discovery Log. + * The @allhoste parameter is placed in the Log Specific Parameter (LSP) field. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_HOST_DISCOVERY. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */
+static inline int
+nvme_get_log_host_discovery(struct nvme_transport_handle *hdl,
+		bool rae, bool allhoste,
+		struct nvme_host_discover_log *log, __u32 len)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_host_discovery(&cmd, allhoste, log, len);
+
+	return nvme_get_log(hdl, &cmd, rae, len);
+}
+
+/**
+ * nvme_get_log_ave_discovery() - Retrieve the Asynchronous Event
+ *				  Group (AVE) Discovery Log Page
+ * @hdl:	Transport handle for the controller.
+ * @rae:	Retain asynchronous events
+ * @log:	Pointer to the buffer (@struct nvme_ave_discover_log) where
+ *		the log page data will be stored.
+ * @len:	Length of the buffer provided in @log.
+ *
+ * Submits the Get Log Page command specifically for the Asynchronous Event
+ * Group (AVE) Discovery Log. It automatically sets the Log Identifier (LID).
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */
+static inline int
+nvme_get_log_ave_discovery(struct nvme_transport_handle *hdl,
+		bool rae, struct nvme_ave_discover_log *log, __u32 len)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_ave_discovery(&cmd, log, len);
+
+	return nvme_get_log(hdl, &cmd, rae, len);
+}
+
+/**
+ * nvme_get_log_pull_model_ddc_req() - Retrieve the Pull Model DDC Request
+ *				       Log Page
+ * @hdl:	Transport handle for the controller.
+ * @rae:	Retain asynchronous events
+ * @log:	Pointer to the buffer (@struct nvme_pull_model_ddc_req_log)
+ *		where the log page data will be stored.
+ * @len:	Length of the buffer provided in @log.
+ *
+ * Submits the Get Log Page command specifically for the Pull Model DDC Request
+ * Log. It automatically sets the Log Identifier (LID).
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */ +static inline int +nvme_get_log_pull_model_ddc_req(struct nvme_transport_handle *hdl, + bool rae, struct nvme_pull_model_ddc_req_log *log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_pull_model_ddc_req(&cmd, log, len); + + return nvme_get_log(hdl, &cmd, rae, len); +} + +/** + * nvme_get_log_media_unit_stat() - Retrieve the Media Unit Status Log Page + * @hdl: Transport handle for the controller. + * @domid: The Domain Identifier (DOMID) selection, if supported + * (used in LSI). + * @mus: Pointer to the buffer (@struct nvme_media_unit_stat_log) + * where the log page data will be stored. + * + * Submits the Get Log Page command specifically for the Media Unit Status Log. + * The DOMID is placed in the Log Specific Identifier (LSI) field of the + * command. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_MEDIA_UNIT_STATUS, and Retain Asynchronous Event (RAE) to false. + * This command is typically issued for the controller scope, thus using + * NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_media_unit_stat(struct nvme_transport_handle *hdl, + __u16 domid, struct nvme_media_unit_stat_log *mus) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_media_unit_stat(&cmd, domid, mus); + + return nvme_get_log(hdl, &cmd, false, sizeof(*mus)); +} + +/** + * nvme_get_log_support_cap_config_list() - Retrieve the Supported Capacity + * Configuration List Log Page + * @hdl: Transport handle for the controller. + * @domid: The Domain Identifier (DOMID) selection, if + * supported (used in LSI). + * @cap: Pointer to the buffer + * (@struct nvme_supported_cap_config_list_log) where the log + * page data will be stored. + * + * Submits the Get Log Page command specifically for the Supported Capacity + * Configuration List Log. The DOMID is placed in the Log Specific Identifier + * (LSI) field of the command. 
+ * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_SUPPORTED_CAP_CONFIG_LIST, and Retain Asynchronous Event (RAE) + * to false. This command is typically issued for the controller scope, thus + * using NVME_NSID_NONE. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_support_cap_config_list(struct nvme_transport_handle *hdl, + __u16 domid, struct nvme_supported_cap_config_list_log *cap) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_support_cap_config_list(&cmd, domid, cap); + + return nvme_get_log(hdl, &cmd, false, sizeof(*cap)); +} + +/** + * nvme_get_log_reservation() - Retrieve the Reservation Notification Log Page + * @hdl: Transport handle for the controller. + * @log: Pointer to the buffer (@struct nvme_resv_notification_log) + * where the log page data will be stored. + * + * Submits the Get Log Page command specifically for the Reservation + * Notification Log. It automatically sets the Log Identifier (LID). + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_reservation(struct nvme_transport_handle *hdl, + struct nvme_resv_notification_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_reservation(&cmd, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_sanitize() - Retrieve the Sanitize Status Log Page + * @hdl: Transport handle for the controller. + * @rae: Retain asynchronous events + * @log: Pointer to the buffer (@struct nvme_sanitize_log_page) + * where the log page data will be stored. + * + * Submits the Get Log Page command specifically for the Sanitize Status Log. + * The log page reports sanitize operation time estimates and information about + * the most recent sanitize operation. + * + * It automatically sets the Log Identifier (LID) to NVME_LOG_LID_SANITIZE. 
+ * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_sanitize(struct nvme_transport_handle *hdl, + bool rae, struct nvme_sanitize_log_page *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_sanitize(&cmd, log); + + return nvme_get_log(hdl, &cmd, rae, sizeof(*log)); +} + +/** + * nvme_get_log_zns_changed_zones() - Retrieve the ZNS Changed Zones Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for. + * @rae: Retain asynchronous events + * @log: Pointer to the buffer (@struct nvme_zns_changed_zone_log) + * where the log page data will be stored. + * + * Submits the Get Log Page command specifically for the ZNS Changed Zones Log. + * This log lists zones that have changed state due to an exceptional event. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_ZNS_CHANGED_ZONES. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_zns_changed_zones(struct nvme_transport_handle *hdl, + __u32 nsid, bool rae, struct nvme_zns_changed_zone_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_zns_changed_zones(&cmd, nsid, log); + + return nvme_get_log(hdl, &cmd, rae, sizeof(*log)); +} + +/** + * nvme_get_log_persistent_event() - Retrieve the Persistent Event Log Page + * @hdl: Transport handle for the controller. + * @action: Action the controller should take during processing this + * command, see &enum nvme_pevent_log_action (used in LSP). + * @pevent_log: Pointer to the buffer where the log page data will be stored. + * @len: Length of the buffer provided in @pevent_log. + * + * Submits the Get Log Page command specifically for the Persistent Event Log. + * The @action parameter is placed in the Log Specific Parameter (LSP) field. 
+ * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_PERSISTENT_EVENT and Retain Asynchronous Event (RAE) to false. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_persistent_event(struct nvme_transport_handle *hdl, + enum nvme_pevent_log_action action, void *pevent_log, __u32 len) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_persistent_event(&cmd, action, pevent_log, len); + + /* + * Call the generic log execution function. + * The data length is determined by the 'len' parameter. + */ + return nvme_get_log(hdl, &cmd, false, len); +} + +/** + * nvme_get_log_lockdown() - Retrieve the Command and Feature Lockdown Log Page + * @hdl: Transport handle for the controller. + * @cnscp: Contents and Scope (CNSCP) of Command and Feature + * Identifier Lists (used in LSP). + * @log: Pointer to the buffer (@struct nvme_lockdown_log) where the log + * page data will be stored. + * + * Submits the Get Log Page command specifically for the Command and Feature + * Lockdown Log. The @cnscp parameter is placed in the Log Specific Parameter + * (LSP) field. + * + * It automatically sets the Log Identifier (LID) to + * NVME_LOG_LID_CMD_AND_FEAT_LOCKDOWN and Retain Asynchronous Event (RAE) to + * false. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_log_lockdown(struct nvme_transport_handle *hdl, + __u8 cnscp, struct nvme_lockdown_log *log) +{ + struct nvme_passthru_cmd cmd; + + nvme_init_get_log_lockdown(&cmd, cnscp, log); + + return nvme_get_log(hdl, &cmd, false, sizeof(*log)); +} + +/** + * nvme_get_log_smart() - Retrieve the SMART / Health Information Log Page + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to request the log for. + * @smart_log: Pointer to the buffer (@struct nvme_smart_log) where the log + * page data will be stored. 
+ *
+ * Submits the Get Log Page command specifically for the SMART / Health
+ * Information Log. It automatically sets the Log Identifier (LID) and
+ * Retain Asynchronous Event (RAE) to false.
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */
+static inline int
+nvme_get_log_smart(struct nvme_transport_handle *hdl,
+		__u32 nsid, struct nvme_smart_log *smart_log)
+{
+	struct nvme_passthru_cmd cmd;
+
+	nvme_init_get_log_smart(&cmd, nsid, smart_log);
+
+	return nvme_get_log(hdl, &cmd, false, NVME_LOG_PAGE_PDU_SIZE);
+}
+
+
+/**
+ * nvme_set_features() - Submit a generic Set Features command
+ * @hdl:	Transport handle for the controller.
+ * @nsid:	Namespace ID to apply the feature to.
+ * @fid:	Feature Identifier (FID) to be set.
+ * @sv:		Save Value (SV): If true, the feature value persists
+ *		across power states.
+ * @cdw11:	Command Dword 11 parameter (feature-specific).
+ * @cdw12:	Command Dword 12 parameter (feature-specific).
+ * @cdw13:	Command Dword 13 parameter (feature-specific).
+ * @uidx:	UUID Index (UIDX) for the command, encoded into cdw14
+ * @cdw15:	Command Dword 15 parameter (feature-specific).
+ * @data:	Pointer to the data buffer to transfer (if applicable).
+ * @len:	Length of the data buffer in bytes.
+ * @result:	The command completion result (CQE dword0) on success.
+ *
+ * Submits the Set Features command, allowing all standard command
+ * fields (cdw11-cdw15) and data buffer fields to be specified directly.
+ *
+ * Return: 0 on success, the NVMe command status on error, or a negative
+ * errno otherwise.
+ */ +static inline int +nvme_set_features(struct nvme_transport_handle *hdl, __u32 nsid, __u8 fid, + bool sv, __u32 cdw11, __u32 cdw12, __u32 cdw13, __u8 uidx, + __u32 cdw15, void *data, __u32 len, __u64 *result) +{ + struct nvme_passthru_cmd cmd; + int err; + + nvme_init_set_features(&cmd, fid, sv); + cmd.nsid = nsid; + cmd.cdw11 = cdw11; + cmd.cdw12 = cdw12; + cmd.cdw13 = cdw13; + cmd.cdw14 = NVME_FIELD_ENCODE(uidx, + NVME_SET_FEATURES_CDW14_UUID_SHIFT, + NVME_SET_FEATURES_CDW14_UUID_MASK); + cmd.cdw15 = cdw15; + cmd.data_len = len; + cmd.addr = (__u64)(uintptr_t)data; + + err = nvme_submit_admin_passthru(hdl, &cmd); + if (result) + *result = cmd.result; + return err; +} + +/** + * nvme_set_features_simple() - Submit a Set Features command using only cdw11 + * @hdl: Transport handle for the controller. + * @nsid: Namespace ID to apply the feature to. + * @fid: Feature Identifier (FID) to be set. + * @sv: Save Value (SV): If true, the feature value persists across + * power states. + * @cdw11: Command Dword 11 parameter (feature-specific value). + * @result: The command completion result (CQE dword0) on success. + * + * Submits the Set Features command for features that only require + * parameters in cdw11. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_set_features_simple(struct nvme_transport_handle *hdl, + __u32 nsid, __u8 fid, bool sv, __u32 cdw11, __u64 *result) +{ + struct nvme_passthru_cmd cmd; + int err; + + nvme_init_set_features(&cmd, fid, sv); + cmd.nsid = nsid; + cmd.cdw11 = cdw11; + + err = nvme_submit_admin_passthru(hdl, &cmd); + if (result) + *result = cmd.result; + return err; +} + +/** + * nvme_get_features() - Submit a Get Features command + * @hdl: Transport handle for the controller. 
+ * @nsid: Namespace ID, if applicable + * @fid: Feature identifier, see &enum nvme_features_id + * @sel: Select which type of attribute to return, + * see &enum nvme_get_features_sel + * @cdw11: Feature specific command dword11 field + * @uidx: UUID Index for differentiating vendor specific encoding + * @data: User address of feature data, if applicable + * @len: Length of feature data, if applicable, in bytes + * @result: The command completion result (CQE dword0) on success. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. + */ +static inline int +nvme_get_features(struct nvme_transport_handle *hdl, __u32 nsid, + __u8 fid, enum nvme_get_features_sel sel, + __u32 cdw11, __u8 uidx, void *data, + __u32 len, __u64 *result) +{ + struct nvme_passthru_cmd cmd; + int err; + + nvme_init_get_features(&cmd, fid, sel); + + cmd.nsid = nsid; + cmd.cdw11 = cdw11; + cmd.cdw14 = NVME_FIELD_ENCODE(uidx, + NVME_GET_FEATURES_CDW14_UUID_SHIFT, + NVME_GET_FEATURES_CDW14_UUID_MASK); + cmd.data_len = len; + cmd.addr = (__u64)(uintptr_t)data; + + err = nvme_submit_admin_passthru(hdl, &cmd); + if (result) + *result = cmd.result; + return err; +} + +/** + * nvme_get_features_simple() - Submit a simple Get Features command + * @hdl: Transport handle for the controller. + * @fid: Feature Identifier (FID) to be retrieved. + * @sel: Select (SEL), specifying which feature value + * to return (&struct nvme_get_features_sel). + * @result: The command completion result (CQE dword0) on success. + * + * Submits the Get Features command for features that only require parameters in + * the CQE dword0 and do not need any parameters in cdw11 through cdw15. + * + * Return: 0 on success, the NVMe command status on error, or a negative + * errno otherwise. 
+ */ +static inline int +nvme_get_features_simple(struct nvme_transport_handle *hdl, __u8 fid, + enum nvme_get_features_sel sel, __u64 *result) +{ + struct nvme_passthru_cmd cmd; + int err; + + nvme_init_get_features(&cmd, fid, sel); + + err = nvme_submit_admin_passthru(hdl, &cmd); + if (result) + *result = cmd.result; + return err; +} + +/** + * nvme_namespace_attach_ctrls() - Attach namespace to controller(s) + * @hdl: Transport handle + * @ish: Ignore Shutdown (for NVMe-MI command) + * @nsid: Namespace ID to attach + * @num_ctrls: Number of controllers in ctrlist + * @ctrlist: List of controller IDs to perform the attach action + * + * Return: 0 on success, the nvme command status if a response was + * received (see &enum nvme_status_field) or a negative error otherwise. + */ +int nvme_namespace_attach_ctrls(struct nvme_transport_handle *hdl, bool ish, + __u32 nsid, __u16 num_ctrls, __u16 *ctrlist); + +/** + * nvme_namespace_detach_ctrls() - Detach namespace from controller(s) + * @hdl: Transport handle + * @ish: Ignore Shutdown (for NVMe-MI command) + * @nsid: Namespace ID to detach + * @num_ctrls: Number of controllers in ctrlist + * @ctrlist: List of controller IDs to perform the detach action + * + * Return: 0 on success, the nvme command status if a response was + * received (see &enum nvme_status_field) or a negative error otherwise. 
+ */ +int nvme_namespace_detach_ctrls(struct nvme_transport_handle *hdl, bool ish, + __u32 nsid, __u16 num_ctrls, __u16 *ctrlist); + + +#endif /* NVME_CMDS */ diff --git a/nvme-rpmb.c b/nvme-rpmb.c index ee56b414a8..19cdd24e0a 100644 --- a/nvme-rpmb.c +++ b/nvme-rpmb.c @@ -34,8 +34,9 @@ #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #define CREATE_CMD diff --git a/nvme.c b/nvme.c index 7539e937a7..08fb9f8363 100644 --- a/nvme.c +++ b/nvme.c @@ -53,8 +53,11 @@ #include #include "common.h" -#include "nvme.h" +#include "fabrics.h" +#include "logging.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #include "nvme/tree.h" #include "nvme/types.h" #include "plugin.h" @@ -62,10 +65,9 @@ #include "util/base64.h" #include "util/cleanup.h" #include "util/crc32.h" -#include "util/suffix.h" -#include "logging.h" #include "util/sighdl.h" -#include "fabrics.h" +#include "util/suffix.h" + #define CREATE_CMD #include "nvme-builtin.h" #include "malloc.h" @@ -5466,7 +5468,7 @@ static int subsystem_reset(int argc, char **argv, struct command *acmd, struct p return -EINVAL; } - err = nvme_subsystem_reset(hdl); + err = nvme_reset_subsystem(hdl); if (err < 0) { if (errno == ENOTTY) nvme_show_error("Subsystem-reset: NVM Subsystem Reset not supported."); @@ -5497,7 +5499,7 @@ static int reset(int argc, char **argv, struct command *acmd, struct plugin *plu return -EINVAL; } - err = nvme_ctrl_reset(hdl); + err = nvme_reset_ctrl(hdl); if (err < 0) nvme_show_error("Reset: %s", nvme_strerror(-err)); else if (argconfig_parse_seen(opts, "verbose")) @@ -5532,7 +5534,7 @@ static int ns_rescan(int argc, char **argv, struct command *acmd, struct plugin return err; } - err = nvme_ns_rescan(hdl); + err = nvme_rescan_ns(hdl); if (err < 0) nvme_show_error("Namespace Rescan: %s\n", nvme_strerror(-err)); else if (argconfig_parse_seen(opts, "verbose")) @@ -6823,7 +6825,7 @@ static int format_cmd(int argc, char **argv, 
struct command *acmd, struct plugin printf("Success formatting namespace:%x\n", cfg.namespace_id); if (nvme_transport_handle_is_direct(hdl) && cfg.lbaf != prev_lbaf) { if (nvme_transport_handle_is_chardev(hdl)) { - if (nvme_ns_rescan(hdl) < 0) { + if (nvme_rescan_ns(hdl) < 0) { nvme_show_error("failed to rescan namespaces"); return -errno; } @@ -6855,7 +6857,7 @@ static int format_cmd(int argc, char **argv, struct command *acmd, struct plugin } if (nvme_transport_handle_is_direct(hdl) && cfg.reset && nvme_transport_handle_is_chardev(hdl)) - nvme_ctrl_reset(hdl); + nvme_reset_ctrl(hdl); return err; } @@ -9522,7 +9524,7 @@ static int gen_hostnqn_cmd(int argc, char **argv, struct command *acmd, struct p { char *hostnqn; - hostnqn = nvme_hostnqn_generate(); + hostnqn = nvme_generate_hostnqn(); if (!hostnqn) { nvme_show_error("\"%s\" not supported. Install lib uuid and rebuild.", acmd->name); @@ -9537,9 +9539,9 @@ static int show_hostnqn_cmd(int argc, char **argv, struct command *acmd, struct { char *hostnqn; - hostnqn = nvme_hostnqn_from_file(); + hostnqn = nvme_read_hostnqn(); if (!hostnqn) - hostnqn = nvme_hostnqn_generate(); + hostnqn = nvme_generate_hostnqn(); if (!hostnqn) { nvme_show_error("hostnqn is not available -- use nvme gen-hostnqn"); @@ -9667,7 +9669,7 @@ static int gen_dhchap_key(int argc, char **argv, struct command *acmd, struct pl } if (!cfg.nqn) { - cfg.nqn = hnqn = nvme_hostnqn_from_file(); + cfg.nqn = hnqn = nvme_read_hostnqn(); if (!cfg.nqn) { nvme_show_error("Could not read host NQN"); return -ENOENT; @@ -9931,7 +9933,7 @@ static int gen_tls_key(int argc, char **argv, struct command *acmd, struct plugi return -EINVAL; } if (!cfg.hostnqn) { - cfg.hostnqn = hnqn = nvme_hostnqn_from_file(); + cfg.hostnqn = hnqn = nvme_read_hostnqn(); if (!cfg.hostnqn) { nvme_show_error("Failed to read host NQN"); return -EINVAL; @@ -10093,7 +10095,7 @@ static int check_tls_key(int argc, char **argv, struct command *acmd, struct plu if (cfg.subsysnqn) { if 
(!cfg.hostnqn) { - cfg.hostnqn = hnqn = nvme_hostnqn_from_file(); + cfg.hostnqn = hnqn = nvme_read_hostnqn(); if (!cfg.hostnqn) { nvme_show_error("Failed to read host NQN"); return -EINVAL; diff --git a/plugins/amzn/amzn-nvme.c b/plugins/amzn/amzn-nvme.c index 276f71c681..5613095144 100644 --- a/plugins/amzn/amzn-nvme.c +++ b/plugins/amzn/amzn-nvme.c @@ -10,9 +10,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "nvme-print.h" #define CREATE_CMD #include "amzn-nvme.h" diff --git a/plugins/dapustor/dapustor-nvme.c b/plugins/dapustor/dapustor-nvme.c index d2b3cc3439..cb88df38f9 100644 --- a/plugins/dapustor/dapustor-nvme.c +++ b/plugins/dapustor/dapustor-nvme.c @@ -14,9 +14,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "nvme-print.h" #include "util/suffix.h" diff --git a/plugins/dera/dera-nvme.c b/plugins/dera/dera-nvme.c index 4634e878fd..0d01060ac9 100644 --- a/plugins/dera/dera-nvme.c +++ b/plugins/dera/dera-nvme.c @@ -5,17 +5,17 @@ #include #include #include -#include -#include +#include +#include #include #include #include +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "dera-nvme.h" diff --git a/plugins/fdp/fdp.c b/plugins/fdp/fdp.c index 896a4ee24e..4c59d6d3f7 100644 --- a/plugins/fdp/fdp.c +++ b/plugins/fdp/fdp.c @@ -12,8 +12,9 @@ #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #define CREATE_CMD #include "fdp.h" diff --git a/plugins/feat/feat-nvme.c b/plugins/feat/feat-nvme.c index d38e2e4ed8..3c37e14312 100644 --- a/plugins/feat/feat-nvme.c +++ b/plugins/feat/feat-nvme.c @@ -3,8 +3,9 @@ #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #include 
"plugin.h" #define CREATE_CMD diff --git a/plugins/huawei/huawei-nvme.c b/plugins/huawei/huawei-nvme.c index 65f3fc73c6..dcf4f3dc2b 100644 --- a/plugins/huawei/huawei-nvme.c +++ b/plugins/huawei/huawei-nvme.c @@ -30,6 +30,7 @@ #include #include "common.h" +#include "nvme-cmds.h" #include "nvme.h" #include "plugin.h" @@ -318,7 +319,7 @@ static int huawei_list(int argc, char **argv, struct command *acmd, if (ret < 0 || (fmt != JSON && fmt != NORMAL)) return ret; - n = scandir("/dev", &devices, nvme_namespace_filter, alphasort); + n = scandir("/dev", &devices, nvme_filter_namespace, alphasort); if (n <= 0) return n; diff --git a/plugins/ibm/ibm-nvme.c b/plugins/ibm/ibm-nvme.c index 2407049a76..38f3cf0fe1 100644 --- a/plugins/ibm/ibm-nvme.c +++ b/plugins/ibm/ibm-nvme.c @@ -4,8 +4,9 @@ #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #include "plugin.h" #define CREATE_CMD diff --git a/plugins/inspur/inspur-nvme.c b/plugins/inspur/inspur-nvme.c index 7681a8ee63..7e798c4916 100644 --- a/plugins/inspur/inspur-nvme.c +++ b/plugins/inspur/inspur-nvme.c @@ -13,9 +13,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "nvme-print.h" #include "util/suffix.h" #define CREATE_CMD diff --git a/plugins/intel/intel-nvme.c b/plugins/intel/intel-nvme.c index 3e5bb5ef63..2d1d601b45 100644 --- a/plugins/intel/intel-nvme.c +++ b/plugins/intel/intel-nvme.c @@ -9,10 +9,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "intel-nvme.h" diff --git a/plugins/lm/lm-nvme.c b/plugins/lm/lm-nvme.c index e430e202ca..423cf2c9f9 100644 --- a/plugins/lm/lm-nvme.c +++ b/plugins/lm/lm-nvme.c @@ -28,10 +28,10 @@ #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" 
+#include "nvme.h" #include "plugin.h" -#include "linux/types.h" #include "util/cleanup.h" #define CREATE_CMD diff --git a/plugins/memblaze/memblaze-nvme.c b/plugins/memblaze/memblaze-nvme.c index fd93cc6f76..d1cd761912 100644 --- a/plugins/memblaze/memblaze-nvme.c +++ b/plugins/memblaze/memblaze-nvme.c @@ -9,11 +9,12 @@ #include -#include "nvme.h" #include "common.h" -#include "plugin.h" #include "linux/types.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" +#include "plugin.h" #define CREATE_CMD #include "memblaze-nvme.h" diff --git a/plugins/micron/micron-nvme.c b/plugins/micron/micron-nvme.c index 1d2724455f..6a88c827f1 100644 --- a/plugins/micron/micron-nvme.c +++ b/plugins/micron/micron-nvme.c @@ -28,9 +28,9 @@ #include #include "common.h" -#include "linux/types.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #include "util/cleanup.h" #include "util/types.h" #include "util/utils.h" diff --git a/plugins/netapp/netapp-nvme.c b/plugins/netapp/netapp-nvme.c index e3e78b2bec..a8d1add9f7 100644 --- a/plugins/netapp/netapp-nvme.c +++ b/plugins/netapp/netapp-nvme.c @@ -27,8 +27,8 @@ #include #include "common.h" +#include "nvme-cmds.h" #include "nvme.h" - #include "util/suffix.h" #define CREATE_CMD diff --git a/plugins/ocp/ocp-clear-features.c b/plugins/ocp/ocp-clear-features.c index 76cb35f3ad..02001a1d0e 100644 --- a/plugins/ocp/ocp-clear-features.c +++ b/plugins/ocp/ocp-clear-features.c @@ -7,10 +7,13 @@ */ #include + +#include "nvme-cmds.h" +#include "nvme-print.h" #include "util/types.h" + #include "ocp-nvme.h" #include "ocp-utils.h" -#include "nvme-print.h" static int ocp_clear_feature(int argc, char **argv, const char *desc, const __u8 fid) { diff --git a/plugins/ocp/ocp-nvme.c b/plugins/ocp/ocp-nvme.c index a6feb03ef4..280188558b 100644 --- a/plugins/ocp/ocp-nvme.c +++ b/plugins/ocp/ocp-nvme.c @@ -19,12 +19,12 @@ #include #include "common.h" +#include "logging.h" +#include "nvme-cmds.h" +#include 
"nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" #include "util/types.h" -#include "logging.h" -#include "nvme-print.h" #include "ocp-smart-extended-log.h" #include "ocp-clear-features.h" diff --git a/plugins/ocp/ocp-utils.c b/plugins/ocp/ocp-utils.c index 08401bb73e..966a3e9b62 100644 --- a/plugins/ocp/ocp-utils.c +++ b/plugins/ocp/ocp-utils.c @@ -8,8 +8,7 @@ #include #include -#include - +#include "nvme-cmds.h" #include "ocp-nvme.h" #include "ocp-utils.h" #include "types.h" @@ -21,7 +20,7 @@ const unsigned char ocp_uuid[NVME_UUID_LEN] = { int ocp_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index) { - int i = nvme_uuid_find(uuid_list, ocp_uuid); + int i = nvme_find_uuid(uuid_list, ocp_uuid); *index = 0; if (i > 0) diff --git a/plugins/sandisk/sandisk-nvme.c b/plugins/sandisk/sandisk-nvme.c index 5cfb8eae80..2e1b5e96ae 100644 --- a/plugins/sandisk/sandisk-nvme.c +++ b/plugins/sandisk/sandisk-nvme.c @@ -17,12 +17,12 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" #include "util/cleanup.h" #include "util/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "sandisk-nvme.h" diff --git a/plugins/sandisk/sandisk-utils.c b/plugins/sandisk/sandisk-utils.c index 04f00fc0ab..8b41ade6ca 100644 --- a/plugins/sandisk/sandisk-utils.c +++ b/plugins/sandisk/sandisk-utils.c @@ -15,8 +15,10 @@ #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" + #include "sandisk-utils.h" #include "plugins/wdc/wdc-nvme-cmds.h" @@ -335,16 +337,16 @@ bool sndk_get_dev_mgment_data(struct nvme_global_ctx *ctx, struct nvme_transport memset(&uuid_list, 0, sizeof(struct nvme_id_uuid_list)); if (!nvme_get_uuid_list(hdl, &uuid_list)) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 
0) { /* The Sandisk UUID is not found; * check for the WDC UUID second. */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); if (uuid_index < 0) /* Check for the UUID used on SN640 and SN655 drives */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID_SN640_3); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID_SN640_3); } if (uuid_index >= 0) @@ -682,16 +684,16 @@ __u64 sndk_get_enc_drive_capabilities(struct nvme_global_ctx *ctx, memset(&uuid_list, 0, sizeof(struct nvme_id_uuid_list)); if (!nvme_get_uuid_list(hdl, &uuid_list)) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 0) { /* The Sandisk UUID is not found; * check for the WDC UUID second. */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); if (uuid_index < 0) /* Check for the UUID used on SN640 and SN655 drives */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID_SN640_3); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID_SN640_3); } } else { /* UUID Lists not supported, Use default uuid index - 0 */ diff --git a/plugins/scaleflux/sfx-nvme.c b/plugins/scaleflux/sfx-nvme.c index e4b35150c4..6de7e36c79 100644 --- a/plugins/scaleflux/sfx-nvme.c +++ b/plugins/scaleflux/sfx-nvme.c @@ -18,10 +18,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #include "util/cleanup.h" #include "util/types.h" @@ -1422,7 +1422,7 @@ static int nvme_expand_cap(struct nvme_transport_handle *hdl, __u32 namespace_id else strcpy(dev_name, nvme_transport_handle_get_name(hdl)); - num = scandir("/dev", &devices, nvme_namespace_filter, alphasort); + num = scandir("/dev", &devices, nvme_filter_namespace, alphasort); if (num <= 0) { err = num; goto ret; diff --git 
a/plugins/seagate/seagate-nvme.c b/plugins/seagate/seagate-nvme.c index 467d15bbbe..be61e55e7f 100644 --- a/plugins/seagate/seagate-nvme.c +++ b/plugins/seagate/seagate-nvme.c @@ -38,9 +38,9 @@ #include #include "common.h" -#include "linux/types.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #include "plugin.h" #define CREATE_CMD diff --git a/plugins/shannon/shannon-nvme.c b/plugins/shannon/shannon-nvme.c index 13af395908..490617bb86 100644 --- a/plugins/shannon/shannon-nvme.c +++ b/plugins/shannon/shannon-nvme.c @@ -8,10 +8,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "shannon-nvme.h" diff --git a/plugins/solidigm/solidigm-internal-logs.c b/plugins/solidigm/solidigm-internal-logs.c index f18bf6bdc1..e26e8bfcc0 100644 --- a/plugins/solidigm/solidigm-internal-logs.c +++ b/plugins/solidigm/solidigm-internal-logs.c @@ -18,9 +18,11 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "nvme-print.h" + #include "solidigm-util.h" #define DWORD_SIZE 4 diff --git a/plugins/solidigm/solidigm-latency-tracking.c b/plugins/solidigm/solidigm-latency-tracking.c index 5987da5192..e007ea3401 100644 --- a/plugins/solidigm/solidigm-latency-tracking.c +++ b/plugins/solidigm/solidigm-latency-tracking.c @@ -14,10 +14,11 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" + #include "solidigm-util.h" #define BUCKET_LIST_SIZE_4_0 152 diff --git a/plugins/solidigm/solidigm-log-page-dir.c b/plugins/solidigm/solidigm-log-page-dir.c index 9132dae2af..6583ac896d 100644 --- a/plugins/solidigm/solidigm-log-page-dir.c +++ b/plugins/solidigm/solidigm-log-page-dir.c @@ -10,9 +10,12 @@ #include #include "common.h" 
+#include "nvme-cmds.h" #include "nvme-print.h" + #include "plugins/ocp/ocp-nvme.h" #include "plugins/ocp/ocp-utils.h" + #include "solidigm-log-page-dir.h" #include "solidigm-util.h" diff --git a/plugins/solidigm/solidigm-util.c b/plugins/solidigm/solidigm-util.c index 11b3ba1647..5a9009da77 100644 --- a/plugins/solidigm/solidigm-util.c +++ b/plugins/solidigm/solidigm-util.c @@ -6,6 +6,9 @@ */ #include + +#include "nvme-cmds.h" + #include "solidigm-util.h" const unsigned char solidigm_uuid[NVME_UUID_LEN] = { @@ -15,7 +18,7 @@ const unsigned char solidigm_uuid[NVME_UUID_LEN] = { int sldgm_find_uuid_index(struct nvme_id_uuid_list *uuid_list, __u8 *index) { - int i = nvme_uuid_find(uuid_list, solidigm_uuid); + int i = nvme_find_uuid(uuid_list, solidigm_uuid); *index = 0; if (i > 0) diff --git a/plugins/solidigm/solidigm-workload-tracker.c b/plugins/solidigm/solidigm-workload-tracker.c index 952cd0431b..5c408f6930 100644 --- a/plugins/solidigm/solidigm-workload-tracker.c +++ b/plugins/solidigm/solidigm-workload-tracker.c @@ -9,6 +9,7 @@ #include #include "common.h" +#include "nvme-cmds.h" #include "nvme-print.h" #define LID 0xf9 diff --git a/plugins/ssstc/ssstc-nvme.c b/plugins/ssstc/ssstc-nvme.c index f35275327d..95c3b2c266 100644 --- a/plugins/ssstc/ssstc-nvme.c +++ b/plugins/ssstc/ssstc-nvme.c @@ -9,10 +9,10 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "ssstc-nvme.h" diff --git a/plugins/toshiba/toshiba-nvme.c b/plugins/toshiba/toshiba-nvme.c index 87569793c3..3c0e46f926 100644 --- a/plugins/toshiba/toshiba-nvme.c +++ b/plugins/toshiba/toshiba-nvme.c @@ -10,10 +10,10 @@ #include +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "toshiba-nvme.h" diff --git a/plugins/transcend/transcend-nvme.c 
b/plugins/transcend/transcend-nvme.c index 3c36f2434a..425fc4cc2a 100644 --- a/plugins/transcend/transcend-nvme.c +++ b/plugins/transcend/transcend-nvme.c @@ -8,6 +8,7 @@ #include +#include "nvme-cmds.h" #include "nvme.h" #include "plugin.h" diff --git a/plugins/virtium/virtium-nvme.c b/plugins/virtium/virtium-nvme.c index 1e8e5e5d4b..20367db310 100644 --- a/plugins/virtium/virtium-nvme.c +++ b/plugins/virtium/virtium-nvme.c @@ -13,6 +13,7 @@ #include #include "common.h" +#include "nvme-cmds.h" #include "nvme.h" #include "plugin.h" #include "util/types.h" diff --git a/plugins/wdc/wdc-nvme.c b/plugins/wdc/wdc-nvme.c index 0662e16e4b..3b55b6a2cb 100644 --- a/plugins/wdc/wdc-nvme.c +++ b/plugins/wdc/wdc-nvme.c @@ -34,12 +34,12 @@ #include #include "common.h" +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" #include "util/cleanup.h" #include "util/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "wdc-nvme.h" @@ -2121,13 +2121,13 @@ static __u64 wdc_get_enc_drive_capabilities(struct nvme_global_ctx *ctx, memset(&uuid_list, 0, sizeof(struct nvme_id_uuid_list)); if (wdc_CheckUuidListSupport(hdl, &uuid_list)) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 0) /* The Sandisk UUID is not found; * check for the WDC UUID second. 
*/ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); } /* WD UUID not found, use default uuid index - 0 */ @@ -2734,15 +2734,15 @@ static bool get_dev_mgment_data(struct nvme_global_ctx *ctx, struct nvme_transpo memset(&uuid_list, 0, sizeof(struct nvme_id_uuid_list)); if (wdc_CheckUuidListSupport(hdl, &uuid_list)) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 0) { /* The Sandisk UUID is not found; * check for the WDC UUID second. */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); if (uuid_index < 0) - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID_SN640_3); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID_SN640_3); } if (uuid_index >= 0) @@ -2803,15 +2803,15 @@ static bool get_dev_mgment_cbs_data(struct nvme_global_ctx *ctx, struct nvme_tra memset(&uuid_list, 0, sizeof(struct nvme_id_uuid_list)); if (wdc_CheckUuidListSupport(hdl, &uuid_list)) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 0) { /* The Sandisk UUID is not found; * check for the WDC UUID second. 
*/ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); if (uuid_index < 0) - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID_SN640_3); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID_SN640_3); } if (uuid_index >= 0) @@ -9096,13 +9096,13 @@ static int wdc_drive_status(int argc, char **argv, struct command *acmd, memset(&uuid_list, 0, sizeof(struct nvme_id_uuid_list)); if (wdc_CheckUuidListSupport(hdl, &uuid_list)) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 0) /* The Sandisk UUID is not found; * check for the WDC UUID second. */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); } /* WD UUID not found, use default uuid index - 0 */ @@ -10871,13 +10871,13 @@ static int wdc_log_page_directory(int argc, char **argv, struct command *acmd, if (!wdc_is_sn861(device_id)) { if (uuid_supported) { /* check for the Sandisk UUID first */ - uuid_index = nvme_uuid_find(&uuid_list, SNDK_UUID); + uuid_index = nvme_find_uuid(&uuid_list, SNDK_UUID); if (uuid_index < 0) /* The Sandisk UUID is not found; * check for the WDC UUID second. */ - uuid_index = nvme_uuid_find(&uuid_list, WDC_UUID); + uuid_index = nvme_find_uuid(&uuid_list, WDC_UUID); } /* WD UUID not found, use default uuid index - 0 */ diff --git a/plugins/wdc/wdc-utils.c b/plugins/wdc/wdc-utils.c index 7e257d355d..7e6afaf014 100644 --- a/plugins/wdc/wdc-utils.c +++ b/plugins/wdc/wdc-utils.c @@ -27,8 +27,10 @@ #include -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" + #include "wdc-utils.h" int wdc_UtilsSnprintf(char *buffer, unsigned int sizeOfBuffer, const char *format, ...) 
diff --git a/plugins/ymtc/ymtc-nvme.c b/plugins/ymtc/ymtc-nvme.c index 9e65ccadf7..6f4a5e5d1d 100644 --- a/plugins/ymtc/ymtc-nvme.c +++ b/plugins/ymtc/ymtc-nvme.c @@ -7,10 +7,10 @@ #include +#include "nvme-cmds.h" +#include "nvme-print.h" #include "nvme.h" #include "plugin.h" -#include "linux/types.h" -#include "nvme-print.h" #define CREATE_CMD #include "ymtc-nvme.h" diff --git a/plugins/zns/zns.c b/plugins/zns/zns.c index 4fdfc160c6..9a988e01dc 100644 --- a/plugins/zns/zns.c +++ b/plugins/zns/zns.c @@ -1,19 +1,22 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include #include #include #include #include #include #include + #include #include +#include + #include #include "common.h" -#include "nvme.h" +#include "nvme-cmds.h" #include "nvme-print.h" +#include "nvme.h" #include "util/cleanup.h" #define CREATE_CMD