 static
 int ctx_init_pack_request(struct obd_import *imp,
                           struct ptlrpc_request *req,
                           int lustre_srv,
                           uid_t uid, gid_t gid,
                           long token_size,
                           char __user *token)
 {
 	struct lustre_msg *msg = req->rq_reqbuf;
 	struct gss_sec *gsec;
 	struct gss_header *ghdr;
 	struct ptlrpc_user_desc *pud;
 	__u32 *p, size, offset = 2;
 	rawobj_t obj;
 
 	LASSERT(msg->lm_bufcount <= 4);
 	LASSERT(req->rq_cli_ctx);
 	LASSERT(req->rq_cli_ctx->cc_sec);
 
 	/* gss hdr */
 	ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));
 	ghdr->gh_version = PTLRPC_GSS_VERSION;
 	ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
 	ghdr->gh_flags = 0;
 	ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
 	ghdr->gh_seq = 0;
 	ghdr->gh_svc = SPTLRPC_SVC_NULL;
 	ghdr->gh_handle.len = 0;
 
 	/* fix the user desc */
 	if (req->rq_pack_udesc) {
 		ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
 
 		pud = lustre_msg_buf(msg, offset, sizeof(*pud));
 		LASSERT(pud);
 		pud->pud_uid = pud->pud_fsuid = uid;
 		pud->pud_gid = pud->pud_fsgid = gid;
 		pud->pud_cap = 0;
 		pud->pud_ngroups = 0;
 		offset++;
 	}
 
+	/* new clients are expected to set KCSUM flag */
+	ghdr->gh_flags |= LUSTRE_GSS_PACK_KCSUM;
+
 	/* security payload */
 	p = lustre_msg_buf(msg, offset, 0);
 	size = msg->lm_buflens[offset];
 	LASSERT(p);
 
 	/* 1. lustre svc type */
 	LASSERT(size > 4);
 	*p++ = cpu_to_le32(lustre_srv);
 	size -= 4;
 
 	/* 2. target uuid */
 	obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
 	obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
 	if (rawobj_serialize(&obj, &p, &size))
 		LBUG();
 
 	/* 3. reverse context handle. actually only needed by root user,
 	 * but we send it anyway. */
 	gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
 	obj.len = sizeof(gsec->gs_rvs_hdl);
 	obj.data = (__u8 *) &gsec->gs_rvs_hdl;
 	if (rawobj_serialize(&obj, &p, &size))
 		LBUG();
 
 	/* 4. now the token */
 	LASSERT(size >= (sizeof(__u32) + token_size));
 	*p++ = cpu_to_le32(((__u32) token_size));
 	if (copy_from_user(p, token, token_size)) {
 		CERROR("can't copy token\n");
 		return -EFAULT;
 	}
 	size -= sizeof(__u32) + cfs_size_round4(token_size);
 
 	req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
 						msg->lm_buflens[offset] - size, 0);
 	return 0;
 }
 
 static
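For context on the layout the hunk above relies on: the security payload is packed as a sequence of length-prefixed, 4-byte-rounded objects (service type word, target uuid, reverse context handle, then the token copied from user space). The sketch below is not the Lustre source; it only illustrates, under the assumption that rawobj_serialize() writes a __u32 length followed by the data rounded up to a 4-byte boundary (which is what the later "sizeof(__u32) + cfs_size_round4(token_size)" accounting suggests), how such an encoder can look. All names ending in _sketch are hypothetical.

/* Minimal sketch, not the Lustre implementation.  Assumes the wire
 * format implied by the diff: a 32-bit length, then the payload padded
 * to a 4-byte boundary.  Byte-order conversion is omitted for brevity. */
#include <stdint.h>
#include <string.h>

struct rawobj_sketch {
	uint32_t len;
	unsigned char *data;
};

/* round n up to the next multiple of 4 */
static uint32_t round4_sketch(uint32_t n)
{
	return (n + 3) & ~(uint32_t)3;
}

/* Serialize obj into *buf: write the length word, copy the payload,
 * advance *buf past the padded payload and shrink *space accordingly.
 * Returns -1 if the remaining space cannot hold the object. */
static int rawobj_serialize_sketch(const struct rawobj_sketch *obj,
				   uint32_t **buf, uint32_t *space)
{
	uint32_t need = sizeof(uint32_t) + round4_sketch(obj->len);

	if (*space < need)
		return -1;

	*(*buf)++ = obj->len;                 /* length prefix */
	memcpy(*buf, obj->data, obj->len);    /* payload; pad bytes untouched */
	*buf += round4_sketch(obj->len) / sizeof(uint32_t);
	*space -= need;
	return 0;
}

With a helper like this, packing the target uuid reduces to filling the object with strlen(uuid) + 1 bytes and calling the serializer, which mirrors steps 2 and 3 of the function above; the running "size" variable then ends up holding the unused tail that lustre_shrink_msg() trims off.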