Line data Source code
1 : /*
2 : Unix SMB/CIFS implementation.
3 : Core SMB2 server
4 :
5 : Copyright (C) Stefan Metzmacher 2009
6 : Copyright (C) Jeremy Allison 2010
7 :
8 : This program is free software; you can redistribute it and/or modify
9 : it under the terms of the GNU General Public License as published by
10 : the Free Software Foundation; either version 3 of the License, or
11 : (at your option) any later version.
12 :
13 : This program is distributed in the hope that it will be useful,
14 : but WITHOUT ANY WARRANTY; without even the implied warranty of
15 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 : GNU General Public License for more details.
17 :
18 : You should have received a copy of the GNU General Public License
19 : along with this program. If not, see <http://www.gnu.org/licenses/>.
20 : */
21 :
22 : #include "includes.h"
23 : #include "locking/share_mode_lock.h"
24 : #include "smbd/smbd.h"
25 : #include "smbd/globals.h"
26 : #include "../libcli/smb/smb_common.h"
27 : #include "../lib/util/tevent_ntstatus.h"
28 : #include "lib/dbwrap/dbwrap_watch.h"
29 : #include "librpc/gen_ndr/open_files.h"
30 : #include "messages.h"
31 :
32 : #undef DBGC_CLASS
33 : #define DBGC_CLASS DBGC_SMB2
34 :
35 : struct smbd_smb2_lock_element {
36 : uint64_t offset;
37 : uint64_t length;
38 : uint32_t flags;
39 : };
40 :
41 : struct smbd_smb2_lock_state {
42 : struct tevent_context *ev;
43 : struct smbd_smb2_request *smb2req;
44 : struct smb_request *smb1req;
45 : struct files_struct *fsp;
46 : bool blocking;
47 : uint32_t polling_msecs;
48 : uint32_t retry_msecs;
49 : uint16_t lock_count;
50 : struct smbd_lock_element *locks;
51 : uint8_t lock_sequence_value;
52 : uint8_t *lock_sequence_element;
53 : };
54 :
55 : static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
56 : struct tevent_context *ev,
57 : struct smbd_smb2_request *smb2req,
58 : struct files_struct *in_fsp,
59 : uint32_t in_lock_sequence,
60 : uint16_t in_lock_count,
61 : struct smbd_smb2_lock_element *in_locks);
62 : static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
63 :
64 : static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
65 1957 : NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
66 : {
67 8 : const uint8_t *inbody;
68 8 : uint16_t in_lock_count;
69 8 : uint32_t in_lock_sequence;
70 8 : uint64_t in_file_id_persistent;
71 8 : uint64_t in_file_id_volatile;
72 8 : struct files_struct *in_fsp;
73 8 : struct smbd_smb2_lock_element *in_locks;
74 8 : struct tevent_req *subreq;
75 8 : const uint8_t *lock_buffer;
76 8 : uint16_t l;
77 8 : NTSTATUS status;
78 :
79 1957 : status = smbd_smb2_request_verify_sizes(req, 0x30);
80 1957 : if (!NT_STATUS_IS_OK(status)) {
81 8 : return smbd_smb2_request_error(req, status);
82 : }
83 1949 : inbody = SMBD_SMB2_IN_BODY_PTR(req);
84 :
85 1949 : in_lock_count = CVAL(inbody, 0x02);
86 1949 : if (req->xconn->protocol >= PROTOCOL_SMB2_10) {
87 1949 : in_lock_sequence = IVAL(inbody, 0x04);
88 : } else {
89 : /* 0x04 - 4 bytes reserved */
90 0 : in_lock_sequence = 0;
91 : }
92 1949 : in_file_id_persistent = BVAL(inbody, 0x08);
93 1949 : in_file_id_volatile = BVAL(inbody, 0x10);
94 :
95 1949 : if (in_lock_count < 1) {
96 0 : return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
97 : }
98 :
99 1949 : if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
100 0 : return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
101 : }
102 :
103 1949 : in_locks = talloc_array(req, struct smbd_smb2_lock_element,
104 : in_lock_count);
105 1949 : if (in_locks == NULL) {
106 0 : return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
107 : }
108 :
109 1949 : l = 0;
110 1949 : lock_buffer = inbody + 0x18;
111 :
112 1949 : in_locks[l].offset = BVAL(lock_buffer, 0x00);
113 1949 : in_locks[l].length = BVAL(lock_buffer, 0x08);
114 1949 : in_locks[l].flags = IVAL(lock_buffer, 0x10);
115 : /* 0x14 - 4 reserved bytes */
116 :
117 1949 : status = req->session->status;
118 1949 : if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
119 : /*
120 : * We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
121 : * for lock requests only.
122 : *
123 : * Unlock requests still need to be processed!
124 : *
125 : * This means smbd_smb2_request_check_session()
126 : * can't make that distinction itself and always
127 : * allows SMB2_OP_LOCK.
128 : */
129 30 : if (in_locks[0].flags != SMB2_LOCK_FLAG_UNLOCK) {
130 10 : return smbd_smb2_request_error(req, status);
131 : }
132 : }
133 :
134 1939 : lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);
135 :
136 2007 : for (l=1; l < in_lock_count; l++) {
137 68 : in_locks[l].offset = BVAL(lock_buffer, 0x00);
138 68 : in_locks[l].length = BVAL(lock_buffer, 0x08);
139 68 : in_locks[l].flags = IVAL(lock_buffer, 0x10);
140 : /* 0x14 - 4 reserved bytes */
141 :
142 68 : lock_buffer += 0x18;
143 : }
144 :
145 1939 : in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
146 1939 : if (in_fsp == NULL) {
147 0 : return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
148 : }
149 :
150 1939 : subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
151 : req, in_fsp,
152 : in_lock_sequence,
153 : in_lock_count,
154 : in_locks);
155 1939 : if (subreq == NULL) {
156 0 : return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
157 : }
158 1939 : tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
159 :
160 1939 : return smbd_smb2_request_pending_queue(req, subreq, 500);
161 : }
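
For orientation, the fixed 48-byte request body parsed above (and the 24-byte lock elements that follow it) can be pictured as packed wire structures. The struct and field names below are hypothetical; they only mirror the offsets read by the parser, which correspond to [MS-SMB2] 2.2.26:

struct smb2_lock_element_wire {			/* 24 (0x18) bytes each */
	uint64_t offset;			/* 0x00 */
	uint64_t length;			/* 0x08 */
	uint32_t flags;				/* 0x10: SMB2_LOCK_FLAG_* */
	uint32_t reserved;			/* 0x14 */
};

struct smb2_lock_request_body {			/* 48 (0x30) bytes */
	uint16_t structure_size;		/* 0x00: always 48 */
	uint16_t lock_count;			/* 0x02: at least 1 */
	uint32_t lock_sequence;			/* 0x04: reserved before SMB 2.1 */
	uint64_t file_id_persistent;		/* 0x08 */
	uint64_t file_id_volatile;		/* 0x10 */
	struct smb2_lock_element_wire locks[1];	/* 0x18: first element inline;
						 * elements 2..lock_count follow
						 * in the dynamic buffer */
};
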
162 :
163 1939 : static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
164 : {
165 1939 : struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
166 : struct smbd_smb2_request);
167 6 : DATA_BLOB outbody;
168 6 : NTSTATUS status;
169 6 : NTSTATUS error; /* transport error */
170 :
171 1945 : status = smbd_smb2_lock_recv(subreq);
172 1939 : TALLOC_FREE(subreq);
173 1939 : if (!NT_STATUS_IS_OK(status)) {
174 636 : error = smbd_smb2_request_error(smb2req, status);
175 636 : if (!NT_STATUS_IS_OK(error)) {
176 0 : smbd_server_connection_terminate(smb2req->xconn,
177 : nt_errstr(error));
178 636 : return;
179 : }
180 634 : return;
181 : }
182 :
183 1303 : outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
184 1303 : if (outbody.data == NULL) {
185 0 : error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
186 0 : if (!NT_STATUS_IS_OK(error)) {
187 0 : smbd_server_connection_terminate(smb2req->xconn,
188 : nt_errstr(error));
189 0 : return;
190 : }
191 0 : return;
192 : }
193 :
194 1303 : SSVAL(outbody.data, 0x00, 0x04); /* struct size */
195 1303 : SSVAL(outbody.data, 0x02, 0); /* reserved */
196 :
197 1303 : error = smbd_smb2_request_done(smb2req, outbody, NULL);
198 1303 : if (!NT_STATUS_IS_OK(error)) {
199 0 : smbd_server_connection_terminate(smb2req->xconn,
200 : nt_errstr(error));
201 0 : return;
202 : }
203 : }
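
The success path above builds the minimal 4-byte SMB2 LOCK response body; sketched with hypothetical names matching the two SSVAL() stores (and [MS-SMB2] 2.2.27):

struct smb2_lock_response_body {	/* 4 bytes */
	uint16_t structure_size;	/* 0x00: always 4 */
	uint16_t reserved;		/* 0x02: must be 0 */
};
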
204 :
205 : static void smbd_smb2_lock_cleanup(struct tevent_req *req,
206 : enum tevent_req_state req_state);
207 : static void smbd_smb2_lock_try(struct tevent_req *req);
208 : static void smbd_smb2_lock_retry(struct tevent_req *subreq);
209 : static bool smbd_smb2_lock_cancel(struct tevent_req *req);
210 :
211 1939 : static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
212 : struct tevent_context *ev,
213 : struct smbd_smb2_request *smb2req,
214 : struct files_struct *fsp,
215 : uint32_t in_lock_sequence,
216 : uint16_t in_lock_count,
217 : struct smbd_smb2_lock_element *in_locks)
218 : {
219 6 : struct tevent_req *req;
220 6 : struct smbd_smb2_lock_state *state;
221 1939 : bool isunlock = false;
222 6 : uint16_t i;
223 6 : struct smbd_lock_element *locks;
224 6 : NTSTATUS status;
225 1939 : bool check_lock_sequence = false;
226 1939 : uint32_t lock_sequence_bucket = 0;
227 :
228 1939 : req = tevent_req_create(mem_ctx, &state,
229 : struct smbd_smb2_lock_state);
230 1939 : if (req == NULL) {
231 0 : return NULL;
232 : }
233 1939 : state->ev = ev;
234 1939 : state->fsp = fsp;
235 1939 : state->smb2req = smb2req;
236 1939 : smb2req->subreq = req; /* So we can find this when going async. */
237 :
238 1939 : tevent_req_set_cleanup_fn(req, smbd_smb2_lock_cleanup);
239 :
240 1939 : state->smb1req = smbd_smb2_fake_smb_request(smb2req, fsp);
241 1939 : if (tevent_req_nomem(state->smb1req, req)) {
242 0 : return tevent_req_post(req, ev);
243 : }
244 :
245 1939 : DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
246 : fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));
247 :
248 : /*
249 : * Windows sets check_lock_sequence = true
250 : * only for resilient and persistent handles.
251 : *
252 : * [MS-SMB2] 3.3.5.14 Receiving an SMB2 LOCK Request
253 : *
254 : * ... if Open.IsResilient or Open.IsDurable or Open.IsPersistent is
255 : * TRUE or if Connection.Dialect belongs to the SMB 3.x dialect family
256 : * and Connection.ServerCapabilities includes
257 : * SMB2_GLOBAL_CAP_MULTI_CHANNEL bit, the server SHOULD<314>
258 : * perform lock sequence * verification ...
259 :
260 : * <314> Section 3.3.5.14: Windows 7 and Windows Server 2008 R2 perform
261 : * lock sequence verification only when Open.IsResilient is TRUE.
262 : * Windows 8 through Windows 10 v1909 and Windows Server 2012 through
263 : * Windows Server v1909 perform lock sequence verification only when
264 : * Open.IsResilient or Open.IsPersistent is TRUE.
265 : *
266 : * Note <314> also applies to all versions (at least) up to
267 : * Windows Server v2004.
268 : *
269 : * Hopefully this will be fixed in future Windows versions and they
270 : * will avoid Note <314>.
271 : *
272 : * We implement what the specification says by default, but
273 : * allow "smb2 disable lock sequence checking = yes" to
274 : * behave like Windows again.
275 : *
276 : Note that we already check the dialect before setting
277 : * SMB2_CAP_MULTI_CHANNEL in smb2_negprot.c
278 : */
279 1939 : if (smb2req->xconn->smb2.server.capabilities & SMB2_CAP_MULTI_CHANNEL) {
280 1939 : check_lock_sequence = true;
281 : }
282 1939 : if (fsp->op->global->durable) {
283 112 : check_lock_sequence = true;
284 : }
285 :
286 1939 : if (check_lock_sequence) {
287 6 : bool disable_lock_sequence_checking =
288 1939 : lp_smb2_disable_lock_sequence_checking();
289 :
290 1939 : if (disable_lock_sequence_checking) {
291 0 : check_lock_sequence = false;
292 : }
293 : }
294 :
295 1939 : if (check_lock_sequence) {
296 1939 : state->lock_sequence_value = in_lock_sequence & 0xF;
297 1939 : lock_sequence_bucket = in_lock_sequence >> 4;
298 : }
299 1939 : if ((lock_sequence_bucket > 0) &&
300 : (lock_sequence_bucket <= sizeof(fsp->op->global->lock_sequence_array)))
301 : {
302 144 : uint32_t idx = lock_sequence_bucket - 1;
303 144 : uint8_t *array = fsp->op->global->lock_sequence_array;
304 :
305 144 : state->lock_sequence_element = &array[idx];
306 : }
307 :
308 1939 : if (state->lock_sequence_element != NULL) {
309 : /*
310 : * The incoming 'state->lock_sequence_value' is masked with 0xF.
311 : *
312 : * Note that per default '*state->lock_sequence_element'
313 : * is invalid: it holds 0xFF, a value that can never
314 : * match an incoming (masked) value.
315 : */
316 144 : if (*state->lock_sequence_element == state->lock_sequence_value)
317 : {
318 64 : DBG_INFO("replayed smb2 lock request detected: "
319 : "file %s, value %u, bucket %u\n",
320 : fsp_str_dbg(fsp),
321 : (unsigned)state->lock_sequence_value,
322 : (unsigned)lock_sequence_bucket);
323 64 : tevent_req_done(req);
324 64 : return tevent_req_post(req, ev);
325 : }
326 : /*
327 : * If it's not a replay, mark the element as
328 : * invalid again.
329 : */
330 80 : *state->lock_sequence_element = 0xFF;
331 : }
332 :
333 1875 : locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
334 1875 : if (locks == NULL) {
335 0 : tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
336 0 : return tevent_req_post(req, ev);
337 : }
338 :
339 1875 : switch (in_locks[0].flags) {
340 134 : case SMB2_LOCK_FLAG_SHARED:
341 : case SMB2_LOCK_FLAG_EXCLUSIVE:
342 134 : if (in_lock_count > 1) {
343 0 : tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
344 0 : return tevent_req_post(req, ev);
345 : }
346 134 : state->blocking = true;
347 134 : break;
348 :
349 1029 : case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
350 : case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
351 1029 : break;
352 :
353 674 : case SMB2_LOCK_FLAG_UNLOCK:
354 : /* only the first lock element determines the UNLOCK case - see
355 : MS-SMB2 3.3.5.14 */
356 674 : isunlock = true;
357 674 : break;
358 :
359 36 : default:
360 36 : tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
361 36 : return tevent_req_post(req, ev);
362 : }
363 :
364 1839 : if (!isunlock && (in_lock_count > 1)) {
365 :
366 : /*
367 : * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
368 : * have more than one lock and one of those is blocking.
369 : */
370 :
371 52 : for (i=0; i<in_lock_count; i++) {
372 40 : uint32_t flags = in_locks[i].flags;
373 :
374 40 : if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
375 8 : tevent_req_nterror(
376 : req, NT_STATUS_INVALID_PARAMETER);
377 8 : return tevent_req_post(req, ev);
378 : }
379 : }
380 : }
381 :
382 3714 : for (i=0; i<in_lock_count; i++) {
383 1883 : bool invalid = false;
384 1883 : bool posix_handle = (fsp->posix_flags & FSP_POSIX_FLAGS_OPEN);
385 :
386 1883 : switch (in_locks[i].flags) {
387 134 : case SMB2_LOCK_FLAG_SHARED:
388 : case SMB2_LOCK_FLAG_EXCLUSIVE:
389 134 : if (isunlock) {
390 0 : invalid = true;
391 0 : break;
392 : }
393 134 : break;
394 :
395 1043 : case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
396 : case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
397 1043 : if (isunlock) {
398 8 : invalid = true;
399 : }
400 1041 : break;
401 :
402 702 : case SMB2_LOCK_FLAG_UNLOCK:
403 702 : if (!isunlock) {
404 0 : tevent_req_nterror(req,
405 : NT_STATUS_INVALID_PARAMETER);
406 0 : return tevent_req_post(req, ev);
407 : }
408 698 : break;
409 :
410 4 : default:
411 4 : if (isunlock) {
412 : /*
413 : * If the first element was an UNLOCK,
414 : * we need to defer the error response
415 : * to the backend, because all unlock
416 : * elements need to be processed first.
417 : */
418 4 : invalid = true;
419 4 : break;
420 : }
421 0 : tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
422 0 : return tevent_req_post(req, ev);
423 : }
424 :
425 1883 : locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
426 1883 : locks[i].smblctx = fsp->op->global->open_persistent_id;
427 1883 : locks[i].offset = in_locks[i].offset;
428 1883 : locks[i].count = in_locks[i].length;
429 :
430 1883 : if (posix_handle) {
431 0 : locks[i].lock_flav = POSIX_LOCK;
432 : } else {
433 1883 : locks[i].lock_flav = WINDOWS_LOCK;
434 : }
435 :
436 1883 : if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
437 1045 : if (posix_handle && fsp->fsp_flags.can_write == false) {
438 : /*
439 : * Can't get a write lock on a posix
440 : * read-only handle.
441 : */
442 0 : DBG_INFO("POSIX write lock requested "
443 : "on read-only handle for file %s\n",
444 : fsp_str_dbg(fsp));
445 0 : tevent_req_nterror(req,
446 : NT_STATUS_INVALID_HANDLE);
447 0 : return tevent_req_post(req, ev);
448 : }
449 1045 : locks[i].brltype = WRITE_LOCK;
450 838 : } else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
451 132 : locks[i].brltype = READ_LOCK;
452 706 : } else if (invalid) {
453 : /*
454 : * this is an invalid UNLOCK element
455 : * and the backend needs to test for
456 : * brltype != UNLOCK_LOCK and return
457 : * NT_STATUS_INVALID_PARAMETER
458 : */
459 4 : locks[i].brltype = READ_LOCK;
460 : } else {
461 702 : locks[i].brltype = UNLOCK_LOCK;
462 : }
463 1883 : locks[i].lock_flav = WINDOWS_LOCK;
464 :
465 1883 : DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
466 : "smblctx = %"PRIu64" type %d\n",
467 : i,
468 : locks[i].offset,
469 : locks[i].count,
470 : locks[i].smblctx,
471 : (int)locks[i].brltype);
472 : }
473 :
474 1831 : state->locks = locks;
475 1831 : state->lock_count = in_lock_count;
476 :
477 1831 : if (isunlock) {
478 674 : status = smbd_do_unlocking(
479 670 : state->smb1req, fsp, in_lock_count, locks);
480 :
481 674 : if (tevent_req_nterror(req, status)) {
482 126 : return tevent_req_post(req, ev);
483 : }
484 548 : tevent_req_done(req);
485 548 : return tevent_req_post(req, ev);
486 : }
487 :
488 1157 : smbd_smb2_lock_try(req);
489 1157 : if (!tevent_req_is_in_progress(req)) {
490 1121 : return tevent_req_post(req, ev);
491 : }
492 :
493 36 : tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
494 36 : aio_add_req_to_fsp(state->fsp, req);
495 36 : tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);
496 :
497 36 : return req;
498 : }
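
To summarise the lock-sequence handling above: the low 4 bits of the 32-bit LockSequence carry the sequence value and the remaining bits a 1-based bucket index into the per-open lock_sequence_array; bucket 0, or a bucket beyond the array, disables replay detection for that request. A client wanting replay detection would build the field roughly as in this hypothetical helper, which only illustrates the masking done above and is not Samba code:

static inline uint32_t make_lock_sequence(uint32_t bucket_1based,
					  uint8_t sequence_value)
{
	/* value in bits 0-3, 1-based bucket index in the bits above */
	return (bucket_1based << 4) | (sequence_value & 0xF);
}
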
499 :
500 3878 : static void smbd_smb2_lock_cleanup(struct tevent_req *req,
501 : enum tevent_req_state req_state)
502 : {
503 3878 : struct smbd_smb2_lock_state *state = tevent_req_data(
504 : req, struct smbd_smb2_lock_state);
505 :
506 3878 : if (req_state != TEVENT_REQ_DONE) {
507 2567 : return;
508 : }
509 :
510 1303 : if (state->lock_sequence_element != NULL) {
511 : /*
512 : * On success we remember the given/incoming
513 : * value (which was masked with 0xF).
514 : */
515 84 : *state->lock_sequence_element = state->lock_sequence_value;
516 : }
517 : }
518 :
519 4 : static void smbd_smb2_lock_update_retry_msecs(
520 : struct smbd_smb2_lock_state *state)
521 : {
522 : /*
523 : * The default lp_lock_spin_time() is 200ms;
524 : * we use half of it to trigger the first retry.
525 : *
526 : * v_min is in the range of 0.001 to 10 secs
527 : * (0.1 secs by default)
528 : *
529 : * v_max is in the range of 0.01 to 100 secs
530 : * (1.0 secs by default)
531 : *
532 : * The typical steps are:
533 : * 0.1, 0.2, 0.3, 0.4, ... 1.0
534 : */
535 4 : uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
536 4 : uint32_t v_max = 10 * v_min;
537 :
538 4 : if (state->retry_msecs >= v_max) {
539 0 : state->retry_msecs = v_max;
540 0 : return;
541 : }
542 :
543 4 : state->retry_msecs += v_min;
544 : }
545 :
546 4 : static void smbd_smb2_lock_update_polling_msecs(
547 : struct smbd_smb2_lock_state *state)
548 : {
549 : /*
550 : * The default lp_lock_spin_time() is 200ms.
551 : *
552 : * v_min is in the range of 0.002 to 20 secs
553 : * (0.2 secs by default)
554 : *
555 : * v_max is in the range of 0.02 to 200 secs
556 : * (2.0 secs by default)
557 : *
558 : * The typical steps are:
559 : * 0.2, 0.4, 0.6, 0.8, ... 2.0
560 : */
561 4 : uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
562 4 : uint32_t v_max = 10 * v_min;
563 :
564 4 : if (state->polling_msecs >= v_max) {
565 0 : state->polling_msecs = v_max;
566 0 : return;
567 : }
568 :
569 4 : state->polling_msecs += v_min;
570 : }
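
With the default "lock spin time" of 200 msecs noted in the comments above, the retry schedule is 100, 200, ... capped at 1000 msecs and the polling schedule 200, 400, ... capped at 2000 msecs. A standalone sketch that reproduces both schedules (the 200 msec default and the simulation loop are assumptions for illustration only, not smbd code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	const uint32_t lock_spin_time = 200;	/* assumed lp_lock_spin_time() default */
	uint32_t retry_msecs = 0;
	uint32_t polling_msecs = 0;
	int step;

	for (step = 1; step <= 12; step++) {
		/* mirrors smbd_smb2_lock_update_retry_msecs() */
		uint32_t v_min = MAX(2, MIN(20000, lock_spin_time)) / 2;
		retry_msecs = MIN(10 * v_min, retry_msecs + v_min);

		/* mirrors smbd_smb2_lock_update_polling_msecs() */
		v_min = MAX(2, MIN(20000, lock_spin_time));
		polling_msecs = MIN(10 * v_min, polling_msecs + v_min);

		printf("step %2d: retry=%4" PRIu32 " msecs, polling=%4" PRIu32 " msecs\n",
		       step, retry_msecs, polling_msecs);
	}
	return 0;
}
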
571 :
572 1173 : static void smbd_smb2_lock_try(struct tevent_req *req)
573 : {
574 1173 : struct smbd_smb2_lock_state *state = tevent_req_data(
575 : req, struct smbd_smb2_lock_state);
576 1173 : struct share_mode_lock *lck = NULL;
577 2 : uint16_t blocker_idx;
578 1173 : struct server_id blocking_pid = { 0 };
579 2 : uint64_t blocking_smblctx;
580 2 : NTSTATUS status;
581 1173 : struct tevent_req *subreq = NULL;
582 1173 : struct timeval endtime = { 0 };
583 :
584 1173 : lck = get_existing_share_mode_lock(
585 1173 : talloc_tos(), state->fsp->file_id);
586 1173 : if (tevent_req_nomem(lck, req)) {
587 1137 : return;
588 : }
589 :
590 1175 : status = smbd_do_locks_try(
591 : state->fsp,
592 1173 : state->lock_count,
593 : state->locks,
594 : &blocker_idx,
595 : &blocking_pid,
596 : &blocking_smblctx);
597 1173 : if (NT_STATUS_IS_OK(status)) {
598 691 : TALLOC_FREE(lck);
599 691 : tevent_req_done(req);
600 691 : return;
601 : }
602 482 : if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
603 : /*
604 : * We got NT_STATUS_RETRY, so we reset
605 : * polling_msecs so that the retries based
606 : * on LOCK_NOT_GRANTED will later start
607 : * with small intervals again.
608 : */
609 8 : state->polling_msecs = 0;
610 :
611 : /*
612 : * The backend wasn't able to decide yet.
613 : * We need to wait even for non-blocking
614 : * locks.
615 : *
616 : * The backend uses blocking_smblctx == UINT64_MAX
617 : * to indicate that we should use retry timers.
618 : *
619 : * It uses blocking_smblctx == 0 to indicate
620 : * it will use share_mode_wakeup_waiters()
621 : * to wake us. Note that unrelated changes in
622 : * locking.tdb may cause retries.
623 : */
624 :
625 8 : if (blocking_smblctx != UINT64_MAX) {
626 4 : SMB_ASSERT(blocking_smblctx == 0);
627 8 : goto setup_retry;
628 : }
629 :
630 4 : smbd_smb2_lock_update_retry_msecs(state);
631 :
632 4 : DBG_DEBUG("Waiting for a backend decision. "
633 : "Retry in %"PRIu32" msecs\n",
634 : state->retry_msecs);
635 :
636 : /*
637 : * We completely ignore state->endtime here;
638 : * we'll wait for a backend decision forever.
639 : * If the backend is smart enough to implement
640 : * some NT_STATUS_RETRY logic, it has to
641 : * switch to another status after a while
642 : * in order to avoid waiting forever.
643 : */
644 4 : endtime = timeval_current_ofs_msec(state->retry_msecs);
645 4 : goto setup_retry;
646 : }
647 474 : if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
648 : /*
649 : * This is a bug and will be changed into an assert
650 : * in a future version. We should only
651 : * ever get NT_STATUS_LOCK_NOT_GRANTED here!
652 : */
653 0 : static uint64_t _bug_count;
654 0 : int _level = (_bug_count++ == 0) ? DBGLVL_ERR: DBGLVL_DEBUG;
655 0 : DBG_PREFIX(_level, ("BUG: Got %s mapping to "
656 : "NT_STATUS_LOCK_NOT_GRANTED\n",
657 : nt_errstr(status)));
658 0 : status = NT_STATUS_LOCK_NOT_GRANTED;
659 : }
660 474 : if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
661 12 : TALLOC_FREE(lck);
662 12 : tevent_req_nterror(req, status);
663 12 : return;
664 : }
665 : /*
666 : * We got LOCK_NOT_GRANTED, make sure
667 : * a following STATUS_RETRY will start
668 : * with short intervals again.
669 : */
670 462 : state->retry_msecs = 0;
671 :
672 462 : if (!state->blocking) {
673 434 : TALLOC_FREE(lck);
674 434 : tevent_req_nterror(req, status);
675 434 : return;
676 : }
677 :
678 28 : if (blocking_smblctx == UINT64_MAX) {
679 4 : smbd_smb2_lock_update_polling_msecs(state);
680 :
681 4 : DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
682 : state->polling_msecs);
683 :
684 4 : endtime = timeval_current_ofs_msec(state->polling_msecs);
685 : }
686 :
687 24 : setup_retry:
688 36 : DBG_DEBUG("Watching share mode lock\n");
689 :
690 36 : subreq = share_mode_watch_send(
691 : state, state->ev, lck, blocking_pid);
692 36 : TALLOC_FREE(lck);
693 36 : if (tevent_req_nomem(subreq, req)) {
694 0 : return;
695 : }
696 36 : tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);
697 :
698 36 : if (!timeval_is_zero(&endtime)) {
699 0 : bool ok;
700 :
701 8 : ok = tevent_req_set_endtime(subreq,
702 : state->ev,
703 : endtime);
704 8 : if (!ok) {
705 0 : tevent_req_oom(req);
706 0 : return;
707 : }
708 : }
709 : }
710 :
711 16 : static void smbd_smb2_lock_retry(struct tevent_req *subreq)
712 : {
713 16 : struct tevent_req *req = tevent_req_callback_data(
714 : subreq, struct tevent_req);
715 16 : struct smbd_smb2_lock_state *state = tevent_req_data(
716 : req, struct smbd_smb2_lock_state);
717 0 : NTSTATUS status;
718 0 : bool ok;
719 :
720 : /*
721 : * Make sure we run as the user again
722 : */
723 16 : ok = change_to_user_and_service_by_fsp(state->fsp);
724 16 : if (!ok) {
725 0 : tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
726 0 : return;
727 : }
728 :
729 16 : status = share_mode_watch_recv(subreq, NULL, NULL);
730 16 : TALLOC_FREE(subreq);
731 16 : if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
732 : /*
733 : * This is just a trigger for a timed retry.
734 : */
735 8 : status = NT_STATUS_OK;
736 : }
737 16 : if (tevent_req_nterror(req, status)) {
738 0 : return;
739 : }
740 :
741 16 : smbd_smb2_lock_try(req);
742 : }
743 :
744 1939 : static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
745 : {
746 1939 : return tevent_req_simple_recv_ntstatus(req);
747 : }
748 :
749 : /****************************************************************
750 : Cancel an outstanding blocking lock request.
751 : *****************************************************************/
752 :
753 24 : static bool smbd_smb2_lock_cancel(struct tevent_req *req)
754 : {
755 24 : struct smbd_smb2_request *smb2req = NULL;
756 24 : struct smbd_smb2_lock_state *state = tevent_req_data(req,
757 : struct smbd_smb2_lock_state);
758 24 : if (!state) {
759 0 : return false;
760 : }
761 :
762 24 : if (!state->smb2req) {
763 0 : return false;
764 : }
765 :
766 24 : smb2req = state->smb2req;
767 :
768 : /*
769 : * If the request is canceled because of close, logoff or tdis
770 : * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
771 : * NT_STATUS_CANCELLED.
772 : */
773 24 : if (state->fsp->fsp_flags.closing ||
774 16 : !NT_STATUS_IS_OK(smb2req->session->status) ||
775 12 : !NT_STATUS_IS_OK(smb2req->tcon->status)) {
776 16 : tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
777 16 : return true;
778 : }
779 :
780 8 : tevent_req_nterror(req, NT_STATUS_CANCELLED);
781 8 : return true;
782 : }