/*
   Unix SMB/CIFS implementation.
   Blocking Locking functions
   Copyright (C) Jeremy Allison 1998-2003

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "locking/share_mode_lock.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "messages.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/ndr_open_files.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

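/*
 * Try to grab all entries in "locks" at once. If one of them fails,
 * undo the locks that were already granted, report which element
 * blocked us (*blocker_idx) and who holds the conflicting lock
 * (*blocking_pid / *blocking_smblctx), and return the failure status.
 */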
NTSTATUS smbd_do_locks_try(
        struct files_struct *fsp,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        struct server_id *blocking_pid,
        uint64_t *blocking_smblctx)
{
        NTSTATUS status = NT_STATUS_OK;
        uint16_t i;

        for (i=0; i<num_locks; i++) {
                struct smbd_lock_element *e = &locks[i];

                status = do_lock(
                        fsp,
                        locks, /* req_mem_ctx */
                        &e->req_guid,
                        e->smblctx,
                        e->count,
                        e->offset,
                        e->brltype,
                        e->lock_flav,
                        blocking_pid,
                        blocking_smblctx);
                if (!NT_STATUS_IS_OK(status)) {
                        break;
                }
        }

        if (NT_STATUS_IS_OK(status)) {
                return NT_STATUS_OK;
        }

        *blocker_idx = i;

        /*
         * Undo the locks we successfully got
         */
        for (i = i-1; i != UINT16_MAX; i--) {
                struct smbd_lock_element *e = &locks[i];
                do_unlock(fsp,
                          e->smblctx,
                          e->count,
                          e->offset,
                          e->lock_flav);
        }

        return status;
}

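/*
 * Remember "req" as pending on "fsp" so that it can be found again
 * for retry, cancel or cleanup. Returns false on allocation failure.
 */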
static bool smbd_smb1_fsp_add_blocked_lock_req(
        struct files_struct *fsp, struct tevent_req *req)
{
        size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
        struct tevent_req **tmp = NULL;

        tmp = talloc_realloc(
                fsp,
                fsp->blocked_smb1_lock_reqs,
                struct tevent_req *,
                num_reqs+1);
        if (tmp == NULL) {
                return false;
        }
        fsp->blocked_smb1_lock_reqs = tmp;
        fsp->blocked_smb1_lock_reqs[num_reqs] = req;
        return true;
}

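/*
 * State of one (possibly blocking) SMB1 lock request while it is
 * waiting to be granted, retried or cancelled.
 */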
struct smbd_smb1_do_locks_state {
        struct tevent_context *ev;
        struct smb_request *smbreq;
        struct files_struct *fsp;
        uint32_t timeout;
        uint32_t polling_msecs;
        uint32_t retry_msecs;
        struct timeval endtime;
        bool large_offset;      /* required for correct cancel */
        uint16_t num_locks;
        struct smbd_lock_element *locks;
        uint16_t blocker;
        NTSTATUS deny_status;
};

static void smbd_smb1_do_locks_try(struct tevent_req *req);
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
static void smbd_smb1_blocked_locks_cleanup(
        struct tevent_req *req, enum tevent_req_state req_state);
static NTSTATUS smbd_smb1_do_locks_check(
        struct files_struct *fsp,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        struct server_id *blocking_pid,
        uint64_t *blocking_smblctx);

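/*
 * Work out state->endtime for this request from the client-supplied
 * timeout, applying the lock spin time heuristics below. Once the
 * endtime is set, later calls return early.
 */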
static void smbd_smb1_do_locks_setup_timeout(
        struct smbd_smb1_do_locks_state *state,
        const struct smbd_lock_element *blocker)
{
        struct files_struct *fsp = state->fsp;

        if (!timeval_is_zero(&state->endtime)) {
                /*
                 * already done
                 */
                return;
        }

        if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
                /*
                 * Windows internal resolution for blocking locks
                 * seems to be about 200ms... Don't wait for less than
                 * that. JRA.
                 */
                state->timeout = MAX(state->timeout, lp_lock_spin_time());
        }

        if (state->timeout != 0) {
                goto set_endtime;
        }

        if (blocker == NULL) {
                goto set_endtime;
        }

        if ((blocker->offset >= 0xEF000000) &&
            ((blocker->offset >> 63) == 0)) {
                /*
                 * This must be an optimization of an ancient
                 * application bug...
                 */
                state->timeout = lp_lock_spin_time();
        }

        if (fsp->fsp_flags.lock_failure_seen &&
            (blocker->offset == fsp->lock_failure_offset)) {
                /*
                 * Delay repeated lock attempts on the same
                 * lock. Maybe a more advanced version of the
                 * above check?
                 */
                DBG_DEBUG("Delaying lock request due to previous "
                          "failure\n");
                state->timeout = lp_lock_spin_time();
        }

set_endtime:
        /*
         * Note state->timeout might still be 0,
         * but that's ok, as we don't want to retry
         * in that case.
         */
        state->endtime = timeval_add(&state->smbreq->request_time,
                                     state->timeout / 1000,
                                     (state->timeout % 1000) * 1000);
}

static void smbd_smb1_do_locks_update_retry_msecs(
        struct smbd_smb1_do_locks_state *state)
{
        /*
         * The default lp_lock_spin_time() is 200ms,
         * we just use half of it to trigger the first retry.
         *
         * v_min is in the range of 0.001 to 10 secs
         * (0.1 secs by default)
         *
         * v_max is in the range of 0.01 to 100 secs
         * (1.0 secs by default)
         *
         * The typical steps are:
         * 0.1, 0.2, 0.3, 0.4, ... 1.0
         */
        uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
        uint32_t v_max = 10 * v_min;

        if (state->retry_msecs >= v_max) {
                state->retry_msecs = v_max;
                return;
        }

        state->retry_msecs += v_min;
}

static void smbd_smb1_do_locks_update_polling_msecs(
        struct smbd_smb1_do_locks_state *state)
{
        /*
         * The default lp_lock_spin_time() is 200ms.
         *
         * v_min is in the range of 0.002 to 20 secs
         * (0.2 secs by default)
         *
         * v_max is in the range of 0.02 to 200 secs
         * (2.0 secs by default)
         *
         * The typical steps are:
         * 0.2, 0.4, 0.6, 0.8, ... 2.0
         */
        uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
        uint32_t v_max = 10 * v_min;

        if (state->polling_msecs >= v_max) {
                state->polling_msecs = v_max;
                return;
        }

        state->polling_msecs += v_min;
}

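/*
 * Kick off a (possibly blocking) SMB1 lock request. The smbreq is
 * talloc_move()d into our state; if the request cannot be finished
 * immediately it is attached to the fsp as a blocked lock request.
 */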
struct tevent_req *smbd_smb1_do_locks_send(
        TALLOC_CTX *mem_ctx,
        struct tevent_context *ev,
        struct smb_request **smbreq, /* talloc_move()d into our state */
        struct files_struct *fsp,
        uint32_t lock_timeout,
        bool large_offset,
        uint16_t num_locks,
        struct smbd_lock_element *locks)
{
        struct tevent_req *req = NULL;
        struct smbd_smb1_do_locks_state *state = NULL;
        bool ok;

        req = tevent_req_create(
                mem_ctx, &state, struct smbd_smb1_do_locks_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->smbreq = talloc_move(state, smbreq);
        state->fsp = fsp;
        state->timeout = lock_timeout;
        state->large_offset = large_offset;
        state->num_locks = num_locks;
        state->locks = locks;
        state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;

        DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);

        if (num_locks == 0 || locks == NULL) {
                DBG_DEBUG("no locks\n");
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }

        if (state->locks[0].lock_flav == POSIX_LOCK) {
                /*
                 * SMB1 posix locks always use
                 * NT_STATUS_FILE_LOCK_CONFLICT.
                 */
                state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
        }

        smbd_smb1_do_locks_try(req);
        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
        if (!ok) {
                tevent_req_oom(req);
                return tevent_req_post(req, ev);
        }
        tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
        return req;
}

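/*
 * tevent cleanup function: remove "req" from the fsp's array of
 * blocked SMB1 lock requests once the request is finished.
 */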
static void smbd_smb1_blocked_locks_cleanup(
        struct tevent_req *req, enum tevent_req_state req_state)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        struct files_struct *fsp = state->fsp;
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        size_t i;

        DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
                  req,
                  state,
                  (int)req_state);

        if (req_state == TEVENT_REQ_RECEIVED) {
                DBG_DEBUG("already received\n");
                return;
        }

        for (i=0; i<num_blocked; i++) {
                if (blocked[i] == req) {
                        break;
                }
        }
        SMB_ASSERT(i<num_blocked);

        ARRAY_DEL_ELEMENT(blocked, i, num_blocked);

        fsp->blocked_smb1_lock_reqs = talloc_realloc(
                fsp, blocked, struct tevent_req *, num_blocked-1);
}

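/*
 * Check the requested locks against the locks an older, still
 * pending request is waiting for, so that a newer request does not
 * overtake an older one it conflicts with.
 */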
static NTSTATUS smbd_smb1_do_locks_check_blocked(
        uint16_t num_blocked,
        struct smbd_lock_element *blocked,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        uint64_t *blocking_smblctx)
{
        uint16_t li;

        for (li=0; li < num_locks; li++) {
                struct smbd_lock_element *l = &locks[li];
                uint16_t bi;
                bool valid;

                valid = byte_range_valid(l->offset, l->count);
                if (!valid) {
                        return NT_STATUS_INVALID_LOCK_RANGE;
                }

                for (bi = 0; bi < num_blocked; bi++) {
                        struct smbd_lock_element *b = &blocked[bi];
                        bool overlap;

                        /* Read locks never conflict. */
                        if (l->brltype == READ_LOCK && b->brltype == READ_LOCK) {
                                continue;
                        }

                        overlap = byte_range_overlap(l->offset,
                                                     l->count,
                                                     b->offset,
                                                     b->count);
                        if (!overlap) {
                                continue;
                        }

                        *blocker_idx = li;
                        *blocking_smblctx = b->smblctx;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}

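/*
 * Check against older pending requests first, then try to actually
 * grab the locks via smbd_do_locks_try().
 */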
static NTSTATUS smbd_smb1_do_locks_check(
        struct files_struct *fsp,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        struct server_id *blocking_pid,
        uint64_t *blocking_smblctx)
{
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        NTSTATUS status;
        size_t bi;

        /*
         * We check the pending/blocked requests
         * from the oldest to the youngest request.
         *
         * Note that due to the retry logic the current request
         * might already be in the list.
         */

        for (bi = 0; bi < num_blocked; bi++) {
                struct smbd_smb1_do_locks_state *blocked_state =
                        tevent_req_data(blocked[bi],
                                        struct smbd_smb1_do_locks_state);

                if (blocked_state->locks == locks) {
                        SMB_ASSERT(blocked_state->num_locks == num_locks);

                        /*
                         * We found ourselves...
                         */
                        break;
                }

                status = smbd_smb1_do_locks_check_blocked(
                        blocked_state->num_locks,
                        blocked_state->locks,
                        num_locks,
                        locks,
                        blocker_idx,
                        blocking_smblctx);
                if (!NT_STATUS_IS_OK(status)) {
                        *blocking_pid = messaging_server_id(
                                fsp->conn->sconn->msg_ctx);
                        return status;
                }
        }

        status = smbd_do_locks_try(
                fsp,
                num_locks,
                locks,
                blocker_idx,
                blocking_pid,
                blocking_smblctx);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        return NT_STATUS_OK;
}

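/*
 * The core of the blocking lock logic: take the share mode lock,
 * try the requested locks and either finish the request or set up a
 * watch on locking.tdb (and possibly a timer) for a later retry.
 * Called once from smbd_smb1_do_locks_send() and again from the
 * retry path.
 */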
static void smbd_smb1_do_locks_try(struct tevent_req *req)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        struct files_struct *fsp = state->fsp;
        struct share_mode_lock *lck;
        struct timeval endtime = { 0 };
        struct server_id blocking_pid = { 0 };
        uint64_t blocking_smblctx = 0;
        struct tevent_req *subreq = NULL;
        NTSTATUS status;
        bool ok;
        bool expired;

        lck = get_existing_share_mode_lock(state, fsp->file_id);
        if (tevent_req_nomem(lck, req)) {
                DBG_DEBUG("Could not get share mode lock\n");
                return;
        }

        status = smbd_smb1_do_locks_check(
                fsp,
                state->num_locks,
                state->locks,
                &state->blocker,
                &blocking_pid,
                &blocking_smblctx);
        if (NT_STATUS_IS_OK(status)) {
                goto done;
        }
        if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
                /*
                 * We got NT_STATUS_RETRY.
                 * We reset polling_msecs so that
                 * the retries based on LOCK_NOT_GRANTED
                 * will later start with small intervals again.
                 */
                state->polling_msecs = 0;

                /*
                 * The backend wasn't able to decide yet.
                 * We need to wait even for non-blocking
                 * locks.
                 *
                 * The backend uses blocking_smblctx == UINT64_MAX
                 * to indicate that we should use retry timers.
                 *
                 * It uses blocking_smblctx == 0 to indicate
                 * it will use share_mode_wakeup_waiters()
                 * to wake us. Note that unrelated changes in
                 * locking.tdb may cause retries.
                 */

                if (blocking_smblctx != UINT64_MAX) {
                        SMB_ASSERT(blocking_smblctx == 0);
                        goto setup_retry;
                }

                smbd_smb1_do_locks_update_retry_msecs(state);

                DBG_DEBUG("Waiting for a backend decision. "
                          "Retry in %"PRIu32" msecs\n",
                          state->retry_msecs);

                /*
                 * We completely ignore state->endtime here,
                 * as we'll wait for a backend decision forever.
                 * If the backend is smart enough to implement
                 * some NT_STATUS_RETRY logic, it has to
                 * switch to another status later in order
                 * to avoid waiting forever.
                 */
                endtime = timeval_current_ofs_msec(state->retry_msecs);
                goto setup_retry;
        }
        if (!ERROR_WAS_LOCK_DENIED(status)) {
                goto done;
        }
        /*
         * We got LOCK_NOT_GRANTED, make sure
         * a following STATUS_RETRY will start
         * with short intervals again.
         */
        state->retry_msecs = 0;

        smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
        DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
                  state->timeout,
                  blocking_smblctx);

        /*
         * If the client-specified timeout has expired,
         * avoid further retries.
         *
         * Otherwise keep waiting, either for changes
         * in locking.tdb or for the polling mode
         * timers waiting for posix locks.
         *
         * If the endtime is not expired yet,
         * it means we'll retry after a timeout.
         * In that case we'll have to return
         * NT_STATUS_FILE_LOCK_CONFLICT
         * instead of NT_STATUS_LOCK_NOT_GRANTED.
         */
        expired = timeval_expired(&state->endtime);
        if (expired) {
                status = state->deny_status;
                goto done;
        }
        state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;

        endtime = state->endtime;

        if (blocking_smblctx == UINT64_MAX) {
                struct timeval tmp;

                smbd_smb1_do_locks_update_polling_msecs(state);

                DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
                          state->polling_msecs);

                tmp = timeval_current_ofs_msec(state->polling_msecs);
                endtime = timeval_min(&endtime, &tmp);
        }

setup_retry:
        subreq = share_mode_watch_send(
                state, state->ev, lck, blocking_pid);
        if (tevent_req_nomem(subreq, req)) {
                goto done;
        }
        TALLOC_FREE(lck);
        tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);

        if (timeval_is_zero(&endtime)) {
                return;
        }

        ok = tevent_req_set_endtime(subreq, state->ev, endtime);
        if (!ok) {
                status = NT_STATUS_NO_MEMORY;
                goto done;
        }
        return;
done:
        TALLOC_FREE(lck);
        smbd_smb1_brl_finish_by_req(req, status);
}

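/*
 * Called when the watch on locking.tdb fires or its timer expires:
 * become the right user again and retry the locks.
 */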
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        NTSTATUS status;
        bool ok;

        /*
         * Make sure we run as the user again
         */
        ok = change_to_user_and_service_by_fsp(state->fsp);
        if (!ok) {
                tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
                return;
        }

        status = share_mode_watch_recv(subreq, NULL, NULL);
        TALLOC_FREE(subreq);

        DBG_DEBUG("share_mode_watch_recv returned %s\n",
                  nt_errstr(status));

        /*
         * We ignore any errors here, it's most likely
         * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
         *
         * In any case we can just give it a retry.
         */

        smbd_smb1_do_locks_try(req);
}

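/*
 * Report the result of the lock request. On failure remember the
 * blocking offset on the fsp, so that immediate retries of the same
 * lock get delayed by smbd_smb1_do_locks_setup_timeout().
 */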
NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        NTSTATUS status = NT_STATUS_OK;
        bool err;

        err = tevent_req_is_nterror(req, &status);

        DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));

        if (tevent_req_is_nterror(req, &status)) {
                struct files_struct *fsp = state->fsp;
                struct smbd_lock_element *blocker =
                        &state->locks[state->blocker];

                DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
                          blocker->offset);

                fsp->fsp_flags.lock_failure_seen = true;
                fsp->lock_failure_offset = blocker->offset;
                return status;
        }

        tevent_req_received(req);

        return NT_STATUS_OK;
}

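/*
 * Hand the embedded smb_request back to the caller, typically so
 * that the reply can be sent on it.
 */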
bool smbd_smb1_do_locks_extract_smbreq(
        struct tevent_req *req,
        TALLOC_CTX *mem_ctx,
        struct smb_request **psmbreq)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);

        DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
                  req,
                  state,
                  state->smbreq);

        if (state->smbreq == NULL) {
                return false;
        }
        *psmbreq = talloc_move(mem_ctx, &state->smbreq);
        return true;
}

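/*
 * Finish a pending lock request with the given status.
 */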
void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
{
        DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));

        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
}

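/*
 * Finish the first pending request that waits for exactly this lock
 * element (matching smblctx, offset and count), e.g. when that lock
 * request is cancelled. Returns true if a request was finished.
 */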
bool smbd_smb1_brl_finish_by_lock(
        struct files_struct *fsp,
        bool large_offset,
        struct smbd_lock_element lock,
        NTSTATUS finish_status)
{
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        size_t i;

        DBG_DEBUG("num_blocked=%zu\n", num_blocked);

        for (i=0; i<num_blocked; i++) {
                struct tevent_req *req = blocked[i];
                struct smbd_smb1_do_locks_state *state = tevent_req_data(
                        req, struct smbd_smb1_do_locks_state);
                uint16_t j;

                DBG_DEBUG("i=%zu, req=%p\n", i, req);

                if (state->large_offset != large_offset) {
                        continue;
                }

                for (j=0; j<state->num_locks; j++) {
                        struct smbd_lock_element *l = &state->locks[j];

                        if ((lock.smblctx == l->smblctx) &&
                            (lock.offset == l->offset) &&
                            (lock.count == l->count)) {
                                smbd_smb1_brl_finish_by_req(
                                        req, finish_status);
                                return true;
                        }
                }
        }
        return false;
}

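/*
 * files_forall() callback: finish the pending lock request whose
 * smb_request carries the given mid.
 */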
static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
        struct files_struct *fsp, void *private_data)
{
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        uint64_t mid = *((uint64_t *)private_data);
        size_t i;

        DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);

        for (i=0; i<num_blocked; i++) {
                struct tevent_req *req = blocked[i];
                struct smbd_smb1_do_locks_state *state = tevent_req_data(
                        req, struct smbd_smb1_do_locks_state);
                struct smb_request *smbreq = state->smbreq;

                if (smbreq->mid == mid) {
                        tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
                        return fsp;
                }
        }

        return NULL;
}

/*
 * This walks the list of fsps, because that is where we store the
 * blocked requests. It can be expensive, but this is legacy SMB1
 * and, from what I remember of looking at traces, these calls are
 * rare.
 */

bool smbd_smb1_brl_finish_by_mid(
        struct smbd_server_connection *sconn, uint64_t mid)
{
        struct files_struct *found = files_forall(
                sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
        return (found != NULL);
}