Line data Source code
1 : /*
2 : Unix SMB/CIFS implementation.
3 : byte range locking code
4 : Updated to handle range splits/merges.
5 :
6 : Copyright (C) Andrew Tridgell 1992-2000
7 : Copyright (C) Jeremy Allison 1992-2000
8 :
9 : This program is free software; you can redistribute it and/or modify
10 : it under the terms of the GNU General Public License as published by
11 : the Free Software Foundation; either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : This program is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU General Public License for more details.
18 :
19 : You should have received a copy of the GNU General Public License
20 : along with this program. If not, see <http://www.gnu.org/licenses/>.
21 : */
22 :
23 : /* This module implements a tdb based byte range locking service,
24 : replacing the fcntl() based byte range locking previously
25 : used. This allows us to provide the same semantics as NT */
26 :
27 : #include "includes.h"
28 : #include "system/filesys.h"
29 : #include "lib/util/server_id.h"
30 : #include "locking/proto.h"
31 : #include "smbd/globals.h"
32 : #include "dbwrap/dbwrap.h"
33 : #include "dbwrap/dbwrap_open.h"
34 : #include "serverid.h"
35 : #include "messages.h"
36 : #include "util_tdb.h"
37 :
38 : #undef DBGC_CLASS
39 : #define DBGC_CLASS DBGC_LOCKING
40 :
41 : #define ZERO_ZERO 0
42 :
/* The open brlock.tdb database (shared, module-wide handle). */

static struct db_context *brlock_db;

/*
 * In-memory representation of the byte-range locks held on one open file.
 * lock_data is a talloc'd array of num_locks entries; "modified" is set
 * whenever the array is changed so the backing record can be rewritten.
 */
struct byte_range_lock {
	struct files_struct *fsp;	/* The open file these locks belong to. */
	TALLOC_CTX *req_mem_ctx;	/* Preferred talloc parent for request-scoped
					 * allocations; brl_req_mem_ctx() falls back
					 * to the struct itself when NULL. */
	const struct GUID *req_guid;	/* GUID of the requesting operation; a static
					 * zero GUID is substituted when NULL. */
	unsigned int num_locks;		/* Number of entries in lock_data. */
	bool modified;			/* lock_data changed since load. */
	struct lock_struct *lock_data;	/* talloc'd array of held locks. */
	struct db_record *record;	/* NOTE(review): presumably the locked dbwrap
					 * record for this file — not referenced in
					 * the visible part of this file; confirm. */
};
56 :
57 : /****************************************************************************
58 : Debug info at level 10 for lock struct.
59 : ****************************************************************************/
60 :
61 0 : static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
62 : {
63 0 : struct server_id_buf tmp;
64 :
65 0 : DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
66 : "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
67 : "%s %s\n",
68 : i,
69 : pls->context.smblctx,
70 : pls->context.tid,
71 : server_id_str_buf(pls->context.pid, &tmp),
72 : pls->start,
73 : pls->size,
74 : pls->fnum,
75 : lock_type_name(pls->lock_type),
76 : lock_flav_name(pls->lock_flav));
77 0 : }
78 :
79 349924 : unsigned int brl_num_locks(const struct byte_range_lock *brl)
80 : {
81 349924 : return brl->num_locks;
82 : }
83 :
84 8462 : struct files_struct *brl_fsp(struct byte_range_lock *brl)
85 : {
86 8462 : return brl->fsp;
87 : }
88 :
89 60 : TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
90 : {
91 60 : if (brl->req_mem_ctx == NULL) {
92 0 : return talloc_get_type_abort(brl, struct byte_range_lock);
93 : }
94 :
95 60 : return brl->req_mem_ctx;
96 : }
97 :
98 60 : const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
99 : {
100 60 : if (brl->req_guid == NULL) {
101 0 : static const struct GUID brl_zero_req_guid;
102 0 : return &brl_zero_req_guid;
103 : }
104 :
105 60 : return brl->req_guid;
106 : }
107 :
108 : /****************************************************************************
109 : See if two locking contexts are equal.
110 : ****************************************************************************/
111 :
112 6844 : static bool brl_same_context(const struct lock_context *ctx1,
113 : const struct lock_context *ctx2)
114 : {
115 6844 : return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
116 12075 : (ctx1->smblctx == ctx2->smblctx) &&
117 5256 : (ctx1->tid == ctx2->tid));
118 : }
119 :
/*
 * Validate a byte range against [MS-FSA]:
 *
 *   If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
 *       return STATUS_INVALID_LOCK_RANGE
 *   }
 *
 * i.e. a zero-length range is always valid, otherwise the last byte
 * (ofs + len - 1) must not wrap past UINT64_MAX. Rearranged so no
 * intermediate expression can overflow.
 */
bool byte_range_valid(uint64_t ofs, uint64_t len)
{
	if (len == 0) {
		return true;
	}

	/* Valid iff last byte offset fits: len - 1 <= UINT64_MAX - ofs. */
	return (len - 1) <= (UINT64_MAX - ofs);
}
147 :
/*
 * Do two byte ranges overlap?
 *
 * Based on [MS-FSA] 2.1.4.10 "Algorithm for Determining If a Range Access
 * Conflicts with Byte-Range Locks". The special {0, 0} range never
 * conflicts with anything. Callers should pass valid ranges; an invalid
 * (wrapping) range is gracefully treated as extending to UINT64_MAX so
 * read/write overflow probes still behave sanely.
 */
bool byte_range_overlap(uint64_t ofs1,
			uint64_t len1,
			uint64_t ofs2,
			uint64_t len2)
{
	uint64_t last1 = UINT64_MAX;
	uint64_t last2 = UINT64_MAX;

	/* The {0, 0} range doesn't conflict with any byte-range lock. */
	if (ofs1 == 0 && len1 == 0) {
		return false;
	}
	if (ofs2 == 0 && len2 == 0) {
		return false;
	}

	/* Compute the last byte of each range, saturating on overflow. */
	if (byte_range_valid(ofs1, len1)) {
		last1 = ofs1 + len1 - 1;
	}
	if (byte_range_valid(ofs2, len2)) {
		last2 = ofs2 + len2 - 1;
	}

	/*
	 * The ranges are disjoint exactly when one starts after the
	 * last byte of the other.
	 */
	if (ofs1 > last2 || ofs2 > last1) {
		return false;
	}

	return true;
}
205 :
206 : /****************************************************************************
207 : See if lck1 and lck2 overlap.
208 : ****************************************************************************/
209 :
210 270652 : static bool brl_overlap(const struct lock_struct *lck1,
211 : const struct lock_struct *lck2)
212 : {
213 541268 : return byte_range_overlap(lck1->start,
214 270652 : lck1->size,
215 270652 : lck2->start,
216 270652 : lck2->size);
217 : }
218 :
219 : /****************************************************************************
220 : See if lock2 can be added when lock1 is in place.
221 : ****************************************************************************/
222 :
223 270407 : static bool brl_conflict(const struct lock_struct *lck1,
224 : const struct lock_struct *lck2)
225 : {
226 : /* Read locks never conflict. */
227 270407 : if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
228 1138 : return False;
229 : }
230 :
231 : /* A READ lock can stack on top of a WRITE lock if they have the same
232 : * context & fnum. */
233 270009 : if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
234 740 : brl_same_context(&lck1->context, &lck2->context) &&
235 322 : lck1->fnum == lck2->fnum) {
236 298 : return False;
237 : }
238 :
239 268971 : return brl_overlap(lck1, lck2);
240 : }
241 :
242 : /****************************************************************************
243 : See if lock2 can be added when lock1 is in place - when both locks are POSIX
244 : flavour. POSIX locks ignore fnum - they only care about dev/ino which we
245 : know already match.
246 : ****************************************************************************/
247 :
248 480 : static bool brl_conflict_posix(const struct lock_struct *lck1,
249 : const struct lock_struct *lck2)
250 : {
251 : #if defined(DEVELOPER)
252 480 : SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
253 480 : SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
254 : #endif
255 :
256 : /* Read locks never conflict. */
257 480 : if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
258 464 : return False;
259 : }
260 :
261 : /* Locks on the same context don't conflict. Ignore fnum. */
262 16 : if (brl_same_context(&lck1->context, &lck2->context)) {
263 4 : return False;
264 : }
265 :
266 : /* One is read, the other write, or the context is different,
267 : do they overlap ? */
268 12 : return brl_overlap(lck1, lck2);
269 : }
270 :
#if ZERO_ZERO
/*
 * Alternative conflict check used only when ZERO_ZERO handling is compiled
 * in (ZERO_ZERO is 0 above, so this is currently dead code). Differs from
 * brl_conflict() in that a {0,0} probe conflicts with any non-empty lock.
 */
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Same context, read probe on same handle: stacks, no conflict. */
	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	/* A {0,0} probe conflicts with any existing non-empty lock. */
	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	/* Disjoint ranges don't conflict. */
	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
296 :
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
 Note: the checks below are strictly ordered — each one assumes the
 previous ones did not fire.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	/* A read probe never conflicts with an existing read lock. */
	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		return False;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only called
		 * in the read/write path.
		 */
		return False;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass
		 */
		return false;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict
		 */
		return true;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict
		 */
		return true;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return true;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict
	 */

	return false;
}
358 :
359 : /****************************************************************************
360 : Open up the brlock.tdb database.
361 : ****************************************************************************/
362 :
363 200 : void brl_init(bool read_only)
364 : {
365 0 : int tdb_flags;
366 0 : char *db_path;
367 :
368 200 : if (brlock_db) {
369 0 : return;
370 : }
371 :
372 200 : tdb_flags = SMBD_VOLATILE_TDB_FLAGS | TDB_SEQNUM;
373 :
374 200 : db_path = lock_path(talloc_tos(), "brlock.tdb");
375 200 : if (db_path == NULL) {
376 0 : DEBUG(0, ("out of memory!\n"));
377 0 : return;
378 : }
379 :
380 200 : brlock_db = db_open(NULL, db_path,
381 : SMBD_VOLATILE_TDB_HASH_SIZE, tdb_flags,
382 : read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
383 : DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
384 200 : if (!brlock_db) {
385 0 : DEBUG(0,("Failed to open byte range locking database %s\n",
386 : db_path));
387 0 : TALLOC_FREE(db_path);
388 0 : return;
389 : }
390 200 : TALLOC_FREE(db_path);
391 : }
392 :
/****************************************************************************
 Close down the brlock.tdb database. Safe to call when it was never
 opened (TALLOC_FREE of NULL is a no-op).
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
401 :
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting: by start offset, then by size.
 Only compiled when ZERO_ZERO is enabled (it is 0 above — dead code).
 NOTE(review): start/size are 64-bit; the implicit/explicit narrowing of
 the differences to int can misorder locks whose offsets differ by more
 than INT_MAX — worth fixing if this path is ever re-enabled.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
419 :
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
 Validates the range, scans all existing locks for conflicts (pruning
 entries whose owner process no longer exists), optionally mirrors the
 lock down to a POSIX fcntl lock, then appends it to the in-memory array.
 On conflict, plock->context.smblctx is set to the blocker's smblctx.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;
	bool valid;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	valid = byte_range_valid(plock->start, plock->size);
	if (!valid) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				/* The conflicting lock's owner is dead:
				 * mark the entry stale (pid 0) and keep
				 * scanning instead of failing. */
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	/* Taking a brl contends any level2 oplocks on the file. */
	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
						    plock->start,
						    plock->size,
						    plock->lock_type,
						    &plock->context,
						    locks,
						    br_lck->num_locks,
						    &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			/* EACCES/EAGAIN: held by someone else. Anything
			 * else is a genuine system error. */
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	/* Undo the oplock contention taken above. */
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return status;
}
508 :
/****************************************************************************
 Cope with POSIX range splits and merges.

 Given an existing lock "ex" and a proposed lock "plock" with the same
 locking context, decide how ex survives the addition of plock:

   - returns the number of entries written to lck_arr (0, 1 or 2);
   - may grow/move plock in place when adjacent or overlapping
     same-type ranges merge into it;
   - a same-context ex that is fully covered by plock is discarded
     (return 0, nothing written).

 Locks of a different context are never merged — ex is copied through
 unchanged. The caller accumulates lck_arr entries into its new array.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */
					     struct lock_struct *ex, /* existing lock. */
					     struct lock_struct *plock) /* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* Same context from here on. */

	/* Case 1: disjoint, not even adjacent — keep ex unchanged. */
	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* Case 2: plock completely covers ex — discard ex, plock replaces it. */
	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {
		return 0;
	}

	/*
	 * Case 3: ex immediately follows plock (adjacent after).
	 * Same type: merge ex into plock (grow plock). Different type:
	 * keep ex as-is next to plock.
	 */
	if (plock->start + plock->size == ex->start) {
		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

	/*
	 * Case 4: ex immediately precedes plock (adjacent before).
	 * Same type: extend plock backwards over ex. Different type:
	 * keep ex as-is.
	 */
	if (ex->start + ex->size == plock->start) {
		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

	/*
	 * Case 5: ex starts inside plock and extends past its end
	 * (overlap after). Same type: grow plock to ex's end. Different
	 * type: keep only the tail of ex that lies beyond plock.
	 */
	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

	/*
	 * Case 6: ex starts before plock and ends inside it (overlap
	 * before). Same type: extend plock backwards to ex's start.
	 * Different type: keep only the head of ex before plock.
	 */
	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

	/*
	 * Case 7: ex strictly contains plock (complete overlap).
	 * Different type: split ex into head + tail around plock
	 * (the only 2-entry case). Same type: plock absorbs ex's range.
	 */
	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here — the cases above are exhaustive. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
797 :
798 : /****************************************************************************
799 : Lock a range of bytes - POSIX lock semantics.
800 : We must cope with range splits and merges.
801 : ****************************************************************************/
802 :
803 916 : static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
804 : struct lock_struct *plock)
805 : {
806 0 : unsigned int i, count, posix_count;
807 916 : struct lock_struct *locks = br_lck->lock_data;
808 0 : struct lock_struct *tp;
809 916 : bool break_oplocks = false;
810 0 : NTSTATUS status;
811 :
812 : /* No zero-zero locks for POSIX. */
813 916 : if (plock->start == 0 && plock->size == 0) {
814 0 : return NT_STATUS_INVALID_PARAMETER;
815 : }
816 :
817 : /* Don't allow 64-bit lock wrap. */
818 916 : if (plock->start + plock->size - 1 < plock->start) {
819 0 : return NT_STATUS_INVALID_PARAMETER;
820 : }
821 :
822 : /* The worst case scenario here is we have to split an
823 : existing POSIX lock range into two, and add our lock,
824 : so we need at most 2 more entries. */
825 :
826 916 : tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
827 916 : if (!tp) {
828 0 : return NT_STATUS_NO_MEMORY;
829 : }
830 :
831 916 : count = posix_count = 0;
832 :
833 1388 : for (i=0; i < br_lck->num_locks; i++) {
834 480 : struct lock_struct *curr_lock = &locks[i];
835 :
836 480 : if (curr_lock->lock_flav == WINDOWS_LOCK) {
837 : /* Do any Windows flavour locks conflict ? */
838 0 : if (brl_conflict(curr_lock, plock)) {
839 0 : if (!serverid_exists(&curr_lock->context.pid)) {
840 0 : curr_lock->context.pid.pid = 0;
841 0 : br_lck->modified = true;
842 0 : continue;
843 : }
844 : /* No games with error messages. */
845 0 : TALLOC_FREE(tp);
846 : /* Remember who blocked us. */
847 0 : plock->context.smblctx = curr_lock->context.smblctx;
848 0 : return NT_STATUS_LOCK_NOT_GRANTED;
849 : }
850 : /* Just copy the Windows lock into the new array. */
851 0 : memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
852 0 : count++;
853 : } else {
854 480 : unsigned int tmp_count = 0;
855 :
856 : /* POSIX conflict semantics are different. */
857 480 : if (brl_conflict_posix(curr_lock, plock)) {
858 8 : if (!serverid_exists(&curr_lock->context.pid)) {
859 0 : curr_lock->context.pid.pid = 0;
860 0 : br_lck->modified = true;
861 0 : continue;
862 : }
863 : /* Can't block ourselves with POSIX locks. */
864 : /* No games with error messages. */
865 8 : TALLOC_FREE(tp);
866 : /* Remember who blocked us. */
867 8 : plock->context.smblctx = curr_lock->context.smblctx;
868 8 : return NT_STATUS_LOCK_NOT_GRANTED;
869 : }
870 :
871 : /* Work out overlaps. */
872 472 : tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
873 472 : posix_count += tmp_count;
874 472 : count += tmp_count;
875 : }
876 : }
877 :
878 : /*
879 : * Break oplocks while we hold a brl. Since lock() and unlock() calls
880 : * are not symmetric with POSIX semantics, we cannot guarantee our
881 : * contend_level2_oplocks_begin/end calls will be acquired and
882 : * released one-for-one as with Windows semantics. Therefore we only
883 : * call contend_level2_oplocks_begin if this is the first POSIX brl on
884 : * the file.
885 : */
886 908 : break_oplocks = (posix_count == 0);
887 908 : if (break_oplocks) {
888 876 : contend_level2_oplocks_begin(br_lck->fsp,
889 : LEVEL2_CONTEND_POSIX_BRL);
890 : }
891 :
892 : /* Try and add the lock in order, sorted by lock start. */
893 940 : for (i=0; i < count; i++) {
894 32 : struct lock_struct *curr_lock = &tp[i];
895 :
896 32 : if (curr_lock->start <= plock->start) {
897 26 : continue;
898 : }
899 : }
900 :
901 908 : if (i < count) {
902 0 : memmove(&tp[i+1], &tp[i],
903 0 : (count - i)*sizeof(struct lock_struct));
904 : }
905 908 : memcpy(&tp[i], plock, sizeof(struct lock_struct));
906 908 : count++;
907 :
908 : /* We can get the POSIX lock, now see if it needs to
909 : be mapped into a lower level POSIX one, and if so can
910 : we get it ? */
911 :
912 908 : if (lp_posix_locking(br_lck->fsp->conn->params)) {
913 0 : int errno_ret;
914 :
915 : /* The lower layer just needs to attempt to
916 : get the system POSIX lock. We've weeded out
917 : any conflicts above. */
918 :
919 908 : if (!set_posix_lock_posix_flavour(br_lck->fsp,
920 : plock->start,
921 : plock->size,
922 : plock->lock_type,
923 908 : &plock->context,
924 : &errno_ret)) {
925 :
926 : /* We don't know who blocked us. */
927 0 : plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
928 :
929 0 : if (errno_ret == EACCES || errno_ret == EAGAIN) {
930 0 : TALLOC_FREE(tp);
931 0 : status = NT_STATUS_LOCK_NOT_GRANTED;
932 0 : goto fail;
933 : } else {
934 0 : TALLOC_FREE(tp);
935 0 : status = map_nt_error_from_unix(errno);
936 0 : goto fail;
937 : }
938 : }
939 : }
940 :
941 : /* If we didn't use all the allocated size,
942 : * Realloc so we don't leak entries per lock call. */
943 908 : if (count < br_lck->num_locks + 2) {
944 908 : tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
945 908 : if (!tp) {
946 0 : status = NT_STATUS_NO_MEMORY;
947 0 : goto fail;
948 : }
949 : }
950 :
951 908 : br_lck->num_locks = count;
952 908 : TALLOC_FREE(br_lck->lock_data);
953 908 : br_lck->lock_data = tp;
954 908 : locks = tp;
955 908 : br_lck->modified = True;
956 :
957 : /* A successful downgrade from write to read lock can trigger a lock
958 : re-evalutation where waiting readers can now proceed. */
959 :
960 908 : return NT_STATUS_OK;
961 0 : fail:
962 0 : if (break_oplocks) {
963 0 : contend_level2_oplocks_end(br_lck->fsp,
964 : LEVEL2_CONTEND_POSIX_BRL);
965 : }
966 0 : return status;
967 : }
968 :
/****************************************************************************
 Lock a range of bytes.
 Builds a lock_struct from the caller's parameters plus the tid/fnum of
 the open file, then dispatches to the Windows-flavour VFS hook or the
 POSIX-flavour implementation. On failure, reports who blocked us via
 *blocker_pid / *psmblctx (when psmblctx is non-NULL).
****************************************************************************/

NTSTATUS brl_lock(
	struct byte_range_lock *br_lck,
	uint64_t smblctx,
	struct server_id pid,
	br_off start,
	br_off size,
	enum brl_type lock_type,
	enum brl_flavour lock_flav,
	struct server_id *blocker_pid,
	uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

	ZERO_STRUCT(lock);

#if !ZERO_ZERO
	/* {0,0} locks only get special handling with ZERO_ZERO compiled
	 * in; log them so we hear about clients that send them. */
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif
	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*blocker_pid = lock.context.pid;
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
1024 :
1025 : /****************************************************************************
1026 : Unlock a range of bytes - Windows semantics.
1027 : ****************************************************************************/
1028 :
/*
 * Remove one Windows-flavour byte range lock that exactly matches plock
 * (same context, fnum, start and size).
 *
 * Returns True if a matching lock was found and removed, False if no
 * matching lock exists. On removal the corresponding POSIX region is
 * released (when POSIX locking is enabled) and level2 oplock
 * contention for this file is ended.
 */
bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Remove entry i from the array and mark the record dirty so the
	   destructor flushes the change back to brlock.tdb. */
	ARRAY_DEL_ELEMENT(locks, i, br_lck->num_locks);
	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
1105 :
1106 : /****************************************************************************
1107 : Unlock a range of bytes - POSIX semantics.
1108 : ****************************************************************************/
1109 :
1110 476 : static bool brl_unlock_posix(struct byte_range_lock *br_lck,
1111 : struct lock_struct *plock)
1112 : {
1113 0 : unsigned int i, count;
1114 0 : struct lock_struct *tp;
1115 476 : struct lock_struct *locks = br_lck->lock_data;
1116 476 : bool overlap_found = False;
1117 :
1118 : /* No zero-zero locks for POSIX. */
1119 476 : if (plock->start == 0 && plock->size == 0) {
1120 0 : return False;
1121 : }
1122 :
1123 : /* Don't allow 64-bit lock wrap. */
1124 476 : if (plock->start + plock->size < plock->start ||
1125 476 : plock->start + plock->size < plock->size) {
1126 0 : DEBUG(10,("brl_unlock_posix: lock wrap\n"));
1127 0 : return False;
1128 : }
1129 :
1130 : /* The worst case scenario here is we have to split an
1131 : existing POSIX lock range into two, so we need at most
1132 : 1 more entry. */
1133 :
1134 476 : tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
1135 476 : if (!tp) {
1136 0 : DEBUG(10,("brl_unlock_posix: malloc fail\n"));
1137 0 : return False;
1138 : }
1139 :
1140 476 : count = 0;
1141 976 : for (i = 0; i < br_lck->num_locks; i++) {
1142 504 : struct lock_struct *lock = &locks[i];
1143 0 : unsigned int tmp_count;
1144 :
1145 : /* Only remove our own locks - ignore fnum. */
1146 504 : if (!brl_same_context(&lock->context, &plock->context)) {
1147 14 : memcpy(&tp[count], lock, sizeof(struct lock_struct));
1148 14 : count++;
1149 14 : continue;
1150 : }
1151 :
1152 490 : if (lock->lock_flav == WINDOWS_LOCK) {
1153 : /* Do any Windows flavour locks conflict ? */
1154 4 : if (brl_conflict(lock, plock)) {
1155 0 : TALLOC_FREE(tp);
1156 0 : return false;
1157 : }
1158 : /* Just copy the Windows lock into the new array. */
1159 4 : memcpy(&tp[count], lock, sizeof(struct lock_struct));
1160 4 : count++;
1161 4 : continue;
1162 : }
1163 :
1164 : /* Work out overlaps. */
1165 486 : tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);
1166 :
1167 486 : if (tmp_count == 0) {
1168 : /* plock overlapped the existing lock completely,
1169 : or replaced it. Don't copy the existing lock. */
1170 472 : overlap_found = true;
1171 14 : } else if (tmp_count == 1) {
1172 : /* Either no overlap, (simple copy of existing lock) or
1173 : * an overlap of an existing lock. */
1174 : /* If the lock changed size, we had an overlap. */
1175 10 : if (tp[count].size != lock->size) {
1176 0 : overlap_found = true;
1177 : }
1178 10 : count += tmp_count;
1179 4 : } else if (tmp_count == 2) {
1180 : /* We split a lock range in two. */
1181 4 : overlap_found = true;
1182 4 : count += tmp_count;
1183 :
1184 : /* Optimisation... */
1185 : /* We know we're finished here as we can't overlap any
1186 : more POSIX locks. Copy the rest of the lock array. */
1187 :
1188 4 : if (i < br_lck->num_locks - 1) {
1189 0 : memcpy(&tp[count], &locks[i+1],
1190 0 : sizeof(*locks)*((br_lck->num_locks-1) - i));
1191 0 : count += ((br_lck->num_locks-1) - i);
1192 : }
1193 4 : break;
1194 : }
1195 :
1196 : }
1197 :
1198 476 : if (!overlap_found) {
1199 : /* Just ignore - no change. */
1200 0 : TALLOC_FREE(tp);
1201 0 : DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
1202 0 : return True;
1203 : }
1204 :
1205 : /* Unlock any POSIX regions. */
1206 476 : if(lp_posix_locking(br_lck->fsp->conn->params)) {
1207 476 : release_posix_lock_posix_flavour(br_lck->fsp,
1208 : plock->start,
1209 : plock->size,
1210 476 : &plock->context,
1211 : tp,
1212 : count);
1213 : }
1214 :
1215 : /* Realloc so we don't leak entries per unlock call. */
1216 476 : if (count) {
1217 30 : tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
1218 30 : if (!tp) {
1219 0 : DEBUG(10,("brl_unlock_posix: realloc fail\n"));
1220 0 : return False;
1221 : }
1222 : } else {
1223 : /* We deleted the last lock. */
1224 446 : TALLOC_FREE(tp);
1225 446 : tp = NULL;
1226 : }
1227 :
1228 476 : contend_level2_oplocks_end(br_lck->fsp,
1229 : LEVEL2_CONTEND_POSIX_BRL);
1230 :
1231 476 : br_lck->num_locks = count;
1232 476 : TALLOC_FREE(br_lck->lock_data);
1233 476 : locks = tp;
1234 476 : br_lck->lock_data = tp;
1235 476 : br_lck->modified = True;
1236 :
1237 476 : return True;
1238 : }
1239 :
1240 : /****************************************************************************
1241 : Unlock a range of bytes.
1242 : ****************************************************************************/
1243 :
1244 3365 : bool brl_unlock(struct byte_range_lock *br_lck,
1245 : uint64_t smblctx,
1246 : struct server_id pid,
1247 : br_off start,
1248 : br_off size,
1249 : enum brl_flavour lock_flav)
1250 : {
1251 22 : struct lock_struct lock;
1252 :
1253 3365 : lock.context.smblctx = smblctx;
1254 3365 : lock.context.pid = pid;
1255 3365 : lock.context.tid = br_lck->fsp->conn->cnum;
1256 3365 : lock.start = start;
1257 3365 : lock.size = size;
1258 3365 : lock.fnum = br_lck->fsp->fnum;
1259 3365 : lock.lock_type = UNLOCK_LOCK;
1260 3365 : lock.lock_flav = lock_flav;
1261 :
1262 3365 : if (lock_flav == WINDOWS_LOCK) {
1263 2889 : return SMB_VFS_BRL_UNLOCK_WINDOWS(
1264 : br_lck->fsp->conn, br_lck, &lock);
1265 : } else {
1266 476 : return brl_unlock_posix(br_lck, &lock);
1267 : }
1268 : }
1269 :
1270 : /****************************************************************************
1271 : Test if we could add a lock if we wanted to.
1272 : Returns True if the region required is currently unlocked, False if locked.
1273 : ****************************************************************************/
1274 :
bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly snapshot: we hold no record lock,
				   so we can't validate/clean stale entries -
				   just report the conflict. */
				return false;
			}

			/* If the owning server process is gone, mark the
			   entry for autocleanup (pid 0 entries are reaped
			   in byte_range_lock_flush) and keep scanning
			   instead of reporting a stale conflict. */
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
1334 :
1335 : /****************************************************************************
1336 : Query for existing locks.
1337 : ****************************************************************************/
1338 :
1339 0 : NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
1340 : uint64_t *psmblctx,
1341 : struct server_id pid,
1342 : br_off *pstart,
1343 : br_off *psize,
1344 : enum brl_type *plock_type,
1345 : enum brl_flavour lock_flav)
1346 : {
1347 0 : unsigned int i;
1348 0 : struct lock_struct lock;
1349 0 : const struct lock_struct *locks = br_lck->lock_data;
1350 0 : files_struct *fsp = br_lck->fsp;
1351 :
1352 0 : lock.context.smblctx = *psmblctx;
1353 0 : lock.context.pid = pid;
1354 0 : lock.context.tid = br_lck->fsp->conn->cnum;
1355 0 : lock.start = *pstart;
1356 0 : lock.size = *psize;
1357 0 : lock.fnum = fsp->fnum;
1358 0 : lock.lock_type = *plock_type;
1359 0 : lock.lock_flav = lock_flav;
1360 :
1361 : /* Make sure existing locks don't conflict */
1362 0 : for (i=0; i < br_lck->num_locks; i++) {
1363 0 : const struct lock_struct *exlock = &locks[i];
1364 0 : bool conflict = False;
1365 :
1366 0 : if (exlock->lock_flav == WINDOWS_LOCK) {
1367 0 : conflict = brl_conflict(exlock, &lock);
1368 : } else {
1369 0 : conflict = brl_conflict_posix(exlock, &lock);
1370 : }
1371 :
1372 0 : if (conflict) {
1373 0 : *psmblctx = exlock->context.smblctx;
1374 0 : *pstart = exlock->start;
1375 0 : *psize = exlock->size;
1376 0 : *plock_type = exlock->lock_type;
1377 0 : return NT_STATUS_LOCK_NOT_GRANTED;
1378 : }
1379 : }
1380 :
1381 : /*
1382 : * There is no lock held by an SMB daemon, check to
1383 : * see if there is a POSIX lock from a UNIX or NFS process.
1384 : */
1385 :
1386 0 : if(lp_posix_locking(fsp->conn->params)) {
1387 0 : bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
1388 :
1389 0 : DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
1390 : "file %s\n", (uintmax_t)*pstart,
1391 : (uintmax_t)*psize, ret ? "locked" : "unlocked",
1392 : fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
1393 :
1394 0 : if (ret) {
1395 : /* Hmmm. No clue what to set smblctx to - use -1. */
1396 0 : *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
1397 0 : return NT_STATUS_LOCK_NOT_GRANTED;
1398 : }
1399 : }
1400 :
1401 0 : return NT_STATUS_OK;
1402 : }
1403 :
1404 :
1405 : /****************************************************************************
1406 : Remove any locks associated with a open file.
1407 : We return True if this process owns any other Windows locks on this
1408 : fd and so we should not immediately close the fd.
1409 : ****************************************************************************/
1410 :
1411 899 : void brl_close_fnum(struct byte_range_lock *br_lck)
1412 : {
1413 899 : files_struct *fsp = br_lck->fsp;
1414 899 : uint32_t tid = fsp->conn->cnum;
1415 899 : uint64_t fnum = fsp->fnum;
1416 6 : unsigned int i;
1417 899 : struct lock_struct *locks = br_lck->lock_data;
1418 899 : struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
1419 6 : struct lock_struct *locks_copy;
1420 6 : unsigned int num_locks_copy;
1421 :
1422 : /* Copy the current lock array. */
1423 899 : if (br_lck->num_locks) {
1424 803 : locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
1425 803 : if (!locks_copy) {
1426 0 : smb_panic("brl_close_fnum: talloc failed");
1427 : }
1428 : } else {
1429 96 : locks_copy = NULL;
1430 : }
1431 :
1432 899 : num_locks_copy = br_lck->num_locks;
1433 :
1434 2068 : for (i=0; i < num_locks_copy; i++) {
1435 1169 : struct lock_struct *lock = &locks_copy[i];
1436 :
1437 2290 : if (lock->context.tid == tid &&
1438 1121 : server_id_equal(&lock->context.pid, &pid) &&
1439 1121 : (lock->fnum == fnum)) {
1440 1003 : brl_unlock(
1441 : br_lck,
1442 : lock->context.smblctx,
1443 : pid,
1444 : lock->start,
1445 : lock->size,
1446 : lock->lock_flav);
1447 : }
1448 : }
1449 899 : }
1450 :
1451 160 : bool brl_mark_disconnected(struct files_struct *fsp)
1452 : {
1453 160 : uint32_t tid = fsp->conn->cnum;
1454 0 : uint64_t smblctx;
1455 160 : uint64_t fnum = fsp->fnum;
1456 0 : unsigned int i;
1457 160 : struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
1458 160 : struct byte_range_lock *br_lck = NULL;
1459 :
1460 160 : if (fsp->op == NULL) {
1461 0 : return false;
1462 : }
1463 :
1464 160 : smblctx = fsp->op->global->open_persistent_id;
1465 :
1466 160 : if (!fsp->op->global->durable) {
1467 0 : return false;
1468 : }
1469 :
1470 160 : if (fsp->current_lock_count == 0) {
1471 154 : return true;
1472 : }
1473 :
1474 6 : br_lck = brl_get_locks(talloc_tos(), fsp);
1475 6 : if (br_lck == NULL) {
1476 0 : return false;
1477 : }
1478 :
1479 12 : for (i=0; i < br_lck->num_locks; i++) {
1480 6 : struct lock_struct *lock = &br_lck->lock_data[i];
1481 :
1482 : /*
1483 : * as this is a durable handle, we only expect locks
1484 : * of the current file handle!
1485 : */
1486 :
1487 6 : if (lock->context.smblctx != smblctx) {
1488 0 : TALLOC_FREE(br_lck);
1489 0 : return false;
1490 : }
1491 :
1492 6 : if (lock->context.tid != tid) {
1493 0 : TALLOC_FREE(br_lck);
1494 0 : return false;
1495 : }
1496 :
1497 6 : if (!server_id_equal(&lock->context.pid, &self)) {
1498 0 : TALLOC_FREE(br_lck);
1499 0 : return false;
1500 : }
1501 :
1502 6 : if (lock->fnum != fnum) {
1503 0 : TALLOC_FREE(br_lck);
1504 0 : return false;
1505 : }
1506 :
1507 6 : server_id_set_disconnected(&lock->context.pid);
1508 6 : lock->context.tid = TID_FIELD_INVALID;
1509 6 : lock->fnum = FNUM_FIELD_INVALID;
1510 : }
1511 :
1512 6 : br_lck->modified = true;
1513 6 : TALLOC_FREE(br_lck);
1514 6 : return true;
1515 : }
1516 :
/*
 * Reactivate the byte range locks of a disconnected durable handle on
 * reconnect. Every stored lock must be a disconnected entry for this
 * handle's persistent id; each is rewritten with the new server_id,
 * tid and fnum. Returns false (changing nothing persistent) if any
 * entry does not match the expected disconnected state.
 */
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		/* No locks stored for this file - nothing to reactivate. */
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		/* Rebind the lock to the reconnected handle. */
		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	/* Freeing br_lck flushes the reactivated entries to the db. */
	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
1590 :
/*
 * State passed through the dbwrap traversal by brl_forall():
 * fn is invoked once per lock record entry, private_data is handed
 * through unchanged.
 */
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
1599 :
1600 : /****************************************************************************
1601 : Traverse the whole database with this function, calling traverse_callback
1602 : on each lock.
1603 : ****************************************************************************/
1604 :
/*
 * dbwrap traversal callback: decode one brlock.tdb record (an array of
 * struct lock_struct keyed by file_id) and invoke the user callback for
 * each lock. Returns 0 to continue traversal, -1 on allocation failure
 * to terminate it.
 */
static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
1645 :
1646 : /*******************************************************************
1647 : Call the specified function on each lock in the database.
1648 : ********************************************************************/
1649 :
1650 2 : int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
1651 : enum brl_type lock_type,
1652 : enum brl_flavour lock_flav,
1653 : br_off start, br_off size,
1654 : void *private_data),
1655 : void *private_data)
1656 : {
1657 0 : struct brl_forall_cb cb;
1658 0 : NTSTATUS status;
1659 2 : int count = 0;
1660 :
1661 2 : if (!brlock_db) {
1662 0 : return 0;
1663 : }
1664 2 : cb.fn = fn;
1665 2 : cb.private_data = private_data;
1666 2 : status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);
1667 :
1668 2 : if (!NT_STATUS_IS_OK(status)) {
1669 0 : return -1;
1670 : } else {
1671 2 : return count;
1672 : }
1673 : }
1674 :
1675 : /*******************************************************************
1676 : Store a potentially modified set of byte range lock data back into
1677 : the database.
1678 : Unlock the record.
1679 : ********************************************************************/
1680 :
/*
 * Write a (possibly modified) lock array back to brlock.tdb and drop
 * the record lock. Entries whose owner pid was zeroed by
 * brl_locktest() (dead-process autocleanup) are removed first; an
 * empty array deletes the record entirely.
 */
static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	/* Reap autocleanup entries by swapping in the last array
	   element; ordering of the lock array is not preserved. */
	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		/* Store the raw lock array as the record value. */
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	/* Freeing the record releases the dbwrap chainlock. */
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}
1734 :
/* Talloc destructor: flush any modifications and release the record
   lock when the byte_range_lock is freed. */
static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
1740 :
1741 10541 : static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
1742 : {
1743 60 : size_t data_len;
1744 :
1745 10541 : if (data.dsize == 0) {
1746 1985 : return true;
1747 : }
1748 8539 : if (data.dsize % sizeof(struct lock_struct) != 0) {
1749 0 : DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
1750 0 : return false;
1751 : }
1752 :
1753 8539 : br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
1754 8539 : data_len = br_lck->num_locks * sizeof(struct lock_struct);
1755 :
1756 8539 : br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
1757 8539 : if (br_lck->lock_data == NULL) {
1758 0 : DEBUG(1, ("talloc_memdup failed\n"));
1759 0 : return false;
1760 : }
1761 8496 : return true;
1762 : }
1763 :
1764 : /*******************************************************************
1765 : Fetch a set of byte range lock data from the database.
1766 : Leave the record locked.
1767 : TALLOC_FREE(brl) will release the lock in the destructor.
1768 : ********************************************************************/
1769 :
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	/* Records are keyed by the file's file_id. */
	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	/* Takes the dbwrap record lock; it is held until br_lck->record
	   is freed (normally from the destructor below). */
	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	/* The destructor flushes any modifications back to the db and
	   releases the record lock when the caller frees br_lck. */
	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
1816 :
1817 6753 : struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
1818 : files_struct *fsp,
1819 : TALLOC_CTX *req_mem_ctx,
1820 : const struct GUID *req_guid)
1821 : {
1822 6753 : struct byte_range_lock *br_lck = NULL;
1823 :
1824 6753 : br_lck = brl_get_locks(mem_ctx, fsp);
1825 6753 : if (br_lck == NULL) {
1826 0 : return NULL;
1827 : }
1828 6753 : SMB_ASSERT(req_mem_ctx != NULL);
1829 6753 : br_lck->req_mem_ctx = req_mem_ctx;
1830 6753 : SMB_ASSERT(req_guid != NULL);
1831 6753 : br_lck->req_guid = req_guid;
1832 :
1833 6753 : return br_lck;
1834 : }
1835 :
/* State for brl_get_locks_readonly_parser(): where to allocate the
   parsed byte_range_lock and where to store the result pointer. */
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};
1840 :
1841 260 : static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
1842 : void *private_data)
1843 : {
1844 260 : struct brl_get_locks_readonly_state *state =
1845 : (struct brl_get_locks_readonly_state *)private_data;
1846 11 : struct byte_range_lock *br_lck;
1847 :
1848 260 : br_lck = talloc_pooled_object(
1849 : state->mem_ctx, struct byte_range_lock, 1, data.dsize);
1850 260 : if (br_lck == NULL) {
1851 0 : *state->br_lock = NULL;
1852 0 : return;
1853 : }
1854 260 : *br_lck = (struct byte_range_lock) { 0 };
1855 260 : if (!brl_parse_data(br_lck, data)) {
1856 0 : *state->br_lock = NULL;
1857 0 : return;
1858 : }
1859 260 : *state->br_lock = br_lck;
1860 : }
1861 :
/*
 * Fetch the lock array for fsp without taking the record lock.
 * The parsed result is cached on the fsp and reused as long as the
 * database sequence number is unchanged; the returned object must not
 * be modified or stored (record == NULL, modified == false).
 */
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}

	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		/* Parser signalled an allocation/parse failure. */
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
1925 :
/*
 * Remove the byte range lock record for a file whose disconnected
 * durable open (identified by open_persistent_id) is being cleaned
 * up. Every lock in the record must be disconnected and belong to
 * that open; otherwise nothing is deleted and false is returned.
 * Returns true when the record was deleted or no locks existed.
 */
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	/* Lock the record while we validate and delete it. */
	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct*)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	/* Refuse to clean up if any entry is still live or belongs to
	   a different persistent open. */
	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id,
				 ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id,
			 nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num,
		  open_persistent_id);

	ret = true;
done:
	talloc_free(frame);
	return ret;
}
|