/*
  Unix SMB/CIFS implementation.

  POSIX NTVFS backend - locking

  Copyright (C) Andrew Tridgell 2004

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "../lib/util/dlinklist.h"
#include "messaging/messaging.h"


/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
			 struct pvfs_file *f,
			 uint32_t smbpid,
			 uint64_t offset, uint64_t count,
			 enum brl_type rw)
{
	if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
		return NT_STATUS_OK;
	}

	return brlock_locktest(pvfs->brl_context,
			       f->brl_handle,
			       smbpid,
			       offset, count, rw);
}

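/*
  usage sketch (illustrative only; not code from this module): callers
  in the read/write paths are expected to test a range before doing
  the IO, roughly:

    status = pvfs_check_lock(pvfs, f, req->smbpid,
                             offset, count, READ_LOCK);
    if (!NT_STATUS_IS_OK(status)) {
            return status;  (typically NT_STATUS_FILE_LOCK_CONFLICT)
    }

  with strict locking disabled the test short-circuits to NT_STATUS_OK
  and the IO always proceeds
*/
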
/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev; /* entry in f->pending_list */
	struct pvfs_state *pvfs;
	union smb_lock *lck;            /* the original lock request */
	struct pvfs_file *f;
	struct ntvfs_request *req;
	int pending_lock;               /* index into the lock array of the lock we are waiting on */
	struct pvfs_wait *wait_handle;  /* from pvfs_wait_message(); freeing it cancels the wait */
	struct timeval end_time;        /* absolute timeout of the blocking lock */
};

/*
  a secondary attempt to set up a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
				   struct ntvfs_request *req,
				   struct pvfs_file *f,
				   struct smb_lock_entry *locks,
				   int i,
				   NTSTATUS status)
{
	/* undo the locks we just did */
	for (i--;i>=0;i--) {
		brlock_unlock(pvfs->brl_context,
			      f->brl_handle,
			      locks[i].pid,
			      locks[i].offset,
			      locks[i].count);
		f->lock_count--;
	}
	req->async_states->status = status;
	req->async_states->send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
	struct pvfs_pending_lock *pending = talloc_get_type(private_data,
							    struct pvfs_pending_lock);
	struct pvfs_state *pvfs = pending->pvfs;
	struct pvfs_file *f = pending->f;
	struct ntvfs_request *req = pending->req;
	union smb_lock *lck = pending->lck;
	struct smb_lock_entry *locks;
	enum brl_type rw;
	NTSTATUS status;
	int i;
	bool timed_out;

	timed_out = (reason != PVFS_WAIT_EVENT);

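	/* in a lockx request the unlock entries come first in the locks
	   array, followed by the lock entries; skip past the unlock
	   records so we index only the locks being acquired */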
	locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = READ_LOCK;
	} else {
		rw = WRITE_LOCK;
	}

	DLIST_REMOVE(f->pending_list, pending);

	/* we don't retry on a cancel */
	if (reason == PVFS_WAIT_CANCEL) {
		if (pvfs->ntvfs->ctx->protocol < PROTOCOL_SMB2_02) {
			status = NT_STATUS_FILE_LOCK_CONFLICT;
		} else {
			status = NT_STATUS_CANCELLED;
		}
	} else {
		/*
		 * here it's important to pass the pending pointer
		 * because with this we'll get the correct error code
		 * FILE_LOCK_CONFLICT in the error case
		 */
		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[pending->pending_lock].pid,
				     locks[pending->pending_lock].offset,
				     locks[pending->pending_lock].count,
				     rw, pending);
	}
	if (NT_STATUS_IS_OK(status)) {
		f->lock_count++;
		timed_out = false;
	}

	/* if we have failed and timed out, or succeeded, then we
	   don't need the pending lock any more */
	if (NT_STATUS_IS_OK(status) || timed_out) {
		NTSTATUS status2;
		status2 = brlock_remove_pending(pvfs->brl_context,
						f->brl_handle, pending);
		if (!NT_STATUS_IS_OK(status2)) {
			DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
		}
		talloc_free(pending->wait_handle);
	}

	if (!NT_STATUS_IS_OK(status)) {
		if (timed_out) {
			/* no more chances */
			pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
			talloc_free(pending);
		} else {
			/* we can try again */
			DLIST_ADD(f->pending_list, pending);
		}
		return;
	}

	/* if we haven't timed out yet, then we can do more pending locks */
	if (rw == READ_LOCK) {
		rw = PENDING_READ_LOCK;
	} else {
		rw = PENDING_WRITE_LOCK;
	}

	/* we've now got the pending lock. try and get the rest, which might
	   lead to more pending locks */
	for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
		pending->pending_lock = i;

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			/* a timed lock failed - set up a wait message to handle
			   the pending lock notification or a timeout */
			pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
								 pending->end_time,
								 pvfs_pending_lock_continue,
								 pending);
			if (pending->wait_handle == NULL) {
				pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
				talloc_free(pending);
			} else {
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
			}
			return;
		}

		f->lock_count++;
	}

	/* we've managed to get all the locks. Tell the client */
	req->async_states->status = NT_STATUS_OK;
	req->async_states->send_fn(req);
	talloc_free(pending);
}
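
/*
  note: the waits set up in this file use MSG_BRL_RETRY - the brl layer
  sends that message when an overlapping range is unlocked, while
  pvfs_wait_message() itself fires the callback with the timeout and
  cancel reasons handled above
*/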


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
	struct pvfs_pending_lock *p, *next;

	if (f->lock_count || f->pending_list) {
		DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
			 (double)f->lock_count));
		brlock_close(f->pvfs->brl_context, f->brl_handle);
		f->lock_count = 0;
	}

	/* reply to all the pending lock requests, telling them the
	   lock failed */
	for (p=f->pending_list;p;p=next) {
		next = p->next;
		DLIST_REMOVE(f->pending_list, p);
		p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
		p->req->async_states->send_fn(p->req);
	}
}


/*
  cancel a set of locks
*/
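/*
  matching sketch (illustrative; values are examples): a client that
  issued

    lockx(file, mode=M, timeout=5000, locks=[{pid, offset=0, count=4}])

  cancels it by re-sending the identical request with
  LOCKING_ANDX_CANCEL_LOCK or'd into the mode; any difference in the
  counts, file handle, mode or lock ranges makes the cancel fail with
  ERRcancelviolation
*/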
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
				 struct pvfs_file *f)
{
	struct pvfs_pending_lock *p;

	for (p=f->pending_list;p;p=p->next) {
		/* check if the lock request matches exactly - you can only cancel with exact matches */
		if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
		    p->lck->lockx.in.lock_cnt == lck->lockx.in.lock_cnt &&
		    p->lck->lockx.in.file.ntvfs == lck->lockx.in.file.ntvfs &&
		    p->lck->lockx.in.mode == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
			int i;

			for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
				if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
				    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
				    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
					break;
				}
			}
			/* any mismatch in the lock entries means this is
			   not the request being cancelled */
			if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

			/* an exact match! we can cancel it, which is equivalent
			   to triggering the timeout early */
			pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
			return NT_STATUS_OK;
		}
	}

	return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct ntvfs_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
						  struct pvfs_state);
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;
	NTSTATUS status;

	if (lck->generic.level != RAW_LOCK_GENERIC) {
		return ntvfs_map_lock(ntvfs, req, lck);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		return pvfs_oplock_release(ntvfs, req, lck);
	}

	f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	if (f->handle->fd == -1) {
		return NT_STATUS_FILE_IS_A_DIRECTORY;
	}

	status = pvfs_break_level2_oplocks(f);
	NT_STATUS_NOT_OK_RETURN(status);

	if (lck->lockx.in.timeout != 0 &&
	    (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		pending = talloc(f, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		pending->end_time =
			timeval_current_ofs_msec(lck->lockx.in.timeout);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}
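
	/* when a timeout is set, the PENDING_* lock types ask the brl
	   layer to record a pending lock on conflict, so that we get a
	   MSG_BRL_RETRY notification when the blocking lock is released
	   (see pvfs_pending_lock_continue above) */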

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		talloc_free(pending);
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems not to be supported by any Windows server,
		   or used by any client */
		talloc_free(pending);
		return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
	}

	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		status = brlock_unlock(pvfs->brl_context,
				       f->brl_handle,
				       locks[i].pid,
				       locks[i].offset,
				       locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(pending);
			return status;
		}
		f->lock_count--;
	}

	/* skip past the unlock entries to the lock entries */
	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - set up a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					talloc_free(pending);
					return NT_STATUS_NO_MEMORY;
				}
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
				return NT_STATUS_OK;
			}

			/* undo the locks we just did */
			for (i--;i>=0;i--) {
				brlock_unlock(pvfs->brl_context,
					      f->brl_handle,
					      locks[i].pid,
					      locks[i].offset,
					      locks[i].count);
				f->lock_count--;
			}
			talloc_free(pending);
			return status;
		}
		f->lock_count++;
	}

	talloc_free(pending);
	return NT_STATUS_OK;
}
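
/*
  flow sketch (summary of the above, not new behaviour): a blocking
  lock that hits a conflict returns NT_STATUS_OK to the ntvfs layer
  straight away after queueing a pvfs_pending_lock; the client only
  gets its reply later, from pvfs_pending_lock_continue() or
  pvfs_lock_async_failed(), once the lock is obtained, times out, or
  is cancelled
*/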