LCOV - code coverage report
Current view: top level - source3/lib - g_lock.c (source / functions)
Test: coverage report for master 2f515e9b
Date: 2024-04-21 15:09:00

              Hit    Total    Coverage
Lines:        690      880      78.4 %
Functions:     44       47      93.6 %

          Line data    Source code
       1             : /*
       2             :    Unix SMB/CIFS implementation.
       3             :    global locks based on dbwrap and messaging
       4             :    Copyright (C) 2009 by Volker Lendecke
       5             : 
       6             :    This program is free software; you can redistribute it and/or modify
       7             :    it under the terms of the GNU General Public License as published by
       8             :    the Free Software Foundation; either version 3 of the License, or
       9             :    (at your option) any later version.
      10             : 
      11             :    This program is distributed in the hope that it will be useful,
      12             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      14             :    GNU General Public License for more details.
      15             : 
      16             :    You should have received a copy of the GNU General Public License
      17             :    along with this program.  If not, see <http://www.gnu.org/licenses/>.
      18             : */
      19             : 
      20             : #include "replace.h"
      21             : #include "system/filesys.h"
      22             : #include "lib/util/server_id.h"
      23             : #include "lib/util/debug.h"
      24             : #include "lib/util/talloc_stack.h"
      25             : #include "lib/util/samba_util.h"
      26             : #include "lib/util_path.h"
      27             : #include "dbwrap/dbwrap.h"
      28             : #include "dbwrap/dbwrap_open.h"
      29             : #include "dbwrap/dbwrap_watch.h"
      30             : #include "g_lock.h"
      31             : #include "util_tdb.h"
      32             : #include "../lib/util/tevent_ntstatus.h"
      33             : #include "messages.h"
      34             : #include "serverid.h"
      35             : 
      36             : struct g_lock_ctx {
      37             :         struct db_context *db;
      38             :         struct messaging_context *msg;
      39             :         enum dbwrap_lock_order lock_order;
      40             :         bool busy;
      41             : };
      42             : 
      43             : struct g_lock {
      44             :         struct server_id exclusive;
      45             :         size_t num_shared;
      46             :         uint8_t *shared;
      47             :         uint64_t unique_lock_epoch;
      48             :         uint64_t unique_data_epoch;
      49             :         size_t datalen;
      50             :         uint8_t *data;
      51             : };
      52             : 
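                     : /*
                     :  * Rough sketch of the record layout that g_lock_parse() below
                     :  * decodes and g_lock_store() writes back, derived from the code
                     :  * itself rather than from an external spec:
                     :  *
                     :  *   exclusive holder    SERVER_ID_BUF_LENGTH bytes
                     :  *   unique_lock_epoch   8 bytes
                     :  *   unique_data_epoch   8 bytes
                     :  *   num_shared          4 bytes
                     :  *   shared holders      num_shared * SERVER_ID_BUF_LENGTH bytes
                     :  *   user data           all remaining bytes
                     :  */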
      53     1879798 : static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
      54             : {
      55        4908 :         struct server_id exclusive;
      56        4908 :         size_t num_shared, shared_len;
      57        4908 :         uint64_t unique_lock_epoch;
      58        4908 :         uint64_t unique_data_epoch;
      59             : 
      60     1879798 :         if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
      61             :                       sizeof(uint64_t) +     /* seqnum */
      62             :                       sizeof(uint32_t))) {   /* num_shared */
      63     1247352 :                 struct g_lock ret = {
      64             :                         .exclusive.pid = 0,
      65      415784 :                         .unique_lock_epoch = generate_unique_u64(0),
      66      415784 :                         .unique_data_epoch = generate_unique_u64(0),
      67             :                 };
      68      415784 :                 *lck = ret;
      69      415784 :                 return true;
      70             :         }
      71             : 
      72     1464014 :         server_id_get(&exclusive, buf);
      73     1464014 :         buf += SERVER_ID_BUF_LENGTH;
      74     1464014 :         buflen -= SERVER_ID_BUF_LENGTH;
      75             : 
      76     1464014 :         unique_lock_epoch = BVAL(buf, 0);
      77     1464014 :         buf += sizeof(uint64_t);
      78     1464014 :         buflen -= sizeof(uint64_t);
      79             : 
      80     1464014 :         unique_data_epoch = BVAL(buf, 0);
      81     1464014 :         buf += sizeof(uint64_t);
      82     1464014 :         buflen -= sizeof(uint64_t);
      83             : 
      84     1464014 :         num_shared = IVAL(buf, 0);
      85     1464014 :         buf += sizeof(uint32_t);
      86     1464014 :         buflen -= sizeof(uint32_t);
      87             : 
      88     1464014 :         if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
      89           0 :                 DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
      90             :                           num_shared,
      91             :                           buflen);
      92           0 :                 return false;
      93             :         }
      94             : 
      95     1464014 :         shared_len = num_shared * SERVER_ID_BUF_LENGTH;
      96             : 
      97     1464014 :         *lck = (struct g_lock) {
      98             :                 .exclusive = exclusive,
      99             :                 .num_shared = num_shared,
     100             :                 .shared = buf,
     101             :                 .unique_lock_epoch = unique_lock_epoch,
     102             :                 .unique_data_epoch = unique_data_epoch,
     103     1464014 :                 .datalen = buflen-shared_len,
     104     1464014 :                 .data = buf+shared_len,
     105             :         };
     106             : 
     107     1464014 :         return true;
     108             : }
     109             : 
     110          72 : static void g_lock_get_shared(const struct g_lock *lck,
     111             :                               size_t i,
     112             :                               struct server_id *shared)
     113             : {
     114          67 :         if (i >= lck->num_shared) {
     115           0 :                 abort();
     116             :         }
     117          71 :         server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
     118          67 : }
     119             : 
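                     : /*
                     :  * Drop shared holder i by moving the last array entry into its
                     :  * slot; the order of the remaining shared holders is not kept.
                     :  */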
     120          17 : static void g_lock_del_shared(struct g_lock *lck, size_t i)
     121             : {
     122          17 :         if (i >= lck->num_shared) {
     123           0 :                 abort();
     124             :         }
     125          17 :         lck->num_shared -= 1;
     126          17 :         if (i < lck->num_shared) {
     127          21 :                 memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
     128           4 :                        lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
     129             :                        SERVER_ID_BUF_LENGTH);
     130             :         }
     131          17 : }
     132             : 
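                     : /*
                     :  * Write lck back as a vector of buffers in the layout sketched
                     :  * above.  If new_shared is non-NULL it is appended to the end of
                     :  * the shared holder array and num_shared is bumped; any extra
                     :  * new_dbufs are appended after the existing payload data.
                     :  */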
     133     1187536 : static NTSTATUS g_lock_store(
     134             :         struct db_record *rec,
     135             :         struct g_lock *lck,
     136             :         struct server_id *new_shared,
     137             :         const TDB_DATA *new_dbufs,
     138             :         size_t num_new_dbufs)
     139     1187536 : {
     140        2508 :         uint8_t exclusive[SERVER_ID_BUF_LENGTH];
     141        2508 :         uint8_t seqnum_buf[sizeof(uint64_t)*2];
     142        2508 :         uint8_t sizebuf[sizeof(uint32_t)];
     143        2508 :         uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];
     144             : 
     145     1187536 :         struct TDB_DATA dbufs[6 + num_new_dbufs];
     146             : 
     147     1187536 :         dbufs[0] = (TDB_DATA) {
     148             :                 .dptr = exclusive, .dsize = sizeof(exclusive),
     149             :         };
     150     1187536 :         dbufs[1] = (TDB_DATA) {
     151             :                 .dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
     152             :         };
     153     1187536 :         dbufs[2] = (TDB_DATA) {
     154             :                 .dptr = sizebuf, .dsize = sizeof(sizebuf),
     155             :         };
     156     1187536 :         dbufs[3] = (TDB_DATA) {
     157     1187536 :                 .dptr = lck->shared,
     158     1187536 :                 .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
     159             :         };
     160     1187536 :         dbufs[4] = (TDB_DATA) { 0 };
     161     1187536 :         dbufs[5] = (TDB_DATA) {
     162     1187536 :                 .dptr = lck->data, .dsize = lck->datalen,
     163             :         };
     164             : 
     165     1187536 :         if (num_new_dbufs != 0) {
     166        1937 :                 memcpy(&dbufs[6],
     167             :                        new_dbufs,
     168             :                        num_new_dbufs * sizeof(TDB_DATA));
     169             :         }
     170             : 
     171     1187536 :         server_id_put(exclusive, lck->exclusive);
     172     1187536 :         SBVAL(seqnum_buf, 0, lck->unique_lock_epoch);
     173     1187536 :         SBVAL(seqnum_buf, 8, lck->unique_data_epoch);
     174             : 
     175     1187536 :         if (new_shared != NULL) {
     176          18 :                 if (lck->num_shared >= UINT32_MAX) {
     177           0 :                         return NT_STATUS_BUFFER_OVERFLOW;
     178             :                 }
     179             : 
     180          18 :                 server_id_put(new_shared_buf, *new_shared);
     181             : 
     182          18 :                 dbufs[4] = (TDB_DATA) {
     183             :                         .dptr = new_shared_buf,
     184             :                         .dsize = sizeof(new_shared_buf),
     185             :                 };
     186             : 
     187          18 :                 lck->num_shared += 1;
     188             :         }
     189             : 
     190     1187536 :         SIVAL(sizebuf, 0, lck->num_shared);
     191             : 
     192     1187536 :         return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
     193             : }
     194             : 
     195         355 : struct g_lock_ctx *g_lock_ctx_init_backend(
     196             :         TALLOC_CTX *mem_ctx,
     197             :         struct messaging_context *msg,
     198             :         struct db_context **backend)
     199             : {
     200          21 :         struct g_lock_ctx *result;
     201             : 
     202         355 :         result = talloc_zero(mem_ctx, struct g_lock_ctx);
     203         355 :         if (result == NULL) {
     204           0 :                 return NULL;
     205             :         }
     206         355 :         result->msg = msg;
     207         355 :         result->lock_order = DBWRAP_LOCK_ORDER_NONE;
     208             : 
     209         355 :         result->db = db_open_watched(result, backend, msg);
     210         355 :         if (result->db == NULL) {
     211           0 :                 DBG_WARNING("db_open_watched failed\n");
     212           0 :                 TALLOC_FREE(result);
     213           0 :                 return NULL;
     214             :         }
     215         334 :         return result;
     216             : }
     217             : 
     218         196 : void g_lock_set_lock_order(struct g_lock_ctx *ctx,
     219             :                            enum dbwrap_lock_order lock_order)
     220             : {
     221         196 :         ctx->lock_order = lock_order;
     222         196 : }
     223             : 
     224         159 : struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
     225             :                                    struct messaging_context *msg)
     226             : {
     227         159 :         char *db_path = NULL;
     228         159 :         struct db_context *backend = NULL;
     229         159 :         struct g_lock_ctx *ctx = NULL;
     230             : 
     231         159 :         db_path = lock_path(mem_ctx, "g_lock.tdb");
     232         159 :         if (db_path == NULL) {
     233           0 :                 return NULL;
     234             :         }
     235             : 
     236         159 :         backend = db_open(
     237             :                 mem_ctx,
     238             :                 db_path,
     239             :                 0,
     240             :                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH|TDB_VOLATILE,
     241             :                 O_RDWR|O_CREAT,
     242             :                 0600,
     243             :                 DBWRAP_LOCK_ORDER_3,
     244             :                 DBWRAP_FLAG_NONE);
     245         159 :         TALLOC_FREE(db_path);
     246         159 :         if (backend == NULL) {
     247           0 :                 DBG_WARNING("Could not open g_lock.tdb\n");
     248           0 :                 return NULL;
     249             :         }
     250             : 
     251         159 :         ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
     252         159 :         return ctx;
     253             : }
     254             : 
     255         323 : static void g_lock_cleanup_dead(
     256             :         struct g_lock *lck,
     257             :         struct server_id *dead_blocker)
     258             : {
     259          13 :         bool exclusive_died;
     260          13 :         struct server_id_buf tmp;
     261             : 
     262         323 :         if (dead_blocker == NULL) {
     263         321 :                 return;
     264             :         }
     265             : 
     266           2 :         exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);
     267             : 
     268           2 :         if (exclusive_died) {
     269           1 :                 DBG_DEBUG("Exclusive holder %s died\n",
     270             :                           server_id_str_buf(lck->exclusive, &tmp));
     271           1 :                 lck->exclusive.pid = 0;
     272             :         }
     273             : 
     274           2 :         if (lck->num_shared != 0) {
     275           1 :                 bool shared_died;
     276           1 :                 struct server_id shared;
     277             : 
     278           1 :                 g_lock_get_shared(lck, 0, &shared);
     279           1 :                 shared_died = server_id_equal(dead_blocker, &shared);
     280             : 
     281           1 :                 if (shared_died) {
     282           1 :                         DBG_DEBUG("Shared holder %s died\n",
     283             :                                   server_id_str_buf(shared, &tmp));
     284           1 :                         g_lock_del_shared(lck, 0);
     285             :                 }
     286             :         }
     287             : }
     288             : 
     289         305 : static ssize_t g_lock_find_shared(
     290             :         struct g_lock *lck,
     291             :         const struct server_id *self)
     292             : {
     293          11 :         size_t i;
     294             : 
     295         312 :         for (i=0; i<lck->num_shared; i++) {
     296          11 :                 struct server_id shared;
     297          11 :                 bool same;
     298             : 
     299          11 :                 g_lock_get_shared(lck, i, &shared);
     300             : 
     301          11 :                 same = server_id_equal(self, &shared);
     302          11 :                 if (same) {
     303           4 :                         return i;
     304             :                 }
     305             :         }
     306             : 
     307         294 :         return -1;
     308             : }
     309             : 
     310         320 : static void g_lock_cleanup_shared(struct g_lock *lck)
     311             : {
     312          26 :         size_t i;
     313          26 :         struct server_id check;
     314          26 :         bool exists;
     315             : 
     316         320 :         if (lck->num_shared == 0) {
     317         302 :                 return;
     318             :         }
     319             : 
     320             :         /*
      321             :          * Read locks can stay around forever if a process dies. Do
      322             :          * a heuristic check for process existence: check one randomly
      323             :          * chosen shared holder and remove it if its process is gone.
      324             :          * Hopefully this will keep runaway read locks under control.
     325             :          */
     326          18 :         i = generate_random() % lck->num_shared;
     327          18 :         g_lock_get_shared(lck, i, &check);
     328             : 
     329          18 :         exists = serverid_exists(&check);
     330          18 :         if (!exists) {
     331           7 :                 struct server_id_buf tmp;
     332           7 :                 DBG_DEBUG("Shared locker %s died -- removing\n",
     333             :                           server_id_str_buf(check, &tmp));
     334           7 :                 g_lock_del_shared(lck, i);
     335             :         }
     336             : }
     337             : 
     338             : struct g_lock_lock_cb_state {
     339             :         struct g_lock_ctx *ctx;
     340             :         struct db_record *rec;
     341             :         struct g_lock *lck;
     342             :         struct server_id *new_shared;
     343             :         g_lock_lock_cb_fn_t cb_fn;
     344             :         void *cb_private;
     345             :         TALLOC_CTX *update_mem_ctx;
     346             :         TDB_DATA updated_data;
     347             :         bool existed;
     348             :         bool modified;
     349             :         bool unlock;
     350             : };
     351             : 
     352     3254292 : NTSTATUS g_lock_lock_cb_dump(struct g_lock_lock_cb_state *cb_state,
     353             :                              void (*fn)(struct server_id exclusive,
     354             :                                         size_t num_shared,
     355             :                                         const struct server_id *shared,
     356             :                                         const uint8_t *data,
     357             :                                         size_t datalen,
     358             :                                         void *private_data),
     359             :                              void *private_data)
     360             : {
     361     3254292 :         struct g_lock *lck = cb_state->lck;
     362             : 
      363             :         /* We allow a cb_fn only for G_LOCK_WRITE for now... */
     364     3254292 :         SMB_ASSERT(lck->num_shared == 0);
     365             : 
     366     3254292 :         fn(lck->exclusive,
     367             :            0, /* num_shared */
     368             :            NULL, /* shared */
     369     3254292 :            lck->data,
     370             :            lck->datalen,
     371             :            private_data);
     372             : 
     373     3254292 :         return NT_STATUS_OK;
     374             : }
     375             : 
     376      756612 : NTSTATUS g_lock_lock_cb_writev(struct g_lock_lock_cb_state *cb_state,
     377             :                                const TDB_DATA *dbufs,
     378             :                                size_t num_dbufs)
     379             : {
     380        1915 :         NTSTATUS status;
     381             : 
     382      756612 :         status = dbwrap_merge_dbufs(&cb_state->updated_data,
     383             :                                     cb_state->update_mem_ctx,
     384             :                                     dbufs, num_dbufs);
     385      756612 :         if (!NT_STATUS_IS_OK(status)) {
     386           0 :                 return status;
     387             :         }
     388             : 
     389      756612 :         cb_state->modified = true;
     390      756612 :         cb_state->lck->data = cb_state->updated_data.dptr;
     391      756612 :         cb_state->lck->datalen = cb_state->updated_data.dsize;
     392             : 
     393      756612 :         return NT_STATUS_OK;
     394             : }
     395             : 
     396      457686 : void g_lock_lock_cb_unlock(struct g_lock_lock_cb_state *cb_state)
     397             : {
     398      457686 :         cb_state->unlock = true;
     399      457686 : }
     400             : 
     401             : struct g_lock_lock_cb_watch_data_state {
     402             :         struct tevent_context *ev;
     403             :         struct g_lock_ctx *ctx;
     404             :         TDB_DATA key;
     405             :         struct server_id blocker;
     406             :         bool blockerdead;
     407             :         uint64_t unique_lock_epoch;
     408             :         uint64_t unique_data_epoch;
     409             :         uint64_t watch_instance;
     410             :         NTSTATUS status;
     411             : };
     412             : 
     413             : static void g_lock_lock_cb_watch_data_done(struct tevent_req *subreq);
     414             : 
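                     : /*
                     :  * A loose summary of the watch_data helpers below: the lock
                     :  * holder's callback can wait here until the record's
                     :  * unique_data_epoch changes, i.e. someone modified the data.
                     :  * If only unique_lock_epoch changed, the waiter re-registers at
                     :  * the end of the watcher list and keeps waiting.  Whether the
                     :  * watched blocker died is reported back via
                     :  * g_lock_lock_cb_watch_data_recv().
                     :  */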
     415         499 : struct tevent_req *g_lock_lock_cb_watch_data_send(
     416             :         TALLOC_CTX *mem_ctx,
     417             :         struct tevent_context *ev,
     418             :         struct g_lock_lock_cb_state *cb_state,
     419             :         struct server_id blocker)
     420             : {
     421         499 :         struct tevent_req *req = NULL;
     422         499 :         struct g_lock_lock_cb_watch_data_state *state = NULL;
     423         499 :         struct tevent_req *subreq = NULL;
     424         499 :         TDB_DATA key = dbwrap_record_get_key(cb_state->rec);
     425             : 
     426         499 :         req = tevent_req_create(
     427             :                 mem_ctx, &state, struct g_lock_lock_cb_watch_data_state);
     428         499 :         if (req == NULL) {
     429           0 :                 return NULL;
     430             :         }
     431         499 :         state->ev = ev;
     432         499 :         state->ctx = cb_state->ctx;
     433         499 :         state->blocker = blocker;
     434             : 
     435         499 :         state->key = tdb_data_talloc_copy(state, key);
     436         499 :         if (tevent_req_nomem(state->key.dptr, req)) {
     437           0 :                 return tevent_req_post(req, ev);
     438             :         }
     439             : 
     440         499 :         state->unique_lock_epoch = cb_state->lck->unique_lock_epoch;
     441         499 :         state->unique_data_epoch = cb_state->lck->unique_data_epoch;
     442             : 
     443         499 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
     444             : 
     445         499 :         subreq = dbwrap_watched_watch_send(
     446         499 :                 state, state->ev, cb_state->rec, 0, state->blocker);
     447         499 :         if (tevent_req_nomem(subreq, req)) {
     448           0 :                 return tevent_req_post(req, ev);
     449             :         }
     450         499 :         tevent_req_set_callback(subreq, g_lock_lock_cb_watch_data_done, req);
     451             : 
     452         499 :         return req;
     453             : }
     454             : 
     455         713 : static void g_lock_lock_cb_watch_data_done_fn(
     456             :         struct db_record *rec,
     457             :         TDB_DATA value,
     458             :         void *private_data)
     459             : {
     460         713 :         struct tevent_req *req = talloc_get_type_abort(
     461             :                 private_data, struct tevent_req);
     462         713 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     463             :                 req, struct g_lock_lock_cb_watch_data_state);
     464         713 :         struct tevent_req *subreq = NULL;
     465           0 :         struct g_lock lck;
     466           0 :         bool ok;
     467             : 
     468         713 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
     469         713 :         if (!ok) {
     470           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     471           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     472           0 :                 return;
     473             :         }
     474             : 
     475         713 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
     476         447 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     477         447 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
     478             :                           "state->unique_data_epoch=%"PRIu64"\n",
     479             :                           lck.unique_data_epoch,
     480             :                           state->unique_data_epoch);
     481         447 :                 state->status = NT_STATUS_OK;
     482         447 :                 return;
     483             :         }
     484             : 
     485             :         /*
      486             :          * If the lock epoch changed, we had better
      487             :          * remove ourselves from the waiter list
      488             :          * (most likely the first position)
      489             :          * and re-add ourselves at the end of the list.
      490             :          *
      491             :          * This gives other lock waiters a chance
      492             :          * to make progress.
      493             :          *
      494             :          * Otherwise we keep our waiter instance alive
      495             :          * and keep waiting (most likely at first position).
     496             :          */
     497         266 :         if (lck.unique_lock_epoch != state->unique_lock_epoch) {
     498         242 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     499         242 :                 state->watch_instance = dbwrap_watched_watch_add_instance(rec);
     500         242 :                 state->unique_lock_epoch = lck.unique_lock_epoch;
     501             :         }
     502             : 
     503         266 :         subreq = dbwrap_watched_watch_send(
     504             :                 state, state->ev, rec, state->watch_instance, state->blocker);
     505         266 :         if (subreq == NULL) {
     506           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     507           0 :                 state->status = NT_STATUS_NO_MEMORY;
     508           0 :                 return;
     509             :         }
     510         266 :         tevent_req_set_callback(subreq, g_lock_lock_cb_watch_data_done, req);
     511             : 
     512         266 :         state->status = NT_STATUS_EVENT_PENDING;
     513             : }
     514             : 
     515         713 : static void g_lock_lock_cb_watch_data_done(struct tevent_req *subreq)
     516             : {
     517         713 :         struct tevent_req *req = tevent_req_callback_data(
     518             :                 subreq, struct tevent_req);
     519         713 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     520             :                 req, struct g_lock_lock_cb_watch_data_state);
     521           0 :         NTSTATUS status;
     522         713 :         uint64_t instance = 0;
     523             : 
     524         713 :         status = dbwrap_watched_watch_recv(
     525             :                 subreq, &instance, &state->blockerdead, &state->blocker);
     526         713 :         TALLOC_FREE(subreq);
     527         713 :         if (tevent_req_nterror(req, status)) {
     528           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
     529             :                           nt_errstr(status));
     530         266 :                 return;
     531             :         }
     532             : 
     533         713 :         state->watch_instance = instance;
     534             : 
     535         713 :         status = dbwrap_do_locked(
     536         713 :                 state->ctx->db, state->key, g_lock_lock_cb_watch_data_done_fn, req);
     537         713 :         if (tevent_req_nterror(req, status)) {
     538           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
     539           0 :                 return;
     540             :         }
     541         713 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
     542         266 :                 return;
     543             :         }
     544         447 :         if (tevent_req_nterror(req, state->status)) {
     545           0 :                 return;
     546             :         }
     547         447 :         tevent_req_done(req);
     548             : }
     549             : 
     550         447 : NTSTATUS g_lock_lock_cb_watch_data_recv(
     551             :         struct tevent_req *req,
     552             :         bool *blockerdead,
     553             :         struct server_id *blocker)
     554             : {
     555         447 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     556             :                 req, struct g_lock_lock_cb_watch_data_state);
     557           0 :         NTSTATUS status;
     558             : 
     559         447 :         if (tevent_req_is_nterror(req, &status)) {
     560           0 :                 return status;
     561             :         }
     562         447 :         if (blockerdead != NULL) {
     563         447 :                 *blockerdead = state->blockerdead;
     564             :         }
     565         447 :         if (blocker != NULL) {
     566         447 :                 *blocker = state->blocker;
     567             :         }
     568             : 
     569         447 :         return NT_STATUS_OK;
     570             : }
     571             : 
     572        2542 : void g_lock_lock_cb_wake_watchers(struct g_lock_lock_cb_state *cb_state)
     573             : {
     574        2542 :         struct g_lock *lck = cb_state->lck;
     575             : 
     576        2542 :         lck->unique_data_epoch = generate_unique_u64(lck->unique_data_epoch);
     577        2542 :         cb_state->modified = true;
     578        2542 : }
     579             : 
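                     : /*
                     :  * Roughly: run the user callback (if any) with the dbwrap lock
                     :  * order held, then either store the possibly modified record or,
                     :  * when the callback requested an unlock and no payload data is
                     :  * left, delete the record.  NT_STATUS_WAS_UNLOCKED signals that
                     :  * the lock was released as part of this call.
                     :  */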
     580      948829 : static NTSTATUS g_lock_lock_cb_run_and_store(struct g_lock_lock_cb_state *cb_state)
     581             : {
     582      948829 :         struct g_lock *lck = cb_state->lck;
     583      948829 :         NTSTATUS success_status = NT_STATUS_OK;
     584        2314 :         NTSTATUS status;
     585             : 
     586      948829 :         if (cb_state->cb_fn != NULL) {
     587             : 
     588      936974 :                 SMB_ASSERT(lck->num_shared == 0);
     589      936974 :                 SMB_ASSERT(cb_state->new_shared == NULL);
     590             : 
     591      936974 :                 if (cb_state->ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     592      936974 :                         const char *name = dbwrap_name(cb_state->ctx->db);
     593      936974 :                         dbwrap_lock_order_lock(name, cb_state->ctx->lock_order);
     594             :                 }
     595             : 
     596      936974 :                 cb_state->ctx->busy = true;
     597      936974 :                 cb_state->cb_fn(cb_state, cb_state->cb_private);
     598      936974 :                 cb_state->ctx->busy = false;
     599             : 
     600      936974 :                 if (cb_state->ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     601      936974 :                         const char *name = dbwrap_name(cb_state->ctx->db);
     602      936974 :                         dbwrap_lock_order_unlock(name, cb_state->ctx->lock_order);
     603             :                 }
     604             :         }
     605             : 
     606      948829 :         if (cb_state->unlock) {
     607             :                 /*
      608             :                  * Unlocking should wake up watchers.
     609             :                  *
     610             :                  * We no longer need the lock, so
     611             :                  * force a wakeup of the next watchers,
     612             :                  * even if we don't do any update.
     613             :                  */
     614      457686 :                 dbwrap_watched_watch_reset_alerting(cb_state->rec);
     615      457686 :                 dbwrap_watched_watch_force_alerting(cb_state->rec);
     616      457686 :                 if (!cb_state->modified) {
     617             :                         /*
     618             :                          * The record was not changed at
     619             :                          * all, so we can also avoid
     620             :                          * storing the lck.unique_lock_epoch
     621             :                          * change
     622             :                          */
     623       12569 :                         return NT_STATUS_WAS_UNLOCKED;
     624             :                 }
     625      445117 :                 lck->exclusive = (struct server_id) { .pid = 0 };
     626      445117 :                 cb_state->new_shared = NULL;
     627             : 
     628      445117 :                 if (lck->datalen == 0) {
     629      243782 :                         if (!cb_state->existed) {
     630           0 :                                 return NT_STATUS_WAS_UNLOCKED;
     631             :                         }
     632             : 
     633      243782 :                         status = dbwrap_record_delete(cb_state->rec);
     634      243782 :                         if (!NT_STATUS_IS_OK(status)) {
     635           0 :                                 DBG_WARNING("dbwrap_record_delete() failed: %s\n",
     636             :                                     nt_errstr(status));
     637           0 :                                 return status;
     638             :                         }
     639      243782 :                         return NT_STATUS_WAS_UNLOCKED;
     640             :                 }
     641             : 
     642      200531 :                 success_status = NT_STATUS_WAS_UNLOCKED;
     643             :         }
     644             : 
     645      692478 :         status = g_lock_store(cb_state->rec,
     646             :                               cb_state->lck,
     647             :                               cb_state->new_shared,
     648             :                               NULL, 0);
     649      692478 :         if (!NT_STATUS_IS_OK(status)) {
     650           0 :                 DBG_WARNING("g_lock_store() failed: %s\n",
     651             :                             nt_errstr(status));
     652           0 :                 return status;
     653             :         }
     654             : 
     655      692478 :         return success_status;
     656             : }
     657             : 
     658             : struct g_lock_lock_state {
     659             :         struct tevent_context *ev;
     660             :         struct g_lock_ctx *ctx;
     661             :         TDB_DATA key;
     662             :         enum g_lock_type type;
     663             :         bool retry;
     664             :         g_lock_lock_cb_fn_t cb_fn;
     665             :         void *cb_private;
     666             : };
     667             : 
     668             : struct g_lock_lock_fn_state {
     669             :         struct g_lock_lock_state *req_state;
     670             :         struct server_id *dead_blocker;
     671             : 
     672             :         struct tevent_req *watch_req;
     673             :         uint64_t watch_instance;
     674             :         NTSTATUS status;
     675             : };
     676             : 
     677             : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
     678             : 
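                     : /*
                     :  * A sketch of the outcomes of g_lock_trylock() below:
                     :  *   NT_STATUS_OK / NT_STATUS_WAS_UNLOCKED - lock granted; callback
                     :  *                                            (if any) ran
                     :  *   NT_STATUS_LOCK_NOT_GRANTED   - blocked; caller watches *blocker
                     :  *   NT_STATUS_WAS_LOCKED         - we already hold a conflicting lock
                     :  *   NT_STATUS_NOT_LOCKED         - up-/downgrade without holding a lock
                     :  *   NT_STATUS_POSSIBLE_DEADLOCK  - another holder is upgrading at
                     :  *                                  the same time
                     :  *   NT_STATUS_INTERNAL_DB_CORRUPTION - the record failed to parse
                     :  */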
     679         323 : static NTSTATUS g_lock_trylock(
     680             :         struct db_record *rec,
     681             :         struct g_lock_lock_fn_state *state,
     682             :         TDB_DATA data,
     683             :         struct server_id *blocker)
     684             : {
     685         323 :         struct g_lock_lock_state *req_state = state->req_state;
     686         323 :         struct server_id self = messaging_server_id(req_state->ctx->msg);
     687         323 :         enum g_lock_type type = req_state->type;
     688         323 :         bool retry = req_state->retry;
     689         323 :         struct g_lock lck = { .exclusive.pid = 0 };
     690         646 :         struct g_lock_lock_cb_state cb_state = {
     691         323 :                 .ctx = req_state->ctx,
     692             :                 .rec = rec,
     693             :                 .lck = &lck,
     694         323 :                 .cb_fn = req_state->cb_fn,
     695         323 :                 .cb_private = req_state->cb_private,
     696         323 :                 .existed = data.dsize != 0,
     697         323 :                 .update_mem_ctx = talloc_tos(),
     698             :         };
     699          13 :         struct server_id_buf tmp;
     700          13 :         NTSTATUS status;
     701          13 :         bool ok;
     702             : 
     703         323 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
     704         323 :         if (!ok) {
     705           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     706           0 :                 DBG_DEBUG("g_lock_parse failed\n");
     707           0 :                 return NT_STATUS_INTERNAL_DB_CORRUPTION;
     708             :         }
     709             : 
     710         323 :         g_lock_cleanup_dead(&lck, state->dead_blocker);
     711             : 
     712         323 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
     713             : 
     714         323 :         if (lck.exclusive.pid != 0) {
     715          22 :                 bool self_exclusive = server_id_equal(&self, &lck.exclusive);
     716             : 
     717          22 :                 if (!self_exclusive) {
     718          20 :                         bool exists = serverid_exists(&lck.exclusive);
     719          20 :                         if (!exists) {
     720           0 :                                 lck.exclusive = (struct server_id) { .pid=0 };
     721           0 :                                 goto noexclusive;
     722             :                         }
     723             : 
     724          20 :                         DBG_DEBUG("%s has an exclusive lock\n",
     725             :                                   server_id_str_buf(lck.exclusive, &tmp));
     726             : 
     727          20 :                         if (type == G_LOCK_DOWNGRADE) {
     728           0 :                                 struct server_id_buf tmp2;
     729             : 
     730           0 :                                 dbwrap_watched_watch_remove_instance(rec,
     731             :                                                 state->watch_instance);
     732             : 
     733           0 :                                 DBG_DEBUG("%s: Trying to downgrade %s\n",
     734             :                                           server_id_str_buf(self, &tmp),
     735             :                                           server_id_str_buf(
     736             :                                                   lck.exclusive, &tmp2));
     737           0 :                                 return NT_STATUS_NOT_LOCKED;
     738             :                         }
     739             : 
     740          20 :                         if (type == G_LOCK_UPGRADE) {
     741           1 :                                 ssize_t shared_idx;
     742             : 
     743           1 :                                 dbwrap_watched_watch_remove_instance(rec,
     744             :                                                 state->watch_instance);
     745             : 
     746           1 :                                 shared_idx = g_lock_find_shared(&lck, &self);
     747             : 
     748           1 :                                 if (shared_idx == -1) {
     749           0 :                                         DBG_DEBUG("Trying to upgrade %s "
     750             :                                                   "without "
     751             :                                                   "existing shared lock\n",
     752             :                                                   server_id_str_buf(
     753             :                                                           self, &tmp));
     754           0 :                                         return NT_STATUS_NOT_LOCKED;
     755             :                                 }
     756             : 
     757             :                                 /*
     758             :                                  * We're trying to upgrade, and the
     759             :                                  * exclusive lock is taken by someone
     760             :                                  * else. This means that someone else
     761             :                                  * is waiting for us to give up our
     762             :                                  * shared lock. If we now also wait
      763             :                                  * for someone to give up their shared
     764             :                                  * lock, we will deadlock.
     765             :                                  */
     766             : 
     767           1 :                                 DBG_DEBUG("Trying to upgrade %s while "
     768             :                                           "someone else is also "
     769             :                                           "trying to upgrade\n",
     770             :                                           server_id_str_buf(self, &tmp));
     771           1 :                                 return NT_STATUS_POSSIBLE_DEADLOCK;
     772             :                         }
     773             : 
     774          19 :                         DBG_DEBUG("Waiting for lck.exclusive=%s\n",
     775             :                                   server_id_str_buf(lck.exclusive, &tmp));
     776             : 
     777             :                         /*
     778             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     779             :                          * and need to monitor the record.
     780             :                          *
     781             :                          * If we don't have a watcher instance yet,
     782             :                          * we should add one.
     783             :                          */
     784          19 :                         if (state->watch_instance == 0) {
     785          17 :                                 state->watch_instance =
     786          17 :                                         dbwrap_watched_watch_add_instance(rec);
     787             :                         }
     788             : 
     789          19 :                         *blocker = lck.exclusive;
     790          19 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     791             :                 }
     792             : 
     793           2 :                 if (type == G_LOCK_DOWNGRADE) {
     794           0 :                         DBG_DEBUG("Downgrading %s from WRITE to READ\n",
     795             :                                   server_id_str_buf(self, &tmp));
     796             : 
     797           0 :                         lck.exclusive = (struct server_id) { .pid = 0 };
     798           0 :                         goto do_shared;
     799             :                 }
     800             : 
     801           2 :                 if (!retry) {
     802           1 :                         dbwrap_watched_watch_remove_instance(rec,
     803             :                                                 state->watch_instance);
     804             : 
     805           1 :                         DBG_DEBUG("%s already locked by self\n",
     806             :                                   server_id_str_buf(self, &tmp));
     807           1 :                         return NT_STATUS_WAS_LOCKED;
     808             :                 }
     809             : 
     810           1 :                 g_lock_cleanup_shared(&lck);
     811             : 
     812           1 :                 if (lck.num_shared != 0) {
     813           0 :                         g_lock_get_shared(&lck, 0, blocker);
     814             : 
     815           0 :                         DBG_DEBUG("Continue waiting for shared lock %s\n",
     816             :                                   server_id_str_buf(*blocker, &tmp));
     817             : 
     818             :                         /*
     819             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     820             :                          * and need to monitor the record.
     821             :                          *
     822             :                          * If we don't have a watcher instance yet,
     823             :                          * we should add one.
     824             :                          */
     825           0 :                         if (state->watch_instance == 0) {
     826           0 :                                 state->watch_instance =
     827           0 :                                         dbwrap_watched_watch_add_instance(rec);
     828             :                         }
     829             : 
     830           0 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     831             :                 }
     832             : 
     833             :                 /*
      834             :                  * Retry after a conflicting lock was released.
      835             :                  * All pending readers are gone, so we got the lock.
     836             :                  */
     837           1 :                 goto got_lock;
     838             :         }
     839             : 
     840         301 : noexclusive:
     841             : 
     842         301 :         if (type == G_LOCK_UPGRADE) {
     843           3 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     844             : 
     845           3 :                 if (shared_idx == -1) {
     846           0 :                         dbwrap_watched_watch_remove_instance(rec,
     847             :                                                 state->watch_instance);
     848             : 
     849           0 :                         DBG_DEBUG("Trying to upgrade %s without "
     850             :                                   "existing shared lock\n",
     851             :                                   server_id_str_buf(self, &tmp));
     852           0 :                         return NT_STATUS_NOT_LOCKED;
     853             :                 }
     854             : 
     855           3 :                 g_lock_del_shared(&lck, shared_idx);
     856           3 :                 type = G_LOCK_WRITE;
     857             :         }
     858             : 
     859         301 :         if (type == G_LOCK_WRITE) {
     860         301 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     861             : 
     862         301 :                 if (shared_idx != -1) {
     863           0 :                         dbwrap_watched_watch_remove_instance(rec,
     864             :                                                 state->watch_instance);
     865           0 :                         DBG_DEBUG("Trying to writelock existing shared %s\n",
     866             :                                   server_id_str_buf(self, &tmp));
     867           0 :                         return NT_STATUS_WAS_LOCKED;
     868             :                 }
     869             : 
     870         301 :                 lck.exclusive = self;
     871             : 
     872         301 :                 g_lock_cleanup_shared(&lck);
     873             : 
     874         301 :                 if (lck.num_shared == 0) {
     875             :                         /*
     876             :                          * If we store ourself as exclusive writer,
      877             :                          * If we store ourselves as exclusive writer,
     878             :                          */
     879         297 :                         goto got_lock;
     880             :                 }
     881             : 
     882           4 :                 if (state->watch_instance == 0) {
     883             :                         /*
     884             :                          * Here we have lck.num_shared != 0.
     885             :                          *
     886             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     887             :                          * below.
     888             :                          *
      889             :                          * And we don't have a watcher instance yet.
      890             :                          *
      891             :                          * We add it here before g_lock_store()
      892             :                          * in order to trigger just one
      893             :                          * low-level dbwrap_do_locked() call.
     894             :                          */
     895           4 :                         state->watch_instance =
     896           4 :                                 dbwrap_watched_watch_add_instance(rec);
     897             :                 }
     898             : 
     899           4 :                 status = g_lock_store(rec, &lck, NULL, NULL, 0);
     900           4 :                 if (!NT_STATUS_IS_OK(status)) {
     901           0 :                         DBG_DEBUG("g_lock_store() failed: %s\n",
     902             :                                   nt_errstr(status));
     903           0 :                         return status;
     904             :                 }
     905             : 
     906           4 :                 talloc_set_destructor(
     907             :                         req_state, g_lock_lock_state_destructor);
     908             : 
     909           4 :                 g_lock_get_shared(&lck, 0, blocker);
     910             : 
     911           4 :                 DBG_DEBUG("Waiting for %zu shared locks, "
     912             :                           "picking blocker %s\n",
     913             :                           lck.num_shared,
     914             :                           server_id_str_buf(*blocker, &tmp));
     915             : 
     916           4 :                 return NT_STATUS_LOCK_NOT_GRANTED;
     917             :         }
     918             : 
     919           0 : do_shared:
     920             : 
     921           0 :         g_lock_cleanup_shared(&lck);
     922           0 :         cb_state.new_shared = &self;
     923           0 :         goto got_lock;
     924             : 
     925         298 : got_lock:
     926             :         /*
     927             :          * We got the lock we asked for, so we no
     928             :          * longer need to monitor the record.
     929             :          */
     930         298 :         dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     931             : 
     932         298 :         status = g_lock_lock_cb_run_and_store(&cb_state);
     933         298 :         if (!NT_STATUS_IS_OK(status) &&
     934           6 :             !NT_STATUS_EQUAL(status, NT_STATUS_WAS_UNLOCKED))
     935             :         {
     936           0 :                 DBG_WARNING("g_lock_lock_cb_run_and_store() failed: %s\n",
     937             :                             nt_errstr(status));
     938           0 :                 return status;
     939             :         }
     940             : 
     941         298 :         talloc_set_destructor(req_state, NULL);
     942         298 :         return status;
     943             : }
     944             : 
     945         323 : static void g_lock_lock_fn(
     946             :         struct db_record *rec,
     947             :         TDB_DATA value,
     948             :         void *private_data)
     949             : {
     950         323 :         struct g_lock_lock_fn_state *state = private_data;
     951         323 :         struct server_id blocker = {0};
     952             : 
     953             :         /*
     954             :          * We're trying to get a lock and if we are
     955             :          * successful in doing that, we should not
      956             :          * wake up any other waiters; all they would
     957             :          * find is that we're holding a lock they
     958             :          * are conflicting with.
     959             :          */
     960         323 :         dbwrap_watched_watch_skip_alerting(rec);
     961             : 
     962         323 :         state->status = g_lock_trylock(rec, state, value, &blocker);
     963         323 :         if (!NT_STATUS_IS_OK(state->status)) {
     964          31 :                 DBG_DEBUG("g_lock_trylock returned %s\n",
     965             :                           nt_errstr(state->status));
     966             :         }
     967         323 :         if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
     968         300 :                 return;
     969             :         }
     970             : 
     971          46 :         state->watch_req = dbwrap_watched_watch_send(
     972          23 :                 state->req_state, state->req_state->ev, rec, state->watch_instance, blocker);
     973          23 :         if (state->watch_req == NULL) {
     974           0 :                 state->status = NT_STATUS_NO_MEMORY;
     975             :         }
     976             : }
     977             : 
     978           2 : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
     979             : {
     980           2 :         NTSTATUS status = g_lock_unlock(s->ctx, s->key);
     981           2 :         if (!NT_STATUS_IS_OK(status)) {
     982           0 :                 DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
     983             :         }
     984           2 :         return 0;
     985             : }
     986             : 
     987             : static void g_lock_lock_retry(struct tevent_req *subreq);
     988             : 
     989         305 : struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
     990             :                                     struct tevent_context *ev,
     991             :                                     struct g_lock_ctx *ctx,
     992             :                                     TDB_DATA key,
     993             :                                     enum g_lock_type type,
     994             :                                     g_lock_lock_cb_fn_t cb_fn,
     995             :                                     void *cb_private)
     996             : {
     997          11 :         struct tevent_req *req;
     998          11 :         struct g_lock_lock_state *state;
     999          11 :         struct g_lock_lock_fn_state fn_state;
    1000          11 :         NTSTATUS status;
    1001          11 :         bool ok;
    1002             : 
    1003         305 :         SMB_ASSERT(!ctx->busy);
    1004             : 
    1005         305 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
    1006         305 :         if (req == NULL) {
    1007           0 :                 return NULL;
    1008             :         }
    1009         305 :         state->ev = ev;
    1010         305 :         state->ctx = ctx;
    1011         305 :         state->key = key;
    1012         305 :         state->type = type;
    1013         305 :         state->cb_fn = cb_fn;
    1014         305 :         state->cb_private = cb_private;
    1015             : 
    1016         305 :         fn_state = (struct g_lock_lock_fn_state) {
    1017             :                 .req_state = state,
    1018             :         };
    1019             : 
    1020             :         /*
     1021             :          * We allow a cb_fn only for G_LOCK_WRITE for now.
    1022             :          *
    1023             :          * It's all we currently need and it makes a few things
    1024             :          * easier to implement.
    1025             :          */
    1026         305 :         if (unlikely(cb_fn != NULL && type != G_LOCK_WRITE)) {
    1027           0 :                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_6);
    1028           0 :                 return tevent_req_post(req, ev);
    1029             :         }
    1030             : 
    1031         305 :         status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
    1032         305 :         if (tevent_req_nterror(req, status)) {
    1033           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
    1034             :                           nt_errstr(status));
    1035           0 :                 return tevent_req_post(req, ev);
    1036             :         }
    1037             : 
    1038         305 :         if (NT_STATUS_IS_OK(fn_state.status)) {
    1039         282 :                 tevent_req_done(req);
    1040         282 :                 return tevent_req_post(req, ev);
    1041             :         }
    1042          23 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1043           2 :                 tevent_req_nterror(req, fn_state.status);
    1044           2 :                 return tevent_req_post(req, ev);
    1045             :         }
    1046             : 
    1047          21 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
    1048           0 :                 return tevent_req_post(req, ev);
    1049             :         }
    1050             : 
    1051          56 :         ok = tevent_req_set_endtime(
    1052             :                 fn_state.watch_req,
    1053          21 :                 state->ev,
    1054          21 :                 timeval_current_ofs(5 + generate_random() % 5, 0));
    1055          21 :         if (!ok) {
    1056           0 :                 tevent_req_oom(req);
    1057           0 :                 return tevent_req_post(req, ev);
    1058             :         }
    1059          21 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
    1060             : 
    1061          21 :         return req;
    1062             : }
    1063             : 
    1064          18 : static void g_lock_lock_retry(struct tevent_req *subreq)
    1065             : {
    1066          18 :         struct tevent_req *req = tevent_req_callback_data(
    1067             :                 subreq, struct tevent_req);
    1068          18 :         struct g_lock_lock_state *state = tevent_req_data(
    1069             :                 req, struct g_lock_lock_state);
    1070           2 :         struct g_lock_lock_fn_state fn_state;
    1071          18 :         struct server_id blocker = { .pid = 0 };
    1072          18 :         bool blockerdead = false;
    1073           2 :         NTSTATUS status;
    1074          18 :         uint64_t instance = 0;
    1075             : 
    1076          18 :         status = dbwrap_watched_watch_recv(subreq, &instance, &blockerdead, &blocker);
    1077          18 :         DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
    1078          18 :         TALLOC_FREE(subreq);
    1079             : 
    1080          18 :         if (!NT_STATUS_IS_OK(status) &&
    1081           0 :             !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
    1082           0 :                 tevent_req_nterror(req, status);
    1083           0 :                 return;
    1084             :         }
    1085             : 
    1086          18 :         state->retry = true;
    1087             : 
    1088          20 :         fn_state = (struct g_lock_lock_fn_state) {
    1089             :                 .req_state = state,
    1090          18 :                 .dead_blocker = blockerdead ? &blocker : NULL,
    1091             :                 .watch_instance = instance,
    1092             :         };
    1093             : 
    1094          18 :         status = dbwrap_do_locked(state->ctx->db, state->key,
    1095             :                                   g_lock_lock_fn, &fn_state);
    1096          18 :         if (tevent_req_nterror(req, status)) {
    1097           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
    1098             :                           nt_errstr(status));
    1099           0 :                 return;
    1100             :         }
    1101             : 
    1102          18 :         if (NT_STATUS_IS_OK(fn_state.status)) {
    1103          10 :                 tevent_req_done(req);
    1104          10 :                 return;
    1105             :         }
    1106           8 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1107           6 :                 tevent_req_nterror(req, fn_state.status);
    1108           6 :                 return;
    1109             :         }
    1110             : 
    1111           2 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
    1112           0 :                 return;
    1113             :         }
    1114             : 
    1115           2 :         if (!tevent_req_set_endtime(
    1116             :                     fn_state.watch_req, state->ev,
    1117           2 :                     timeval_current_ofs(5 + generate_random() % 5, 0))) {
    1118           0 :                 return;
    1119             :         }
    1120           2 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
    1121             : }
    1122             : 
    1123         304 : NTSTATUS g_lock_lock_recv(struct tevent_req *req)
    1124             : {
    1125         304 :         struct g_lock_lock_state *state = tevent_req_data(
    1126             :                 req, struct g_lock_lock_state);
    1127         304 :         struct g_lock_ctx *ctx = state->ctx;
    1128          10 :         NTSTATUS status;
    1129             : 
    1130         304 :         if (tevent_req_is_nterror(req, &status)) {
    1131          12 :                 if (NT_STATUS_EQUAL(status, NT_STATUS_WAS_UNLOCKED)) {
    1132           6 :                         return NT_STATUS_OK;
    1133             :                 }
    1134           6 :                 return status;
    1135             :         }
    1136             : 
    1137         292 :         if ((ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) &&
    1138          11 :             ((state->type == G_LOCK_READ) ||
    1139          11 :              (state->type == G_LOCK_WRITE))) {
    1140          11 :                 const char *name = dbwrap_name(ctx->db);
    1141          11 :                 dbwrap_lock_order_lock(name, ctx->lock_order);
    1142             :         }
    1143             : 
    1144         292 :         return NT_STATUS_OK;
    1145             : }
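
The g_lock_lock_send()/g_lock_lock_recv() pair above follows the usual tevent _send/_recv convention. Below is a minimal, hypothetical caller sketch; the my_lock_* names are invented for illustration, and it assumes a struct g_lock_ctx set up elsewhere (e.g. via g_lock_ctx_init()), a running tevent loop, and that the caller keeps key.dptr alive until the lock is released.

        struct my_lock_state {
                struct g_lock_ctx *ctx;
                TDB_DATA key;           /* caller must keep key.dptr alive */
        };

        static void my_lock_done(struct tevent_req *subreq);

        static bool my_lock_start(TALLOC_CTX *mem_ctx,
                                  struct tevent_context *ev,
                                  struct g_lock_ctx *ctx,
                                  TDB_DATA key)
        {
                struct my_lock_state *state = NULL;
                struct tevent_req *subreq = NULL;

                state = talloc(mem_ctx, struct my_lock_state);
                if (state == NULL) {
                        return false;
                }
                state->ctx = ctx;
                state->key = key;

                subreq = g_lock_lock_send(state,
                                          ev,
                                          ctx,
                                          key,
                                          G_LOCK_WRITE,
                                          NULL,  /* cb_fn, G_LOCK_WRITE only */
                                          NULL); /* cb_private */
                if (subreq == NULL) {
                        TALLOC_FREE(state);
                        return false;
                }
                tevent_req_set_callback(subreq, my_lock_done, state);
                return true;
        }

        static void my_lock_done(struct tevent_req *subreq)
        {
                struct my_lock_state *state = tevent_req_callback_data(
                        subreq, struct my_lock_state);
                NTSTATUS status = g_lock_lock_recv(subreq);

                TALLOC_FREE(subreq);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_WARNING("g_lock_lock failed: %s\n",
                                    nt_errstr(status));
                        TALLOC_FREE(state);
                        return;
                }

                /* ... work under the exclusive lock ... */

                (void)g_lock_unlock(state->ctx, state->key);
                TALLOC_FREE(state);
        }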
    1146             : 
    1147             : struct g_lock_lock_simple_state {
    1148             :         struct g_lock_ctx *ctx;
    1149             :         struct server_id me;
    1150             :         enum g_lock_type type;
    1151             :         NTSTATUS status;
    1152             :         g_lock_lock_cb_fn_t cb_fn;
    1153             :         void *cb_private;
    1154             : };
    1155             : 
    1156      948553 : static void g_lock_lock_simple_fn(
    1157             :         struct db_record *rec,
    1158             :         TDB_DATA value,
    1159             :         void *private_data)
    1160             : {
    1161      948553 :         struct g_lock_lock_simple_state *state = private_data;
    1162        2315 :         struct server_id_buf buf;
    1163      948553 :         struct g_lock lck = { .exclusive.pid = 0 };
    1164     1897106 :         struct g_lock_lock_cb_state cb_state = {
    1165      948553 :                 .ctx = state->ctx,
    1166             :                 .rec = rec,
    1167             :                 .lck = &lck,
    1168      948553 :                 .cb_fn = state->cb_fn,
    1169      948553 :                 .cb_private = state->cb_private,
    1170      948553 :                 .existed = value.dsize != 0,
    1171      948553 :                 .update_mem_ctx = talloc_tos(),
    1172             :         };
    1173        2315 :         bool ok;
    1174             : 
    1175      948553 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1176      948553 :         if (!ok) {
    1177           0 :                 DBG_DEBUG("g_lock_parse failed\n");
    1178           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1179           0 :                 return;
    1180             :         }
    1181             : 
    1182      948553 :         if (lck.exclusive.pid != 0) {
    1183          20 :                 DBG_DEBUG("locked by %s\n",
    1184             :                           server_id_str_buf(lck.exclusive, &buf));
    1185          20 :                 goto not_granted;
    1186             :         }
    1187             : 
    1188      948533 :         if (state->type == G_LOCK_WRITE) {
    1189      948515 :                 if (lck.num_shared != 0) {
    1190           2 :                         DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
    1191           2 :                         goto not_granted;
    1192             :                 }
    1193      948513 :                 lck.exclusive = state->me;
    1194          18 :         } else if (state->type == G_LOCK_READ) {
    1195          18 :                 g_lock_cleanup_shared(&lck);
    1196          18 :                 cb_state.new_shared = &state->me;
    1197             :         } else {
    1198           0 :                 smb_panic(__location__);
    1199             :         }
    1200             : 
    1201      948531 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
    1202             : 
    1203             :         /*
     1204             :          * We are going to store ourselves as the owner,
     1205             :          * so we have what we were waiting for.
     1206             :          *
     1207             :          * We therefore no longer need to monitor the
     1208             :          * record.
    1209             :          */
    1210      948531 :         dbwrap_watched_watch_skip_alerting(rec);
    1211             : 
    1212      948531 :         state->status = g_lock_lock_cb_run_and_store(&cb_state);
    1213      948531 :         if (!NT_STATUS_IS_OK(state->status) &&
    1214      456211 :             !NT_STATUS_EQUAL(state->status, NT_STATUS_WAS_UNLOCKED))
    1215             :         {
    1216           0 :                 DBG_WARNING("g_lock_lock_cb_run_and_store() failed: %s\n",
    1217             :                             nt_errstr(state->status));
    1218           0 :                 return;
    1219             :         }
    1220             : 
    1221      946221 :         return;
    1222             : 
    1223          22 : not_granted:
    1224          22 :         state->status = NT_STATUS_LOCK_NOT_GRANTED;
    1225             : }
    1226             : 
    1227      948556 : NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
    1228             :                      enum g_lock_type type, struct timeval timeout,
    1229             :                      g_lock_lock_cb_fn_t cb_fn,
    1230             :                      void *cb_private)
    1231             : {
    1232        2318 :         TALLOC_CTX *frame;
    1233        2318 :         struct tevent_context *ev;
    1234        2318 :         struct tevent_req *req;
    1235        2318 :         struct timeval end;
    1236        2318 :         NTSTATUS status;
    1237             : 
    1238      948556 :         SMB_ASSERT(!ctx->busy);
    1239             : 
    1240             :         /*
     1241             :          * We allow a cb_fn only for G_LOCK_WRITE for now.
    1242             :          *
    1243             :          * It's all we currently need and it makes a few things
    1244             :          * easier to implement.
    1245             :          */
    1246      948556 :         if (unlikely(cb_fn != NULL && type != G_LOCK_WRITE)) {
    1247           0 :                 return NT_STATUS_INVALID_PARAMETER_5;
    1248             :         }
    1249             : 
    1250      948556 :         if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
    1251             :                 /*
     1252             :                  * This is an abstraction violation: Normally we build
     1253             :                  * sync wrappers around async functions with fully
     1254             :                  * nested event contexts. However, this is used in
     1255             :                  * very hot code paths, so we avoid the event context
     1256             :                  * creation for the common path where there's no lock
     1257             :                  * contention. My benchmark gave a factor of 2
     1258             :                  * improvement for lock/unlock.
    1259             :                  */
    1260     1897106 :                 struct g_lock_lock_simple_state state = {
    1261             :                         .ctx = ctx,
    1262      948553 :                         .me = messaging_server_id(ctx->msg),
    1263             :                         .type = type,
    1264             :                         .cb_fn = cb_fn,
    1265             :                         .cb_private = cb_private,
    1266             :                 };
    1267      948553 :                 status = dbwrap_do_locked(
    1268             :                         ctx->db, key, g_lock_lock_simple_fn, &state);
    1269      948553 :                 if (!NT_STATUS_IS_OK(status)) {
    1270           0 :                         DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
    1271             :                                   nt_errstr(status));
    1272      948531 :                         return status;
    1273             :                 }
    1274             : 
    1275      948553 :                 DBG_DEBUG("status=%s, state.status=%s\n",
    1276             :                           nt_errstr(status),
    1277             :                           nt_errstr(state.status));
    1278             : 
    1279      948553 :                 if (NT_STATUS_IS_OK(state.status)) {
    1280      490851 :                         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
    1281      490822 :                                 const char *name = dbwrap_name(ctx->db);
    1282      490822 :                                 dbwrap_lock_order_lock(name, ctx->lock_order);
    1283             :                         }
    1284      490851 :                         return NT_STATUS_OK;
    1285             :                 }
    1286      457702 :                 if (NT_STATUS_EQUAL(state.status, NT_STATUS_WAS_UNLOCKED)) {
    1287             :                         /* without dbwrap_lock_order_lock() */
    1288      457680 :                         return NT_STATUS_OK;
    1289             :                 }
    1290          22 :                 if (!NT_STATUS_EQUAL(
    1291             :                             state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1292           0 :                         return state.status;
    1293             :                 }
    1294             : 
    1295          22 :                 if (timeval_is_zero(&timeout)) {
    1296           0 :                         return NT_STATUS_LOCK_NOT_GRANTED;
    1297             :                 }
    1298             : 
    1299             :                 /*
     1300             :                  * Fall back to the full g_lock_trylock logic;
     1301             :                  * g_lock_lock_simple_fn() called above only covers
     1302             :                  * the uncontended path.
    1303             :                  */
    1304             :         }
    1305             : 
    1306          25 :         frame = talloc_stackframe();
    1307          25 :         status = NT_STATUS_NO_MEMORY;
    1308             : 
    1309          25 :         ev = samba_tevent_context_init(frame);
    1310          25 :         if (ev == NULL) {
    1311           0 :                 goto fail;
    1312             :         }
    1313          25 :         req = g_lock_lock_send(frame, ev, ctx, key, type, cb_fn, cb_private);
    1314          25 :         if (req == NULL) {
    1315           0 :                 goto fail;
    1316             :         }
    1317          25 :         end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
    1318          25 :         if (!tevent_req_set_endtime(req, ev, end)) {
    1319           0 :                 goto fail;
    1320             :         }
    1321          25 :         if (!tevent_req_poll_ntstatus(req, ev, &status)) {
    1322           0 :                 goto fail;
    1323             :         }
    1324          25 :         status = g_lock_lock_recv(req);
    1325          25 :  fail:
    1326          25 :         TALLOC_FREE(frame);
    1327          25 :         return status;
    1328             : }
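
For the synchronous wrapper above, a hypothetical caller could look like the sketch below. my_locked_update() is not part of g_lock.c; the key construction and the timeval_set() timeout are illustrative assumptions, and the g_lock_ctx is assumed to have been initialized elsewhere.

        static NTSTATUS my_locked_update(struct g_lock_ctx *ctx)
        {
                const char *name = "my-example-lock";
                TDB_DATA key = {
                        .dptr = discard_const_p(uint8_t, name),
                        .dsize = strlen(name) + 1,
                };
                NTSTATUS status;

                status = g_lock_lock(ctx,
                                     key,
                                     G_LOCK_WRITE,
                                     timeval_set(10, 0), /* give up after 10s */
                                     NULL,               /* cb_fn */
                                     NULL);              /* cb_private */
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }

                /* ... modify whatever state the lock protects ... */

                return g_lock_unlock(ctx, key);
        }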
    1329             : 
    1330             : struct g_lock_unlock_state {
    1331             :         struct server_id self;
    1332             :         NTSTATUS status;
    1333             : };
    1334             : 
    1335      491129 : static void g_lock_unlock_fn(
    1336             :         struct db_record *rec,
    1337             :         TDB_DATA value,
    1338             :         void *private_data)
    1339             : {
    1340      491129 :         struct g_lock_unlock_state *state = private_data;
    1341         831 :         struct server_id_buf tmp1, tmp2;
    1342         831 :         struct g_lock lck;
    1343         831 :         size_t i;
    1344         831 :         bool ok, exclusive;
    1345             : 
    1346      491129 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1347      491129 :         if (!ok) {
    1348           0 :                 DBG_DEBUG("g_lock_parse() failed\n");
    1349           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1350           0 :                 return;
    1351             :         }
    1352             : 
    1353      491129 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
    1354             : 
    1355      491974 :         for (i=0; i<lck.num_shared; i++) {
    1356          20 :                 struct server_id shared;
    1357          20 :                 g_lock_get_shared(&lck, i, &shared);
    1358          20 :                 if (server_id_equal(&state->self, &shared)) {
    1359           0 :                         break;
    1360             :                 }
    1361             :         }
    1362             : 
    1363      491129 :         if (i < lck.num_shared) {
    1364           6 :                 if (exclusive) {
    1365           0 :                         DBG_DEBUG("%s both exclusive and shared (%zu)\n",
    1366             :                                   server_id_str_buf(state->self, &tmp1),
    1367             :                                   i);
    1368           0 :                         state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1369           0 :                         return;
    1370             :                 }
    1371           6 :                 g_lock_del_shared(&lck, i);
    1372             :         } else {
    1373      491123 :                 if (!exclusive) {
    1374           1 :                         DBG_DEBUG("Lock not found, self=%s, lck.exclusive=%s, "
    1375             :                                   "num_shared=%zu\n",
    1376             :                                   server_id_str_buf(state->self, &tmp1),
    1377             :                                   server_id_str_buf(lck.exclusive, &tmp2),
    1378             :                                   lck.num_shared);
    1379           1 :                         state->status = NT_STATUS_NOT_FOUND;
    1380           1 :                         return;
    1381             :                 }
    1382      491122 :                 lck.exclusive = (struct server_id) { .pid = 0 };
    1383             :         }
    1384             : 
    1385      491128 :         if ((lck.exclusive.pid == 0) &&
    1386      491128 :             (lck.num_shared == 0) &&
    1387      491121 :             (lck.datalen == 0)) {
    1388      170682 :                 state->status = dbwrap_record_delete(rec);
    1389      170682 :                 return;
    1390             :         }
    1391             : 
    1392      320446 :         if (!exclusive && lck.exclusive.pid != 0) {
    1393             :                 /*
    1394             :                  * We only had a read lock and there's
    1395             :                  * someone waiting for an exclusive lock.
    1396             :                  *
    1397             :                  * Don't alert the exclusive lock waiter
    1398             :                  * if there are still other read lock holders.
    1399             :                  */
    1400           0 :                 g_lock_cleanup_shared(&lck);
    1401           0 :                 if (lck.num_shared != 0) {
    1402           0 :                         dbwrap_watched_watch_skip_alerting(rec);
    1403             :                 }
    1404             :         }
    1405             : 
    1406      320446 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
    1407             : 
    1408      320446 :         state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1409             : }
    1410             : 
    1411      491129 : NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
    1412             : {
    1413      491129 :         struct g_lock_unlock_state state = {
    1414      491129 :                 .self = messaging_server_id(ctx->msg),
    1415             :         };
    1416         831 :         NTSTATUS status;
    1417             : 
    1418      491129 :         SMB_ASSERT(!ctx->busy);
    1419             : 
    1420      491129 :         status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
    1421      491129 :         if (!NT_STATUS_IS_OK(status)) {
    1422           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1423             :                             nt_errstr(status));
    1424           0 :                 return status;
    1425             :         }
    1426      491129 :         if (!NT_STATUS_IS_OK(state.status)) {
    1427           1 :                 DBG_WARNING("g_lock_unlock_fn failed: %s\n",
    1428             :                             nt_errstr(state.status));
    1429           1 :                 return state.status;
    1430             :         }
    1431             : 
    1432      491128 :         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
    1433      490833 :                 const char *name = dbwrap_name(ctx->db);
    1434      490833 :                 dbwrap_lock_order_unlock(name, ctx->lock_order);
    1435             :         }
    1436             : 
    1437      491128 :         return NT_STATUS_OK;
    1438             : }
    1439             : 
    1440             : struct g_lock_writev_data_state {
    1441             :         TDB_DATA key;
    1442             :         struct server_id self;
    1443             :         const TDB_DATA *dbufs;
    1444             :         size_t num_dbufs;
    1445             :         NTSTATUS status;
    1446             : };
    1447             : 
    1448      172281 : static void g_lock_writev_data_fn(
    1449             :         struct db_record *rec,
    1450             :         TDB_DATA value,
    1451             :         void *private_data)
    1452             : {
    1453      172281 :         struct g_lock_writev_data_state *state = private_data;
    1454         356 :         struct g_lock lck;
    1455         356 :         bool exclusive;
    1456         356 :         bool ok;
    1457             : 
    1458             :         /*
    1459             :          * We're holding an exclusive write lock.
    1460             :          *
    1461             :          * Now we're updating the content of the record.
    1462             :          *
     1463             :          * We should not wake up any other waiters; all they
     1464             :          * would find is that we're still holding a lock they
     1465             :          * are conflicting with.
    1466             :          */
    1467      172281 :         dbwrap_watched_watch_skip_alerting(rec);
    1468             : 
    1469      172281 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1470      172281 :         if (!ok) {
    1471           0 :                 DBG_DEBUG("g_lock_parse for %s failed\n",
    1472             :                           tdb_data_dbg(state->key));
    1473           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1474           0 :                 return;
    1475             :         }
    1476             : 
    1477      172281 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
    1478             : 
    1479             :         /*
     1480             :          * Make sure we really are exclusive: we are already marked
     1481             :          * as exclusive while we are still waiting for an exclusive lock.
    1482             :          */
    1483      172281 :         exclusive &= (lck.num_shared == 0);
    1484             : 
    1485      172281 :         if (!exclusive) {
    1486           1 :                 struct server_id_buf buf1, buf2;
    1487           1 :                 DBG_DEBUG("Not locked by us: self=%s, lck.exclusive=%s, "
    1488             :                           "lck.num_shared=%zu\n",
    1489             :                           server_id_str_buf(state->self, &buf1),
    1490             :                           server_id_str_buf(lck.exclusive, &buf2),
    1491             :                           lck.num_shared);
    1492           1 :                 state->status = NT_STATUS_NOT_LOCKED;
    1493           1 :                 return;
    1494             :         }
    1495             : 
    1496      172280 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1497      172280 :         lck.data = NULL;
    1498      172280 :         lck.datalen = 0;
    1499      172280 :         state->status = g_lock_store(
    1500             :                 rec, &lck, NULL, state->dbufs, state->num_dbufs);
    1501             : }
    1502             : 
    1503      172281 : NTSTATUS g_lock_writev_data(
    1504             :         struct g_lock_ctx *ctx,
    1505             :         TDB_DATA key,
    1506             :         const TDB_DATA *dbufs,
    1507             :         size_t num_dbufs)
    1508             : {
    1509      344562 :         struct g_lock_writev_data_state state = {
    1510             :                 .key = key,
    1511      172281 :                 .self = messaging_server_id(ctx->msg),
    1512             :                 .dbufs = dbufs,
    1513             :                 .num_dbufs = num_dbufs,
    1514             :         };
    1515         356 :         NTSTATUS status;
    1516             : 
    1517      172281 :         SMB_ASSERT(!ctx->busy);
    1518             : 
    1519      172281 :         status = dbwrap_do_locked(
    1520             :                 ctx->db, key, g_lock_writev_data_fn, &state);
    1521      172281 :         if (!NT_STATUS_IS_OK(status)) {
    1522           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1523             :                             nt_errstr(status));
    1524           0 :                 return status;
    1525             :         }
    1526      172281 :         if (!NT_STATUS_IS_OK(state.status)) {
    1527           1 :                 DBG_WARNING("g_lock_writev_data_fn failed: %s\n",
    1528             :                             nt_errstr(state.status));
    1529           1 :                 return state.status;
    1530             :         }
    1531             : 
    1532      172280 :         return NT_STATUS_OK;
    1533             : }
    1534             : 
    1535           4 : NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
    1536             :                            const uint8_t *buf, size_t buflen)
    1537             : {
    1538           4 :         TDB_DATA dbuf = {
    1539             :                 .dptr = discard_const_p(uint8_t, buf),
    1540             :                 .dsize = buflen,
    1541             :         };
    1542           4 :         return g_lock_writev_data(ctx, key, &dbuf, 1);
    1543             : }
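
g_lock_writev_data() and g_lock_write_data() only succeed while the caller holds the key's G_LOCK_WRITE lock; otherwise g_lock_writev_data_fn() above fails with NT_STATUS_NOT_LOCKED. A hypothetical sketch for storing a two-part payload (my_store_payload() is an invented helper, not part of g_lock.c):

        static NTSTATUS my_store_payload(struct g_lock_ctx *ctx,
                                         TDB_DATA key,
                                         const uint8_t *hdr, size_t hdrlen,
                                         const uint8_t *body, size_t bodylen)
        {
                TDB_DATA dbufs[2] = {
                        { .dptr = discard_const_p(uint8_t, hdr),
                          .dsize = hdrlen },
                        { .dptr = discard_const_p(uint8_t, body),
                          .dsize = bodylen },
                };

                /* Requires the G_LOCK_WRITE lock on key to be held already. */
                return g_lock_writev_data(ctx, key, dbufs, 2);
        }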
    1544             : 
    1545             : struct g_lock_locks_state {
    1546             :         int (*fn)(TDB_DATA key, void *private_data);
    1547             :         void *private_data;
    1548             : };
    1549             : 
    1550       18777 : static int g_lock_locks_fn(struct db_record *rec, void *priv)
    1551             : {
    1552           0 :         TDB_DATA key;
    1553       18777 :         struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;
    1554             : 
    1555       18777 :         key = dbwrap_record_get_key(rec);
    1556       18777 :         return state->fn(key, state->private_data);
    1557             : }
    1558             : 
    1559        6333 : int g_lock_locks(struct g_lock_ctx *ctx,
    1560             :                  int (*fn)(TDB_DATA key, void *private_data),
    1561             :                  void *private_data)
    1562             : {
    1563           0 :         struct g_lock_locks_state state;
    1564           0 :         NTSTATUS status;
    1565           0 :         int count;
    1566             : 
    1567        6333 :         SMB_ASSERT(!ctx->busy);
    1568             : 
    1569        6333 :         state.fn = fn;
    1570        6333 :         state.private_data = private_data;
    1571             : 
    1572        6333 :         status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
    1573        6333 :         if (!NT_STATUS_IS_OK(status)) {
    1574           0 :                 return -1;
    1575             :         }
    1576        6333 :         return count;
    1577             : }
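
g_lock_locks() wraps dbwrap_traverse_read() and hands each record's key to the callback. As a hypothetical example, counting all records could look like this (my_count_* are invented names; returning 0 from the callback keeps the traverse going):

        static int my_count_fn(TDB_DATA key, void *private_data)
        {
                size_t *count = private_data;

                *count += 1;
                return 0;       /* keep traversing */
        }

        static int my_count_locks(struct g_lock_ctx *ctx)
        {
                size_t count = 0;
                int ret;

                ret = g_lock_locks(ctx, my_count_fn, &count);
                if (ret < 0) {
                        return ret;
                }
                return (int)count;
        }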
    1578             : 
    1579             : struct g_lock_dump_state {
    1580             :         TALLOC_CTX *mem_ctx;
    1581             :         TDB_DATA key;
    1582             :         void (*fn)(struct server_id exclusive,
    1583             :                    size_t num_shared,
    1584             :                    const struct server_id *shared,
    1585             :                    const uint8_t *data,
    1586             :                    size_t datalen,
    1587             :                    void *private_data);
    1588             :         void *private_data;
    1589             :         NTSTATUS status;
    1590             :         enum dbwrap_req_state req_state;
    1591             : };
    1592             : 
    1593      262815 : static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
    1594             :                            void *private_data)
    1595             : {
    1596      262815 :         struct g_lock_dump_state *state = private_data;
    1597      262815 :         struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
    1598      262815 :         struct server_id *shared = NULL;
    1599        1377 :         size_t i;
    1600        1377 :         bool ok;
    1601             : 
    1602      262815 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
    1603      262815 :         if (!ok) {
    1604           0 :                 DBG_DEBUG("g_lock_parse failed for %s\n",
    1605             :                           tdb_data_dbg(state->key));
    1606           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1607           0 :                 return;
    1608             :         }
    1609             : 
    1610      262815 :         if (lck.num_shared > 0) {
    1611           7 :                 shared = talloc_array(
    1612             :                         state->mem_ctx, struct server_id, lck.num_shared);
    1613           7 :                 if (shared == NULL) {
    1614           0 :                         DBG_DEBUG("talloc failed\n");
    1615           0 :                         state->status = NT_STATUS_NO_MEMORY;
    1616           0 :                         return;
    1617             :                 }
    1618             :         }
    1619             : 
    1620      262833 :         for (i=0; i<lck.num_shared; i++) {
    1621          18 :                 g_lock_get_shared(&lck, i, &shared[i]);
    1622             :         }
    1623             : 
    1624      262815 :         state->fn(lck.exclusive,
    1625             :                   lck.num_shared,
    1626             :                   shared,
    1627      262815 :                   lck.data,
    1628             :                   lck.datalen,
    1629             :                   state->private_data);
    1630             : 
    1631      262815 :         TALLOC_FREE(shared);
    1632             : 
    1633      262815 :         state->status = NT_STATUS_OK;
    1634             : }
    1635             : 
    1636     1087765 : NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
    1637             :                      void (*fn)(struct server_id exclusive,
    1638             :                                 size_t num_shared,
    1639             :                                 const struct server_id *shared,
    1640             :                                 const uint8_t *data,
    1641             :                                 size_t datalen,
    1642             :                                 void *private_data),
    1643             :                      void *private_data)
    1644             : {
    1645     1087765 :         struct g_lock_dump_state state = {
    1646             :                 .mem_ctx = ctx, .key = key,
    1647             :                 .fn = fn, .private_data = private_data
    1648             :         };
    1649        1812 :         NTSTATUS status;
    1650             : 
    1651     1087765 :         SMB_ASSERT(!ctx->busy);
    1652             : 
    1653     1087765 :         status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
    1654     1087765 :         if (!NT_STATUS_IS_OK(status)) {
    1655      824950 :                 DBG_DEBUG("dbwrap_parse_record returned %s\n",
    1656             :                           nt_errstr(status));
    1657      824950 :                 return status;
    1658             :         }
    1659      262815 :         if (!NT_STATUS_IS_OK(state.status)) {
    1660           0 :                 DBG_DEBUG("g_lock_dump_fn returned %s\n",
    1661             :                           nt_errstr(state.status));
    1662           0 :                 return state.status;
    1663             :         }
    1664      262815 :         return NT_STATUS_OK;
    1665             : }
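
g_lock_dump() parses a single record and passes the lock holders and payload to the callback; the pointers handed to the callback are only valid for its duration, so anything that must outlive the call has to be copied out. A hypothetical sketch that merely logs the state (my_dump_* are invented names):

        static void my_dump_fn(struct server_id exclusive,
                               size_t num_shared,
                               const struct server_id *shared,
                               const uint8_t *data,
                               size_t datalen,
                               void *private_data)
        {
                struct server_id_buf buf;

                DBG_NOTICE("exclusive=%s num_shared=%zu datalen=%zu\n",
                           server_id_str_buf(exclusive, &buf),
                           num_shared,
                           datalen);
        }

        static void my_dump_key(struct g_lock_ctx *ctx, TDB_DATA key)
        {
                NTSTATUS status = g_lock_dump(ctx, key, my_dump_fn, NULL);

                if (!NT_STATUS_IS_OK(status)) {
                        DBG_NOTICE("g_lock_dump failed: %s\n",
                                   nt_errstr(status));
                }
        }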
    1666             : 
    1667             : static void g_lock_dump_done(struct tevent_req *subreq);
    1668             : 
    1669           0 : struct tevent_req *g_lock_dump_send(
    1670             :         TALLOC_CTX *mem_ctx,
    1671             :         struct tevent_context *ev,
    1672             :         struct g_lock_ctx *ctx,
    1673             :         TDB_DATA key,
    1674             :         void (*fn)(struct server_id exclusive,
    1675             :                    size_t num_shared,
    1676             :                    const struct server_id *shared,
    1677             :                    const uint8_t *data,
    1678             :                    size_t datalen,
    1679             :                    void *private_data),
    1680             :         void *private_data)
    1681             : {
    1682           0 :         struct tevent_req *req = NULL, *subreq = NULL;
    1683           0 :         struct g_lock_dump_state *state = NULL;
    1684             : 
    1685           0 :         SMB_ASSERT(!ctx->busy);
    1686             : 
    1687           0 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_dump_state);
    1688           0 :         if (req == NULL) {
    1689           0 :                 return NULL;
    1690             :         }
    1691           0 :         state->mem_ctx = state;
    1692           0 :         state->key = key;
    1693           0 :         state->fn = fn;
    1694           0 :         state->private_data = private_data;
    1695             : 
    1696           0 :         SMB_ASSERT(!ctx->busy);
    1697             : 
    1698           0 :         subreq = dbwrap_parse_record_send(
    1699             :                 state,
    1700             :                 ev,
    1701             :                 ctx->db,
    1702             :                 key,
    1703             :                 g_lock_dump_fn,
    1704             :                 state,
    1705           0 :                 &state->req_state);
    1706           0 :         if (tevent_req_nomem(subreq, req)) {
    1707           0 :                 return tevent_req_post(req, ev);
    1708             :         }
    1709           0 :         tevent_req_set_callback(subreq, g_lock_dump_done, req);
    1710           0 :         return req;
    1711             : }
    1712             : 
    1713           0 : static void g_lock_dump_done(struct tevent_req *subreq)
    1714             : {
    1715           0 :         struct tevent_req *req = tevent_req_callback_data(
    1716             :                 subreq, struct tevent_req);
    1717           0 :         struct g_lock_dump_state *state = tevent_req_data(
    1718             :                 req, struct g_lock_dump_state);
    1719           0 :         NTSTATUS status;
    1720             : 
    1721           0 :         status = dbwrap_parse_record_recv(subreq);
    1722           0 :         TALLOC_FREE(subreq);
    1723           0 :         if (tevent_req_nterror(req, status) ||
    1724           0 :             tevent_req_nterror(req, state->status)) {
    1725           0 :                 return;
    1726             :         }
    1727           0 :         tevent_req_done(req);
    1728             : }
    1729             : 
    1730           0 : NTSTATUS g_lock_dump_recv(struct tevent_req *req)
    1731             : {
    1732           0 :         return tevent_req_simple_recv_ntstatus(req);
    1733             : }
    1734             : 
    1735      193204 : int g_lock_seqnum(struct g_lock_ctx *ctx)
    1736             : {
    1737      193204 :         return dbwrap_get_seqnum(ctx->db);
    1738             : }
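
A caller can use the sequence number to cheaply detect whether the database changed since it last looked, assuming the underlying dbwrap backend maintains a seqnum. A minimal, hypothetical cache-validity check (my_db_changed() is an invented helper):

        static bool my_db_changed(struct g_lock_ctx *ctx, int *last_seqnum)
        {
                int seqnum = g_lock_seqnum(ctx);

                if (seqnum == *last_seqnum) {
                        return false;   /* nothing changed since last look */
                }
                *last_seqnum = seqnum;
                return true;
        }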
    1739             : 
    1740             : struct g_lock_watch_data_state {
    1741             :         struct tevent_context *ev;
    1742             :         struct g_lock_ctx *ctx;
    1743             :         TDB_DATA key;
    1744             :         struct server_id blocker;
    1745             :         bool blockerdead;
    1746             :         uint64_t unique_lock_epoch;
    1747             :         uint64_t unique_data_epoch;
    1748             :         uint64_t watch_instance;
    1749             :         NTSTATUS status;
    1750             : };
    1751             : 
    1752             : static void g_lock_watch_data_done(struct tevent_req *subreq);
    1753             : 
    1754         653 : static void g_lock_watch_data_send_fn(
    1755             :         struct db_record *rec,
    1756             :         TDB_DATA value,
    1757             :         void *private_data)
    1758             : {
    1759         653 :         struct tevent_req *req = talloc_get_type_abort(
    1760             :                 private_data, struct tevent_req);
    1761         653 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1762             :                 req, struct g_lock_watch_data_state);
    1763         653 :         struct tevent_req *subreq = NULL;
    1764           3 :         struct g_lock lck;
    1765           3 :         bool ok;
    1766             : 
    1767         653 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1768         653 :         if (!ok) {
    1769           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1770           0 :                 return;
    1771             :         }
    1772         653 :         state->unique_lock_epoch = lck.unique_lock_epoch;
    1773         653 :         state->unique_data_epoch = lck.unique_data_epoch;
    1774             : 
    1775         653 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
    1776             : 
    1777         653 :         subreq = dbwrap_watched_watch_send(
    1778             :                 state, state->ev, rec, 0, state->blocker);
    1779         653 :         if (subreq == NULL) {
    1780           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1781           0 :                 return;
    1782             :         }
    1783         653 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1784             : 
    1785         653 :         state->status = NT_STATUS_EVENT_PENDING;
    1786             : }
    1787             : 
    1788         653 : struct tevent_req *g_lock_watch_data_send(
    1789             :         TALLOC_CTX *mem_ctx,
    1790             :         struct tevent_context *ev,
    1791             :         struct g_lock_ctx *ctx,
    1792             :         TDB_DATA key,
    1793             :         struct server_id blocker)
    1794             : {
    1795         653 :         struct tevent_req *req = NULL;
    1796         653 :         struct g_lock_watch_data_state *state = NULL;
    1797           3 :         NTSTATUS status;
    1798             : 
    1799         653 :         SMB_ASSERT(!ctx->busy);
    1800             : 
    1801         653 :         req = tevent_req_create(
    1802             :                 mem_ctx, &state, struct g_lock_watch_data_state);
    1803         653 :         if (req == NULL) {
    1804           0 :                 return NULL;
    1805             :         }
    1806         653 :         state->ev = ev;
    1807         653 :         state->ctx = ctx;
    1808         653 :         state->blocker = blocker;
    1809             : 
    1810         653 :         state->key = tdb_data_talloc_copy(state, key);
    1811         653 :         if (tevent_req_nomem(state->key.dptr, req)) {
    1812           0 :                 return tevent_req_post(req, ev);
    1813             :         }
    1814             : 
    1815         653 :         status = dbwrap_do_locked(
    1816             :                 ctx->db, key, g_lock_watch_data_send_fn, req);
    1817         653 :         if (tevent_req_nterror(req, status)) {
    1818           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1819           0 :                 return tevent_req_post(req, ev);
    1820             :         }
    1821             : 
    1822         653 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1823         650 :                 return req;
    1824             :         }
    1825           0 :         if (tevent_req_nterror(req, state->status)) {
    1826           0 :                 return tevent_req_post(req, ev);
    1827             :         }
    1828           0 :         tevent_req_done(req);
    1829           0 :         return tevent_req_post(req, ev);
    1830             : }
    1831             : 
    1832        1003 : static void g_lock_watch_data_done_fn(
    1833             :         struct db_record *rec,
    1834             :         TDB_DATA value,
    1835             :         void *private_data)
    1836             : {
    1837        1003 :         struct tevent_req *req = talloc_get_type_abort(
    1838             :                 private_data, struct tevent_req);
    1839        1003 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1840             :                 req, struct g_lock_watch_data_state);
    1841        1003 :         struct tevent_req *subreq = NULL;
    1842           3 :         struct g_lock lck;
    1843           3 :         bool ok;
    1844             : 
    1845        1003 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1846        1003 :         if (!ok) {
    1847           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1848           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1849           0 :                 return;
    1850             :         }
    1851             : 
    1852        1003 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
    1853          87 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1854          87 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
    1855             :                           "state->unique_data_epoch=%"PRIu64"\n",
    1856             :                           lck.unique_data_epoch,
    1857             :                           state->unique_data_epoch);
    1858          87 :                 state->status = NT_STATUS_OK;
    1859          87 :                 return;
    1860             :         }
    1861             : 
    1862             :         /*
     1863             :          * If the lock epoch changed, we'd better
     1864             :          * remove ourselves from the waiter list
     1865             :          * (most likely the first position)
     1866             :          * and re-add ourselves at the end of the list.
     1867             :          *
     1868             :          * This gives other lock waiters a chance
     1869             :          * to make progress.
     1870             :          *
     1871             :          * Otherwise we keep our waiter instance alive
     1872             :          * and keep waiting (most likely at the first position).
    1873             :          */
    1874         916 :         if (lck.unique_lock_epoch != state->unique_lock_epoch) {
    1875         830 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1876         830 :                 state->watch_instance = dbwrap_watched_watch_add_instance(rec);
    1877         830 :                 state->unique_lock_epoch = lck.unique_lock_epoch;
    1878             :         }
    1879             : 
    1880         916 :         subreq = dbwrap_watched_watch_send(
    1881             :                 state, state->ev, rec, state->watch_instance, state->blocker);
    1882         916 :         if (subreq == NULL) {
    1883           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1884           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1885           0 :                 return;
    1886             :         }
    1887         916 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1888             : 
    1889         916 :         state->status = NT_STATUS_EVENT_PENDING;
    1890             : }
    1891             : 
    1892        1003 : static void g_lock_watch_data_done(struct tevent_req *subreq)
    1893             : {
    1894        1003 :         struct tevent_req *req = tevent_req_callback_data(
    1895             :                 subreq, struct tevent_req);
    1896        1003 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1897             :                 req, struct g_lock_watch_data_state);
    1898           3 :         NTSTATUS status;
    1899        1003 :         uint64_t instance = 0;
    1900             : 
    1901        1003 :         status = dbwrap_watched_watch_recv(
    1902             :                 subreq, &instance, &state->blockerdead, &state->blocker);
    1903        1003 :         TALLOC_FREE(subreq);
    1904        1003 :         if (tevent_req_nterror(req, status)) {
    1905           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
    1906             :                           nt_errstr(status));
    1907         916 :                 return;
    1908             :         }
    1909             : 
    1910        1003 :         state->watch_instance = instance;
    1911             : 
    1912        1003 :         status = dbwrap_do_locked(
    1913        1003 :                 state->ctx->db, state->key, g_lock_watch_data_done_fn, req);
    1914        1003 :         if (tevent_req_nterror(req, status)) {
    1915           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1916           0 :                 return;
    1917             :         }
    1918        1003 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1919         914 :                 return;
    1920             :         }
    1921          87 :         if (tevent_req_nterror(req, state->status)) {
    1922           0 :                 return;
    1923             :         }
    1924          87 :         tevent_req_done(req);
    1925             : }
    1926             : 
    1927          86 : NTSTATUS g_lock_watch_data_recv(
    1928             :         struct tevent_req *req,
    1929             :         bool *blockerdead,
    1930             :         struct server_id *blocker)
    1931             : {
    1932          86 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1933             :                 req, struct g_lock_watch_data_state);
    1934           0 :         NTSTATUS status;
    1935             : 
    1936          86 :         if (tevent_req_is_nterror(req, &status)) {
    1937           0 :                 return status;
    1938             :         }
    1939          86 :         if (blockerdead != NULL) {
    1940          86 :                 *blockerdead = state->blockerdead;
    1941             :         }
    1942          86 :         if (blocker != NULL) {
    1943          86 :                 *blocker = state->blocker;
    1944             :         }
    1945             : 
    1946          86 :         return NT_STATUS_OK;
    1947             : }
    1948             : 
    1949        2328 : static void g_lock_wake_watchers_fn(
    1950             :         struct db_record *rec,
    1951             :         TDB_DATA value,
    1952             :         void *private_data)
    1953             : {
    1954        2328 :         struct g_lock lck = { .exclusive.pid = 0 };
    1955          10 :         NTSTATUS status;
    1956          10 :         bool ok;
    1957             : 
    1958        2328 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1959        2328 :         if (!ok) {
    1960           0 :                 DBG_WARNING("g_lock_parse failed\n");
    1961           0 :                 return;
    1962             :         }
    1963             : 
    1964        2328 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1965             : 
    1966        2328 :         status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1967        2328 :         if (!NT_STATUS_IS_OK(status)) {
    1968           0 :                 DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(status));
    1969           0 :                 return;
    1970             :         }
    1971             : }
    1972             : 
    1973        2328 : void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
    1974             : {
    1975          10 :         NTSTATUS status;
    1976             : 
    1977        2328 :         SMB_ASSERT(!ctx->busy);
    1978             : 
    1979        2328 :         status = dbwrap_do_locked(ctx->db, key, g_lock_wake_watchers_fn, NULL);
    1980        2328 :         if (!NT_STATUS_IS_OK(status)) {
    1981           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n",
    1982             :                           nt_errstr(status));
    1983             :         }
    1984        2328 : }
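
g_lock_watch_data_send()/g_lock_watch_data_recv() and g_lock_wake_watchers() are the consumer and producer sides of data-change notification: a watcher completes once unique_data_epoch changes, which happens on g_lock_writev_data() or an explicit g_lock_wake_watchers() call. A hypothetical watcher sketch (my_watch_* names are invented; a running tevent loop and a g_lock_ctx initialized elsewhere are assumed):

        static void my_watch_done(struct tevent_req *subreq)
        {
                bool blockerdead = false;
                struct server_id blocker = { .pid = 0 };
                NTSTATUS status;

                status = g_lock_watch_data_recv(subreq, &blockerdead, &blocker);
                TALLOC_FREE(subreq);
                if (!NT_STATUS_IS_OK(status)) {
                        DBG_WARNING("watch failed: %s\n", nt_errstr(status));
                        return;
                }

                /* ... re-read the data and react to the change ... */
        }

        static bool my_watch_start(TALLOC_CTX *mem_ctx,
                                   struct tevent_context *ev,
                                   struct g_lock_ctx *ctx,
                                   TDB_DATA key,
                                   struct server_id blocker)
        {
                struct tevent_req *subreq = g_lock_watch_data_send(
                        mem_ctx, ev, ctx, key, blocker);

                if (subreq == NULL) {
                        return false;
                }
                tevent_req_set_callback(subreq, my_watch_done, NULL);
                return true;
        }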

Generated by: LCOV version 1.14