LCOV - code coverage report
Current view: top level - source3/lib - g_lock.c (source / functions)
Test: coverage report for master 98b443d9
Date: 2024-05-31 13:13:24
Coverage: Lines: 686 of 880 hit (78.0 %), Functions: 44 of 47 hit (93.6 %)
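
For orientation before the annotated listing: g_lock_parse() and g_lock_store() below treat each g_lock.tdb record as one flat byte buffer: a packed server_id for the exclusive holder, a 64-bit lock epoch, a 64-bit data epoch, a 32-bit shared-holder count, the packed shared holders, and finally the opaque user data. The following standalone sketch re-derives that decoding in plain C. It is illustrative only: the 24-byte packed server_id size and the little-endian field encoding are assumptions about the real helpers (server_id_get(), BVAL(), IVAL()), not facts stated by this report.

/*
 * Illustrative decoder for the record layout handled by g_lock_parse()
 * in the listing below.  Assumptions (not taken from this report):
 * a packed server_id is 24 bytes and integer fields are little-endian.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_ID_LEN 24          /* assumed packed server_id size */

struct demo_g_lock {
        const uint8_t *exclusive;   /* packed id of the exclusive holder */
        uint64_t lock_epoch;        /* changes whenever the lock changes */
        uint64_t data_epoch;        /* changes whenever the data changes */
        uint32_t num_shared;        /* number of shared (read) holders   */
        const uint8_t *shared;      /* num_shared packed server_ids      */
        const uint8_t *data;        /* opaque payload                    */
        size_t datalen;
};

static uint64_t demo_le64(const uint8_t *p)
{
        uint64_t v = 0;
        for (int i = 7; i >= 0; i--) {
                v = (v << 8) | p[i];
        }
        return v;
}

static uint32_t demo_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static bool demo_g_lock_parse(const uint8_t *buf, size_t len,
                              struct demo_g_lock *lck)
{
        const size_t hdr = DEMO_ID_LEN + 8 + 8 + 4;
        size_t shared_len;

        if (len < hdr) {
                /*
                 * Short or missing record: treated as "no lock held"
                 * (the real code additionally seeds fresh epochs here).
                 */
                memset(lck, 0, sizeof(*lck));
                return true;
        }

        lck->exclusive  = buf;
        lck->lock_epoch = demo_le64(buf + DEMO_ID_LEN);
        lck->data_epoch = demo_le64(buf + DEMO_ID_LEN + 8);
        lck->num_shared = demo_le32(buf + DEMO_ID_LEN + 16);

        buf += hdr;
        len -= hdr;

        if (lck->num_shared > len / DEMO_ID_LEN) {
                return false;   /* corrupt record */
        }
        shared_len = (size_t)lck->num_shared * DEMO_ID_LEN;

        lck->shared  = buf;
        lck->data    = buf + shared_len;
        lck->datalen = len - shared_len;
        return true;
}

g_lock_store() below writes the same layout back via dbwrap_record_storev(), optionally appending one extra packed server_id when a new shared holder is added.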

          Line data    Source code
       1             : /*
       2             :    Unix SMB/CIFS implementation.
       3             :    global locks based on dbwrap and messaging
       4             :    Copyright (C) 2009 by Volker Lendecke
       5             : 
       6             :    This program is free software; you can redistribute it and/or modify
       7             :    it under the terms of the GNU General Public License as published by
       8             :    the Free Software Foundation; either version 3 of the License, or
       9             :    (at your option) any later version.
      10             : 
      11             :    This program is distributed in the hope that it will be useful,
      12             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      14             :    GNU General Public License for more details.
      15             : 
      16             :    You should have received a copy of the GNU General Public License
      17             :    along with this program.  If not, see <http://www.gnu.org/licenses/>.
      18             : */
      19             : 
      20             : #include "replace.h"
      21             : #include "system/filesys.h"
      22             : #include "lib/util/server_id.h"
      23             : #include "lib/util/debug.h"
      24             : #include "lib/util/talloc_stack.h"
      25             : #include "lib/util/samba_util.h"
      26             : #include "lib/util_path.h"
      27             : #include "dbwrap/dbwrap.h"
      28             : #include "dbwrap/dbwrap_open.h"
      29             : #include "dbwrap/dbwrap_watch.h"
      30             : #include "g_lock.h"
      31             : #include "util_tdb.h"
      32             : #include "../lib/util/tevent_ntstatus.h"
      33             : #include "messages.h"
      34             : #include "serverid.h"
      35             : 
      36             : struct g_lock_ctx {
      37             :         struct db_context *db;
      38             :         struct messaging_context *msg;
      39             :         enum dbwrap_lock_order lock_order;
      40             :         bool busy;
      41             : };
      42             : 
      43             : struct g_lock {
      44             :         struct server_id exclusive;
      45             :         size_t num_shared;
      46             :         uint8_t *shared;
      47             :         uint64_t unique_lock_epoch;
      48             :         uint64_t unique_data_epoch;
      49             :         size_t datalen;
      50             :         uint8_t *data;
      51             : };
      52             : 
      53     1877609 : static bool g_lock_parse(uint8_t *buf, size_t buflen, struct g_lock *lck)
      54             : {
      55        4908 :         struct server_id exclusive;
      56        4908 :         size_t num_shared, shared_len;
      57        4908 :         uint64_t unique_lock_epoch;
      58        4908 :         uint64_t unique_data_epoch;
      59             : 
      60     1877609 :         if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
      61             :                       sizeof(uint64_t) +     /* unique_lock_epoch */
      62             :                       sizeof(uint64_t) +     /* unique_data_epoch */
      63             :                       sizeof(uint32_t))) {   /* num_shared */
      64     1248957 :                 struct g_lock ret = {
      65             :                         .exclusive.pid = 0,
      66      416319 :                         .unique_lock_epoch = generate_unique_u64(0),
      67      416319 :                         .unique_data_epoch = generate_unique_u64(0),
      68             :                 };
      69      416319 :                 *lck = ret;
      70      416319 :                 return true;
      71             :         }
      72             : 
      73     1461290 :         server_id_get(&exclusive, buf);
      74     1461290 :         buf += SERVER_ID_BUF_LENGTH;
      75     1461290 :         buflen -= SERVER_ID_BUF_LENGTH;
      76             : 
      77     1461290 :         unique_lock_epoch = BVAL(buf, 0);
      78     1461290 :         buf += sizeof(uint64_t);
      79     1461290 :         buflen -= sizeof(uint64_t);
      80             : 
      81     1461290 :         unique_data_epoch = BVAL(buf, 0);
      82     1461290 :         buf += sizeof(uint64_t);
      83     1461290 :         buflen -= sizeof(uint64_t);
      84             : 
      85     1461290 :         num_shared = IVAL(buf, 0);
      86     1461290 :         buf += sizeof(uint32_t);
      87     1461290 :         buflen -= sizeof(uint32_t);
      88             : 
      89     1461290 :         if (num_shared > buflen/SERVER_ID_BUF_LENGTH) {
      90           0 :                 DBG_DEBUG("num_shared=%zu, buflen=%zu\n",
      91             :                           num_shared,
      92             :                           buflen);
      93           0 :                 return false;
      94             :         }
      95             : 
      96     1461290 :         shared_len = num_shared * SERVER_ID_BUF_LENGTH;
      97             : 
      98     1461290 :         *lck = (struct g_lock) {
      99             :                 .exclusive = exclusive,
     100             :                 .num_shared = num_shared,
     101             :                 .shared = buf,
     102             :                 .unique_lock_epoch = unique_lock_epoch,
     103             :                 .unique_data_epoch = unique_data_epoch,
     104     1461290 :                 .datalen = buflen-shared_len,
     105     1461290 :                 .data = buf+shared_len,
     106             :         };
     107             : 
     108     1461290 :         return true;
     109             : }
     110             : 
     111          71 : static void g_lock_get_shared(const struct g_lock *lck,
     112             :                               size_t i,
     113             :                               struct server_id *shared)
     114             : {
     115          66 :         if (i >= lck->num_shared) {
     116           0 :                 abort();
     117             :         }
     118          70 :         server_id_get(shared, lck->shared + i*SERVER_ID_BUF_LENGTH);
     119          66 : }
     120             : 
     121          17 : static void g_lock_del_shared(struct g_lock *lck, size_t i)
     122             : {
     123          17 :         if (i >= lck->num_shared) {
     124           0 :                 abort();
     125             :         }
     126          17 :         lck->num_shared -= 1;
     127          17 :         if (i < lck->num_shared) {
     128          21 :                 memcpy(lck->shared + i*SERVER_ID_BUF_LENGTH,
     129           4 :                        lck->shared + lck->num_shared*SERVER_ID_BUF_LENGTH,
     130             :                        SERVER_ID_BUF_LENGTH);
     131             :         }
     132          17 : }
     133             : 
     134     1184632 : static NTSTATUS g_lock_store(
     135             :         struct db_record *rec,
     136             :         struct g_lock *lck,
     137             :         struct server_id *new_shared,
     138             :         const TDB_DATA *new_dbufs,
     139             :         size_t num_new_dbufs)
     140     1184632 : {
     141        2508 :         uint8_t exclusive[SERVER_ID_BUF_LENGTH];
     142        2508 :         uint8_t seqnum_buf[sizeof(uint64_t)*2];
     143        2508 :         uint8_t sizebuf[sizeof(uint32_t)];
     144        2508 :         uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];
     145             : 
     146     1184632 :         struct TDB_DATA dbufs[6 + num_new_dbufs];
     147             : 
     148     1184632 :         dbufs[0] = (TDB_DATA) {
     149             :                 .dptr = exclusive, .dsize = sizeof(exclusive),
     150             :         };
     151     1184632 :         dbufs[1] = (TDB_DATA) {
     152             :                 .dptr = seqnum_buf, .dsize = sizeof(seqnum_buf),
     153             :         };
     154     1184632 :         dbufs[2] = (TDB_DATA) {
     155             :                 .dptr = sizebuf, .dsize = sizeof(sizebuf),
     156             :         };
     157     1184632 :         dbufs[3] = (TDB_DATA) {
     158     1184632 :                 .dptr = lck->shared,
     159     1184632 :                 .dsize = lck->num_shared * SERVER_ID_BUF_LENGTH,
     160             :         };
     161     1184632 :         dbufs[4] = (TDB_DATA) { 0 };
     162     1184632 :         dbufs[5] = (TDB_DATA) {
     163     1184632 :                 .dptr = lck->data, .dsize = lck->datalen,
     164             :         };
     165             : 
     166     1184632 :         if (num_new_dbufs != 0) {
     167        1937 :                 memcpy(&dbufs[6],
     168             :                        new_dbufs,
     169             :                        num_new_dbufs * sizeof(TDB_DATA));
     170             :         }
     171             : 
     172     1184632 :         server_id_put(exclusive, lck->exclusive);
     173     1184632 :         SBVAL(seqnum_buf, 0, lck->unique_lock_epoch);
     174     1184632 :         SBVAL(seqnum_buf, 8, lck->unique_data_epoch);
     175             : 
     176     1184632 :         if (new_shared != NULL) {
     177          18 :                 if (lck->num_shared >= UINT32_MAX) {
     178           0 :                         return NT_STATUS_BUFFER_OVERFLOW;
     179             :                 }
     180             : 
     181          18 :                 server_id_put(new_shared_buf, *new_shared);
     182             : 
     183          18 :                 dbufs[4] = (TDB_DATA) {
     184             :                         .dptr = new_shared_buf,
     185             :                         .dsize = sizeof(new_shared_buf),
     186             :                 };
     187             : 
     188          18 :                 lck->num_shared += 1;
     189             :         }
     190             : 
     191     1184632 :         SIVAL(sizebuf, 0, lck->num_shared);
     192             : 
     193     1184632 :         return dbwrap_record_storev(rec, dbufs, ARRAY_SIZE(dbufs), 0);
     194             : }
     195             : 
     196         355 : struct g_lock_ctx *g_lock_ctx_init_backend(
     197             :         TALLOC_CTX *mem_ctx,
     198             :         struct messaging_context *msg,
     199             :         struct db_context **backend)
     200             : {
     201          21 :         struct g_lock_ctx *result;
     202             : 
     203         355 :         result = talloc_zero(mem_ctx, struct g_lock_ctx);
     204         355 :         if (result == NULL) {
     205           0 :                 return NULL;
     206             :         }
     207         355 :         result->msg = msg;
     208         355 :         result->lock_order = DBWRAP_LOCK_ORDER_NONE;
     209             : 
     210         355 :         result->db = db_open_watched(result, backend, msg);
     211         355 :         if (result->db == NULL) {
     212           0 :                 DBG_WARNING("db_open_watched failed\n");
     213           0 :                 TALLOC_FREE(result);
     214           0 :                 return NULL;
     215             :         }
     216         334 :         return result;
     217             : }
     218             : 
     219         196 : void g_lock_set_lock_order(struct g_lock_ctx *ctx,
     220             :                            enum dbwrap_lock_order lock_order)
     221             : {
     222         196 :         ctx->lock_order = lock_order;
     223         196 : }
     224             : 
     225         159 : struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
     226             :                                    struct messaging_context *msg)
     227             : {
     228         159 :         char *db_path = NULL;
     229         159 :         struct db_context *backend = NULL;
     230         159 :         struct g_lock_ctx *ctx = NULL;
     231             : 
     232         159 :         db_path = lock_path(mem_ctx, "g_lock.tdb");
     233         159 :         if (db_path == NULL) {
     234           0 :                 return NULL;
     235             :         }
     236             : 
     237         159 :         backend = db_open(
     238             :                 mem_ctx,
     239             :                 db_path,
     240             :                 0,
     241             :                 TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH|TDB_VOLATILE,
     242             :                 O_RDWR|O_CREAT,
     243             :                 0600,
     244             :                 DBWRAP_LOCK_ORDER_3,
     245             :                 DBWRAP_FLAG_NONE);
     246         159 :         TALLOC_FREE(db_path);
     247         159 :         if (backend == NULL) {
     248           0 :                 DBG_WARNING("Could not open g_lock.tdb\n");
     249           0 :                 return NULL;
     250             :         }
     251             : 
     252         159 :         ctx = g_lock_ctx_init_backend(mem_ctx, msg, &backend);
     253         159 :         return ctx;
     254             : }
     255             : 
     256         330 : static void g_lock_cleanup_dead(
     257             :         struct g_lock *lck,
     258             :         struct server_id *dead_blocker)
     259             : {
     260          13 :         bool exclusive_died;
     261          13 :         struct server_id_buf tmp;
     262             : 
     263         330 :         if (dead_blocker == NULL) {
     264         328 :                 return;
     265             :         }
     266             : 
     267           2 :         exclusive_died = server_id_equal(dead_blocker, &lck->exclusive);
     268             : 
     269           2 :         if (exclusive_died) {
     270           1 :                 DBG_DEBUG("Exclusive holder %s died\n",
     271             :                           server_id_str_buf(lck->exclusive, &tmp));
     272           1 :                 lck->exclusive.pid = 0;
     273             :         }
     274             : 
     275           2 :         if (lck->num_shared != 0) {
     276           1 :                 bool shared_died;
     277           1 :                 struct server_id shared;
     278             : 
     279           1 :                 g_lock_get_shared(lck, 0, &shared);
     280           1 :                 shared_died = server_id_equal(dead_blocker, &shared);
     281             : 
     282           1 :                 if (shared_died) {
     283           1 :                         DBG_DEBUG("Shared holder %s died\n",
     284             :                                   server_id_str_buf(shared, &tmp));
     285           1 :                         g_lock_del_shared(lck, 0);
     286             :                 }
     287             :         }
     288             : }
     289             : 
     290         310 : static ssize_t g_lock_find_shared(
     291             :         struct g_lock *lck,
     292             :         const struct server_id *self)
     293             : {
     294          11 :         size_t i;
     295             : 
     296         317 :         for (i=0; i<lck->num_shared; i++) {
     297          11 :                 struct server_id shared;
     298          11 :                 bool same;
     299             : 
     300          11 :                 g_lock_get_shared(lck, i, &shared);
     301             : 
     302          11 :                 same = server_id_equal(self, &shared);
     303          11 :                 if (same) {
     304           4 :                         return i;
     305             :                 }
     306             :         }
     307             : 
     308         299 :         return -1;
     309             : }
     310             : 
     311         325 : static void g_lock_cleanup_shared(struct g_lock *lck)
     312             : {
     313          26 :         size_t i;
     314          26 :         struct server_id check;
     315          26 :         bool exists;
     316             : 
     317         325 :         if (lck->num_shared == 0) {
     318         307 :                 return;
     319             :         }
     320             : 
     321             :         /*
      322             :          * Read locks can stay around forever if the process dies. Do
      323             :          * a heuristic check for process existence: check one randomly
      324             :          * chosen shared holder. Hopefully this will keep runaway
      325             :          * read locks under control.
     326             :          */
     327          18 :         i = generate_random() % lck->num_shared;
     328          18 :         g_lock_get_shared(lck, i, &check);
     329             : 
     330          18 :         exists = serverid_exists(&check);
     331          18 :         if (!exists) {
     332           7 :                 struct server_id_buf tmp;
     333           7 :                 DBG_DEBUG("Shared locker %s died -- removing\n",
     334             :                           server_id_str_buf(check, &tmp));
     335           7 :                 g_lock_del_shared(lck, i);
     336             :         }
     337             : }
     338             : 
     339             : struct g_lock_lock_cb_state {
     340             :         struct g_lock_ctx *ctx;
     341             :         struct db_record *rec;
     342             :         struct g_lock *lck;
     343             :         struct server_id *new_shared;
     344             :         g_lock_lock_cb_fn_t cb_fn;
     345             :         void *cb_private;
     346             :         TALLOC_CTX *update_mem_ctx;
     347             :         TDB_DATA updated_data;
     348             :         bool existed;
     349             :         bool modified;
     350             :         bool unlock;
     351             : };
     352             : 
     353     3248772 : NTSTATUS g_lock_lock_cb_dump(struct g_lock_lock_cb_state *cb_state,
     354             :                              void (*fn)(struct server_id exclusive,
     355             :                                         size_t num_shared,
     356             :                                         const struct server_id *shared,
     357             :                                         const uint8_t *data,
     358             :                                         size_t datalen,
     359             :                                         void *private_data),
     360             :                              void *private_data)
     361             : {
     362     3248772 :         struct g_lock *lck = cb_state->lck;
     363             : 
     364             :         /* We allow a cb_fn only for G_LOCK_WRITE for now... */
     365     3248772 :         SMB_ASSERT(lck->num_shared == 0);
     366             : 
     367     3248772 :         fn(lck->exclusive,
     368             :            0, /* num_shared */
     369             :            NULL, /* shared */
     370     3248772 :            lck->data,
     371             :            lck->datalen,
     372             :            private_data);
     373             : 
     374     3248772 :         return NT_STATUS_OK;
     375             : }
     376             : 
     377      753801 : NTSTATUS g_lock_lock_cb_writev(struct g_lock_lock_cb_state *cb_state,
     378             :                                const TDB_DATA *dbufs,
     379             :                                size_t num_dbufs)
     380             : {
     381        1915 :         NTSTATUS status;
     382             : 
     383      753801 :         status = dbwrap_merge_dbufs(&cb_state->updated_data,
     384             :                                     cb_state->update_mem_ctx,
     385             :                                     dbufs, num_dbufs);
     386      753801 :         if (!NT_STATUS_IS_OK(status)) {
     387           0 :                 return status;
     388             :         }
     389             : 
     390      753801 :         cb_state->modified = true;
     391      753801 :         cb_state->lck->data = cb_state->updated_data.dptr;
     392      753801 :         cb_state->lck->datalen = cb_state->updated_data.dsize;
     393             : 
     394      753801 :         return NT_STATUS_OK;
     395             : }
     396             : 
     397      454744 : void g_lock_lock_cb_unlock(struct g_lock_lock_cb_state *cb_state)
     398             : {
     399      454744 :         cb_state->unlock = true;
     400      454744 : }
     401             : 
     402             : struct g_lock_lock_cb_watch_data_state {
     403             :         struct tevent_context *ev;
     404             :         struct g_lock_ctx *ctx;
     405             :         TDB_DATA key;
     406             :         struct server_id blocker;
     407             :         bool blockerdead;
     408             :         uint64_t unique_lock_epoch;
     409             :         uint64_t unique_data_epoch;
     410             :         uint64_t watch_instance;
     411             :         NTSTATUS status;
     412             : };
     413             : 
     414             : static void g_lock_lock_cb_watch_data_done(struct tevent_req *subreq);
     415             : 
     416         499 : struct tevent_req *g_lock_lock_cb_watch_data_send(
     417             :         TALLOC_CTX *mem_ctx,
     418             :         struct tevent_context *ev,
     419             :         struct g_lock_lock_cb_state *cb_state,
     420             :         struct server_id blocker)
     421             : {
     422         499 :         struct tevent_req *req = NULL;
     423         499 :         struct g_lock_lock_cb_watch_data_state *state = NULL;
     424         499 :         struct tevent_req *subreq = NULL;
     425         499 :         TDB_DATA key = dbwrap_record_get_key(cb_state->rec);
     426             : 
     427         499 :         req = tevent_req_create(
     428             :                 mem_ctx, &state, struct g_lock_lock_cb_watch_data_state);
     429         499 :         if (req == NULL) {
     430           0 :                 return NULL;
     431             :         }
     432         499 :         state->ev = ev;
     433         499 :         state->ctx = cb_state->ctx;
     434         499 :         state->blocker = blocker;
     435             : 
     436         499 :         state->key = tdb_data_talloc_copy(state, key);
     437         499 :         if (tevent_req_nomem(state->key.dptr, req)) {
     438           0 :                 return tevent_req_post(req, ev);
     439             :         }
     440             : 
     441         499 :         state->unique_lock_epoch = cb_state->lck->unique_lock_epoch;
     442         499 :         state->unique_data_epoch = cb_state->lck->unique_data_epoch;
     443             : 
     444         499 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
     445             : 
     446         499 :         subreq = dbwrap_watched_watch_send(
     447         499 :                 state, state->ev, cb_state->rec, 0, state->blocker);
     448         499 :         if (tevent_req_nomem(subreq, req)) {
     449           0 :                 return tevent_req_post(req, ev);
     450             :         }
     451         499 :         tevent_req_set_callback(subreq, g_lock_lock_cb_watch_data_done, req);
     452             : 
     453         499 :         return req;
     454             : }
     455             : 
     456         712 : static void g_lock_lock_cb_watch_data_done_fn(
     457             :         struct db_record *rec,
     458             :         TDB_DATA value,
     459             :         void *private_data)
     460             : {
     461         712 :         struct tevent_req *req = talloc_get_type_abort(
     462             :                 private_data, struct tevent_req);
     463         712 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     464             :                 req, struct g_lock_lock_cb_watch_data_state);
     465         712 :         struct tevent_req *subreq = NULL;
     466           0 :         struct g_lock lck;
     467           0 :         bool ok;
     468             : 
     469         712 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
     470         712 :         if (!ok) {
     471           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     472           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
     473           0 :                 return;
     474             :         }
     475             : 
     476         712 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
     477         447 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     478         447 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
     479             :                           "state->unique_data_epoch=%"PRIu64"\n",
     480             :                           lck.unique_data_epoch,
     481             :                           state->unique_data_epoch);
     482         447 :                 state->status = NT_STATUS_OK;
     483         447 :                 return;
     484             :         }
     485             : 
     486             :         /*
      487             :          * If the lock epoch changed, we'd better
      488             :          * remove ourselves from the waiter list
      489             :          * (most likely the first position)
      490             :          * and re-add ourselves at the end of the list.
      491             :          *
      492             :          * This gives other lock waiters a chance
      493             :          * to make progress.
      494             :          *
      495             :          * Otherwise we keep our waiter instance alive
      496             :          * and keep waiting (most likely at the first position).
      497             :          */
     498         265 :         if (lck.unique_lock_epoch != state->unique_lock_epoch) {
     499         241 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     500         241 :                 state->watch_instance = dbwrap_watched_watch_add_instance(rec);
     501         241 :                 state->unique_lock_epoch = lck.unique_lock_epoch;
     502             :         }
     503             : 
     504         265 :         subreq = dbwrap_watched_watch_send(
     505             :                 state, state->ev, rec, state->watch_instance, state->blocker);
     506         265 :         if (subreq == NULL) {
     507           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     508           0 :                 state->status = NT_STATUS_NO_MEMORY;
     509           0 :                 return;
     510             :         }
     511         265 :         tevent_req_set_callback(subreq, g_lock_lock_cb_watch_data_done, req);
     512             : 
     513         265 :         state->status = NT_STATUS_EVENT_PENDING;
     514             : }
     515             : 
     516         712 : static void g_lock_lock_cb_watch_data_done(struct tevent_req *subreq)
     517             : {
     518         712 :         struct tevent_req *req = tevent_req_callback_data(
     519             :                 subreq, struct tevent_req);
     520         712 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     521             :                 req, struct g_lock_lock_cb_watch_data_state);
     522           0 :         NTSTATUS status;
     523         712 :         uint64_t instance = 0;
     524             : 
     525         712 :         status = dbwrap_watched_watch_recv(
     526             :                 subreq, &instance, &state->blockerdead, &state->blocker);
     527         712 :         TALLOC_FREE(subreq);
     528         712 :         if (tevent_req_nterror(req, status)) {
     529           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
     530             :                           nt_errstr(status));
     531         265 :                 return;
     532             :         }
     533             : 
     534         712 :         state->watch_instance = instance;
     535             : 
     536         712 :         status = dbwrap_do_locked(
     537         712 :                 state->ctx->db, state->key, g_lock_lock_cb_watch_data_done_fn, req);
     538         712 :         if (tevent_req_nterror(req, status)) {
     539           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
     540           0 :                 return;
     541             :         }
     542         712 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
     543         265 :                 return;
     544             :         }
     545         447 :         if (tevent_req_nterror(req, state->status)) {
     546           0 :                 return;
     547             :         }
     548         447 :         tevent_req_done(req);
     549             : }
     550             : 
     551         447 : NTSTATUS g_lock_lock_cb_watch_data_recv(
     552             :         struct tevent_req *req,
     553             :         bool *blockerdead,
     554             :         struct server_id *blocker)
     555             : {
     556         447 :         struct g_lock_lock_cb_watch_data_state *state = tevent_req_data(
     557             :                 req, struct g_lock_lock_cb_watch_data_state);
     558           0 :         NTSTATUS status;
     559             : 
     560         447 :         if (tevent_req_is_nterror(req, &status)) {
     561           0 :                 return status;
     562             :         }
     563         447 :         if (blockerdead != NULL) {
     564         447 :                 *blockerdead = state->blockerdead;
     565             :         }
     566         447 :         if (blocker != NULL) {
     567         447 :                 *blocker = state->blocker;
     568             :         }
     569             : 
     570         447 :         return NT_STATUS_OK;
     571             : }
     572             : 
     573        2542 : void g_lock_lock_cb_wake_watchers(struct g_lock_lock_cb_state *cb_state)
     574             : {
     575        2542 :         struct g_lock *lck = cb_state->lck;
     576             : 
     577        2542 :         lck->unique_data_epoch = generate_unique_u64(lck->unique_data_epoch);
     578        2542 :         cb_state->modified = true;
     579        2542 : }
     580             : 
     581      946140 : static NTSTATUS g_lock_lock_cb_run_and_store(struct g_lock_lock_cb_state *cb_state)
     582             : {
     583      946140 :         struct g_lock *lck = cb_state->lck;
     584      946140 :         NTSTATUS success_status = NT_STATUS_OK;
     585        2314 :         NTSTATUS status;
     586             : 
     587      946140 :         if (cb_state->cb_fn != NULL) {
     588             : 
     589      934285 :                 SMB_ASSERT(lck->num_shared == 0);
     590      934285 :                 SMB_ASSERT(cb_state->new_shared == NULL);
     591             : 
     592      934285 :                 if (cb_state->ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     593      934285 :                         const char *name = dbwrap_name(cb_state->ctx->db);
     594      934285 :                         dbwrap_lock_order_lock(name, cb_state->ctx->lock_order);
     595             :                 }
     596             : 
     597      934285 :                 cb_state->ctx->busy = true;
     598      934285 :                 cb_state->cb_fn(cb_state, cb_state->cb_private);
     599      934285 :                 cb_state->ctx->busy = false;
     600             : 
     601      934285 :                 if (cb_state->ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
     602      934285 :                         const char *name = dbwrap_name(cb_state->ctx->db);
     603      934285 :                         dbwrap_lock_order_unlock(name, cb_state->ctx->lock_order);
     604             :                 }
     605             :         }
     606             : 
     607      946140 :         if (cb_state->unlock) {
     608             :                 /*
      609             :                  * Unlocking should wake up watchers.
     610             :                  *
     611             :                  * We no longer need the lock, so
     612             :                  * force a wakeup of the next watchers,
     613             :                  * even if we don't do any update.
     614             :                  */
     615      454744 :                 dbwrap_watched_watch_reset_alerting(cb_state->rec);
     616      454744 :                 dbwrap_watched_watch_force_alerting(cb_state->rec);
     617      454744 :                 if (!cb_state->modified) {
     618             :                         /*
     619             :                          * The record was not changed at
     620             :                          * all, so we can also avoid
     621             :                          * storing the lck.unique_lock_epoch
     622             :                          * change
     623             :                          */
     624       12597 :                         return NT_STATUS_WAS_UNLOCKED;
     625             :                 }
     626      442147 :                 lck->exclusive = (struct server_id) { .pid = 0 };
     627      442147 :                 cb_state->new_shared = NULL;
     628             : 
     629      442147 :                 if (lck->datalen == 0) {
     630      244222 :                         if (!cb_state->existed) {
     631           0 :                                 return NT_STATUS_WAS_UNLOCKED;
     632             :                         }
     633             : 
     634      244222 :                         status = dbwrap_record_delete(cb_state->rec);
     635      244222 :                         if (!NT_STATUS_IS_OK(status)) {
     636           0 :                                 DBG_WARNING("dbwrap_record_delete() failed: %s\n",
     637             :                                     nt_errstr(status));
     638           0 :                                 return status;
     639             :                         }
     640      244222 :                         return NT_STATUS_WAS_UNLOCKED;
     641             :                 }
     642             : 
     643      197121 :                 success_status = NT_STATUS_WAS_UNLOCKED;
     644             :         }
     645             : 
     646      689321 :         status = g_lock_store(cb_state->rec,
     647             :                               cb_state->lck,
     648             :                               cb_state->new_shared,
     649             :                               NULL, 0);
     650      689321 :         if (!NT_STATUS_IS_OK(status)) {
     651           0 :                 DBG_WARNING("g_lock_store() failed: %s\n",
     652             :                             nt_errstr(status));
     653           0 :                 return status;
     654             :         }
     655             : 
     656      689321 :         return success_status;
     657             : }
     658             : 
     659             : struct g_lock_lock_state {
     660             :         struct tevent_context *ev;
     661             :         struct g_lock_ctx *ctx;
     662             :         TDB_DATA key;
     663             :         enum g_lock_type type;
     664             :         bool retry;
     665             :         g_lock_lock_cb_fn_t cb_fn;
     666             :         void *cb_private;
     667             : };
     668             : 
     669             : struct g_lock_lock_fn_state {
     670             :         struct g_lock_lock_state *req_state;
     671             :         struct server_id *dead_blocker;
     672             : 
     673             :         struct tevent_req *watch_req;
     674             :         uint64_t watch_instance;
     675             :         NTSTATUS status;
     676             : };
     677             : 
     678             : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s);
     679             : 
     680         330 : static NTSTATUS g_lock_trylock(
     681             :         struct db_record *rec,
     682             :         struct g_lock_lock_fn_state *state,
     683             :         TDB_DATA data,
     684             :         struct server_id *blocker)
     685             : {
     686         330 :         struct g_lock_lock_state *req_state = state->req_state;
     687         330 :         struct server_id self = messaging_server_id(req_state->ctx->msg);
     688         330 :         enum g_lock_type type = req_state->type;
     689         330 :         bool retry = req_state->retry;
     690         330 :         struct g_lock lck = { .exclusive.pid = 0 };
     691         660 :         struct g_lock_lock_cb_state cb_state = {
     692         330 :                 .ctx = req_state->ctx,
     693             :                 .rec = rec,
     694             :                 .lck = &lck,
     695         330 :                 .cb_fn = req_state->cb_fn,
     696         330 :                 .cb_private = req_state->cb_private,
     697         330 :                 .existed = data.dsize != 0,
     698         330 :                 .update_mem_ctx = talloc_tos(),
     699             :         };
     700          13 :         struct server_id_buf tmp;
     701          13 :         NTSTATUS status;
     702          13 :         bool ok;
     703             : 
     704         330 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
     705         330 :         if (!ok) {
     706           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     707           0 :                 DBG_DEBUG("g_lock_parse failed\n");
     708           0 :                 return NT_STATUS_INTERNAL_DB_CORRUPTION;
     709             :         }
     710             : 
     711         330 :         g_lock_cleanup_dead(&lck, state->dead_blocker);
     712             : 
     713         330 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
     714             : 
     715         330 :         if (lck.exclusive.pid != 0) {
     716          24 :                 bool self_exclusive = server_id_equal(&self, &lck.exclusive);
     717             : 
     718          24 :                 if (!self_exclusive) {
     719          22 :                         bool exists = serverid_exists(&lck.exclusive);
     720          22 :                         if (!exists) {
     721           0 :                                 lck.exclusive = (struct server_id) { .pid=0 };
     722           0 :                                 goto noexclusive;
     723             :                         }
     724             : 
     725          22 :                         DBG_DEBUG("%s has an exclusive lock\n",
     726             :                                   server_id_str_buf(lck.exclusive, &tmp));
     727             : 
     728          22 :                         if (type == G_LOCK_DOWNGRADE) {
     729           0 :                                 struct server_id_buf tmp2;
     730             : 
     731           0 :                                 dbwrap_watched_watch_remove_instance(rec,
     732             :                                                 state->watch_instance);
     733             : 
     734           0 :                                 DBG_DEBUG("%s: Trying to downgrade %s\n",
     735             :                                           server_id_str_buf(self, &tmp),
     736             :                                           server_id_str_buf(
     737             :                                                   lck.exclusive, &tmp2));
     738           0 :                                 return NT_STATUS_NOT_LOCKED;
     739             :                         }
     740             : 
     741          22 :                         if (type == G_LOCK_UPGRADE) {
     742           1 :                                 ssize_t shared_idx;
     743             : 
     744           1 :                                 dbwrap_watched_watch_remove_instance(rec,
     745             :                                                 state->watch_instance);
     746             : 
     747           1 :                                 shared_idx = g_lock_find_shared(&lck, &self);
     748             : 
     749           1 :                                 if (shared_idx == -1) {
     750           0 :                                         DBG_DEBUG("Trying to upgrade %s "
     751             :                                                   "without "
     752             :                                                   "existing shared lock\n",
     753             :                                                   server_id_str_buf(
     754             :                                                           self, &tmp));
     755           0 :                                         return NT_STATUS_NOT_LOCKED;
     756             :                                 }
     757             : 
     758             :                                 /*
     759             :                                  * We're trying to upgrade, and the
     760             :                                  * exclusive lock is taken by someone
     761             :                                  * else. This means that someone else
     762             :                                  * is waiting for us to give up our
     763             :                                  * shared lock. If we now also wait
     764             :                                  * for someone to give their shared
     765             :                                  * lock, we will deadlock.
     766             :                                  */
     767             : 
     768           1 :                                 DBG_DEBUG("Trying to upgrade %s while "
     769             :                                           "someone else is also "
     770             :                                           "trying to upgrade\n",
     771             :                                           server_id_str_buf(self, &tmp));
     772           1 :                                 return NT_STATUS_POSSIBLE_DEADLOCK;
     773             :                         }
     774             : 
     775          21 :                         DBG_DEBUG("Waiting for lck.exclusive=%s\n",
     776             :                                   server_id_str_buf(lck.exclusive, &tmp));
     777             : 
     778             :                         /*
     779             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     780             :                          * and need to monitor the record.
     781             :                          *
     782             :                          * If we don't have a watcher instance yet,
     783             :                          * we should add one.
     784             :                          */
     785          21 :                         if (state->watch_instance == 0) {
     786          21 :                                 state->watch_instance =
     787          21 :                                         dbwrap_watched_watch_add_instance(rec);
     788             :                         }
     789             : 
     790          21 :                         *blocker = lck.exclusive;
     791          21 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     792             :                 }
     793             : 
     794           2 :                 if (type == G_LOCK_DOWNGRADE) {
     795           0 :                         DBG_DEBUG("Downgrading %s from WRITE to READ\n",
     796             :                                   server_id_str_buf(self, &tmp));
     797             : 
     798           0 :                         lck.exclusive = (struct server_id) { .pid = 0 };
     799           0 :                         goto do_shared;
     800             :                 }
     801             : 
     802           2 :                 if (!retry) {
     803           1 :                         dbwrap_watched_watch_remove_instance(rec,
     804             :                                                 state->watch_instance);
     805             : 
     806           1 :                         DBG_DEBUG("%s already locked by self\n",
     807             :                                   server_id_str_buf(self, &tmp));
     808           1 :                         return NT_STATUS_WAS_LOCKED;
     809             :                 }
     810             : 
     811           1 :                 g_lock_cleanup_shared(&lck);
     812             : 
     813           1 :                 if (lck.num_shared != 0) {
     814           0 :                         g_lock_get_shared(&lck, 0, blocker);
     815             : 
     816           0 :                         DBG_DEBUG("Continue waiting for shared lock %s\n",
     817             :                                   server_id_str_buf(*blocker, &tmp));
     818             : 
     819             :                         /*
     820             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     821             :                          * and need to monitor the record.
     822             :                          *
     823             :                          * If we don't have a watcher instance yet,
     824             :                          * we should add one.
     825             :                          */
     826           0 :                         if (state->watch_instance == 0) {
     827           0 :                                 state->watch_instance =
     828           0 :                                         dbwrap_watched_watch_add_instance(rec);
     829             :                         }
     830             : 
     831           0 :                         return NT_STATUS_LOCK_NOT_GRANTED;
     832             :                 }
     833             : 
     834             :                 /*
      835             :                  * Retry after a conflicting lock was released.
      836             :                  * All pending readers are gone, so we got the lock.
     837             :                  */
     838           1 :                 goto got_lock;
     839             :         }
     840             : 
     841         306 : noexclusive:
     842             : 
     843         306 :         if (type == G_LOCK_UPGRADE) {
     844           3 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     845             : 
     846           3 :                 if (shared_idx == -1) {
     847           0 :                         dbwrap_watched_watch_remove_instance(rec,
     848             :                                                 state->watch_instance);
     849             : 
     850           0 :                         DBG_DEBUG("Trying to upgrade %s without "
     851             :                                   "existing shared lock\n",
     852             :                                   server_id_str_buf(self, &tmp));
     853           0 :                         return NT_STATUS_NOT_LOCKED;
     854             :                 }
     855             : 
     856           3 :                 g_lock_del_shared(&lck, shared_idx);
     857           3 :                 type = G_LOCK_WRITE;
     858             :         }
     859             : 
     860         306 :         if (type == G_LOCK_WRITE) {
     861         306 :                 ssize_t shared_idx = g_lock_find_shared(&lck, &self);
     862             : 
     863         306 :                 if (shared_idx != -1) {
     864           0 :                         dbwrap_watched_watch_remove_instance(rec,
     865             :                                                 state->watch_instance);
     866           0 :                         DBG_DEBUG("Trying to writelock existing shared %s\n",
     867             :                                   server_id_str_buf(self, &tmp));
     868           0 :                         return NT_STATUS_WAS_LOCKED;
     869             :                 }
     870             : 
     871         306 :                 lck.exclusive = self;
     872             : 
     873         306 :                 g_lock_cleanup_shared(&lck);
     874             : 
     875         306 :                 if (lck.num_shared == 0) {
     876             :                         /*
     877             :                          * If we store ourself as exclusive writer,
     878             :                          * without any pending readers ...
     879             :                          */
     880         302 :                         goto got_lock;
     881             :                 }
     882             : 
     883           4 :                 if (state->watch_instance == 0) {
     884             :                         /*
     885             :                          * Here we have lck.num_shared != 0.
     886             :                          *
     887             :                          * We will return NT_STATUS_LOCK_NOT_GRANTED
     888             :                          * below.
     889             :                          *
      890             :                          * And we don't have a watcher instance yet!
     891             :                          *
     892             :                          * We add it here before g_lock_store()
     893             :                          * in order to trigger just one
     894             :                          * low level dbwrap_do_locked() call.
     895             :                          */
     896           4 :                         state->watch_instance =
     897           4 :                                 dbwrap_watched_watch_add_instance(rec);
     898             :                 }
     899             : 
     900           4 :                 status = g_lock_store(rec, &lck, NULL, NULL, 0);
     901           4 :                 if (!NT_STATUS_IS_OK(status)) {
     902           0 :                         DBG_DEBUG("g_lock_store() failed: %s\n",
     903             :                                   nt_errstr(status));
     904           0 :                         return status;
     905             :                 }
     906             : 
     907           4 :                 talloc_set_destructor(
     908             :                         req_state, g_lock_lock_state_destructor);
     909             : 
     910           4 :                 g_lock_get_shared(&lck, 0, blocker);
     911             : 
     912           4 :                 DBG_DEBUG("Waiting for %zu shared locks, "
     913             :                           "picking blocker %s\n",
     914             :                           lck.num_shared,
     915             :                           server_id_str_buf(*blocker, &tmp));
     916             : 
     917           4 :                 return NT_STATUS_LOCK_NOT_GRANTED;
     918             :         }
     919             : 
     920           0 : do_shared:
     921             : 
     922           0 :         g_lock_cleanup_shared(&lck);
     923           0 :         cb_state.new_shared = &self;
     924           0 :         goto got_lock;
     925             : 
     926         303 : got_lock:
     927             :         /*
     928             :          * We got the lock we asked for, so we no
     929             :          * longer need to monitor the record.
     930             :          */
     931         303 :         dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
     932             : 
     933         303 :         status = g_lock_lock_cb_run_and_store(&cb_state);
     934         303 :         if (!NT_STATUS_IS_OK(status) &&
     935           7 :             !NT_STATUS_EQUAL(status, NT_STATUS_WAS_UNLOCKED))
     936             :         {
     937           0 :                 DBG_WARNING("g_lock_lock_cb_run_and_store() failed: %s\n",
     938             :                             nt_errstr(status));
     939           0 :                 return status;
     940             :         }
     941             : 
     942         303 :         talloc_set_destructor(req_state, NULL);
     943         303 :         return status;
     944             : }
     945             : 
     946         330 : static void g_lock_lock_fn(
     947             :         struct db_record *rec,
     948             :         TDB_DATA value,
     949             :         void *private_data)
     950             : {
     951         330 :         struct g_lock_lock_fn_state *state = private_data;
     952         330 :         struct server_id blocker = {0};
     953             : 
     954             :         /*
      955             :          * We're trying to get a lock, and if we are
      956             :          * successful in doing that, we should not
      957             :          * wake up any other waiters; all they would
      958             :          * find is that we're holding a lock they
      959             :          * are conflicting with.
     960             :          */
     961         330 :         dbwrap_watched_watch_skip_alerting(rec);
     962             : 
     963         330 :         state->status = g_lock_trylock(rec, state, value, &blocker);
     964         330 :         if (!NT_STATUS_IS_OK(state->status)) {
     965          34 :                 DBG_DEBUG("g_lock_trylock returned %s\n",
     966             :                           nt_errstr(state->status));
     967             :         }
     968         330 :         if (!NT_STATUS_EQUAL(state->status, NT_STATUS_LOCK_NOT_GRANTED)) {
     969         305 :                 return;
     970             :         }
     971             : 
     972          50 :         state->watch_req = dbwrap_watched_watch_send(
     973          25 :                 state->req_state, state->req_state->ev, rec, state->watch_instance, blocker);
     974          25 :         if (state->watch_req == NULL) {
     975           0 :                 state->status = NT_STATUS_NO_MEMORY;
     976             :         }
     977             : }
     978             : 
     979           2 : static int g_lock_lock_state_destructor(struct g_lock_lock_state *s)
     980             : {
     981           2 :         NTSTATUS status = g_lock_unlock(s->ctx, s->key);
     982           2 :         if (!NT_STATUS_IS_OK(status)) {
     983           0 :                 DBG_DEBUG("g_lock_unlock failed: %s\n", nt_errstr(status));
     984             :         }
     985           2 :         return 0;
     986             : }
     987             : 
     988             : static void g_lock_lock_retry(struct tevent_req *subreq);
     989             : 
     990         310 : struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
     991             :                                     struct tevent_context *ev,
     992             :                                     struct g_lock_ctx *ctx,
     993             :                                     TDB_DATA key,
     994             :                                     enum g_lock_type type,
     995             :                                     g_lock_lock_cb_fn_t cb_fn,
     996             :                                     void *cb_private)
     997             : {
     998          11 :         struct tevent_req *req;
     999          11 :         struct g_lock_lock_state *state;
    1000          11 :         struct g_lock_lock_fn_state fn_state;
    1001          11 :         NTSTATUS status;
    1002          11 :         bool ok;
    1003             : 
    1004         310 :         SMB_ASSERT(!ctx->busy);
    1005             : 
    1006         310 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_lock_state);
    1007         310 :         if (req == NULL) {
    1008           0 :                 return NULL;
    1009             :         }
    1010         310 :         state->ev = ev;
    1011         310 :         state->ctx = ctx;
    1012         310 :         state->key = key;
    1013         310 :         state->type = type;
    1014         310 :         state->cb_fn = cb_fn;
    1015         310 :         state->cb_private = cb_private;
    1016             : 
    1017         310 :         fn_state = (struct g_lock_lock_fn_state) {
    1018             :                 .req_state = state,
    1019             :         };
    1020             : 
    1021             :         /*
    1022             :          * We allow a cb_fn only for G_LOCK_WRITE for now.
    1023             :          *
    1024             :          * It's all we currently need and it makes a few things
    1025             :          * easier to implement.
    1026             :          */
    1027         310 :         if (unlikely(cb_fn != NULL && type != G_LOCK_WRITE)) {
    1028           0 :                 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_6);
    1029           0 :                 return tevent_req_post(req, ev);
    1030             :         }
    1031             : 
    1032         310 :         status = dbwrap_do_locked(ctx->db, key, g_lock_lock_fn, &fn_state);
    1033         310 :         if (tevent_req_nterror(req, status)) {
    1034           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
    1035             :                           nt_errstr(status));
    1036           0 :                 return tevent_req_post(req, ev);
    1037             :         }
    1038             : 
    1039         310 :         if (NT_STATUS_IS_OK(fn_state.status)) {
    1040         282 :                 tevent_req_done(req);
    1041         282 :                 return tevent_req_post(req, ev);
    1042             :         }
    1043          28 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1044           3 :                 tevent_req_nterror(req, fn_state.status);
    1045           3 :                 return tevent_req_post(req, ev);
    1046             :         }
    1047             : 
    1048          25 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
    1049           0 :                 return tevent_req_post(req, ev);
    1050             :         }
    1051             : 
    1052          64 :         ok = tevent_req_set_endtime(
    1053             :                 fn_state.watch_req,
    1054          25 :                 state->ev,
    1055          25 :                 timeval_current_ofs(5 + generate_random() % 5, 0));
    1056          25 :         if (!ok) {
    1057           0 :                 tevent_req_oom(req);
    1058           0 :                 return tevent_req_post(req, ev);
    1059             :         }
    1060          25 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
    1061             : 
    1062          25 :         return req;
    1063             : }
    1064             : 
    1065          20 : static void g_lock_lock_retry(struct tevent_req *subreq)
    1066             : {
    1067          20 :         struct tevent_req *req = tevent_req_callback_data(
    1068             :                 subreq, struct tevent_req);
    1069          20 :         struct g_lock_lock_state *state = tevent_req_data(
    1070             :                 req, struct g_lock_lock_state);
    1071           2 :         struct g_lock_lock_fn_state fn_state;
    1072          20 :         struct server_id blocker = { .pid = 0 };
    1073          20 :         bool blockerdead = false;
    1074           2 :         NTSTATUS status;
    1075          20 :         uint64_t instance = 0;
    1076             : 
    1077          20 :         status = dbwrap_watched_watch_recv(subreq, &instance, &blockerdead, &blocker);
    1078          20 :         DBG_DEBUG("watch_recv returned %s\n", nt_errstr(status));
    1079          20 :         TALLOC_FREE(subreq);
    1080             : 
    1081          20 :         if (!NT_STATUS_IS_OK(status) &&
    1082           0 :             !NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
    1083           0 :                 tevent_req_nterror(req, status);
    1084           0 :                 return;
    1085             :         }
    1086             : 
    1087          20 :         state->retry = true;
    1088             : 
    1089          22 :         fn_state = (struct g_lock_lock_fn_state) {
    1090             :                 .req_state = state,
    1091          20 :                 .dead_blocker = blockerdead ? &blocker : NULL,
    1092             :                 .watch_instance = instance,
    1093             :         };
    1094             : 
    1095          20 :         status = dbwrap_do_locked(state->ctx->db, state->key,
    1096             :                                   g_lock_lock_fn, &fn_state);
    1097          20 :         if (tevent_req_nterror(req, status)) {
    1098           0 :                 DBG_DEBUG("dbwrap_do_locked failed: %s\n",
    1099             :                           nt_errstr(status));
    1100           0 :                 return;
    1101             :         }
    1102             : 
    1103          20 :         if (NT_STATUS_IS_OK(fn_state.status)) {
    1104          14 :                 tevent_req_done(req);
    1105          14 :                 return;
    1106             :         }
    1107           6 :         if (!NT_STATUS_EQUAL(fn_state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1108           6 :                 tevent_req_nterror(req, fn_state.status);
    1109           6 :                 return;
    1110             :         }
    1111             : 
    1112           0 :         if (tevent_req_nomem(fn_state.watch_req, req)) {
    1113           0 :                 return;
    1114             :         }
    1115             : 
    1116           0 :         if (!tevent_req_set_endtime(
    1117             :                     fn_state.watch_req, state->ev,
    1118           0 :                     timeval_current_ofs(5 + generate_random() % 5, 0))) {
    1119           0 :                 return;
    1120             :         }
    1121           0 :         tevent_req_set_callback(fn_state.watch_req, g_lock_lock_retry, req);
    1122             : }
    1123             : 
    1124         309 : NTSTATUS g_lock_lock_recv(struct tevent_req *req)
    1125             : {
    1126         309 :         struct g_lock_lock_state *state = tevent_req_data(
    1127             :                 req, struct g_lock_lock_state);
    1128         309 :         struct g_lock_ctx *ctx = state->ctx;
    1129          10 :         NTSTATUS status;
    1130             : 
    1131         309 :         if (tevent_req_is_nterror(req, &status)) {
    1132          13 :                 if (NT_STATUS_EQUAL(status, NT_STATUS_WAS_UNLOCKED)) {
    1133           7 :                         return NT_STATUS_OK;
    1134             :                 }
    1135           6 :                 return status;
    1136             :         }
    1137             : 
    1138         296 :         if ((ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) &&
    1139          15 :             ((state->type == G_LOCK_READ) ||
    1140          15 :              (state->type == G_LOCK_WRITE))) {
    1141          15 :                 const char *name = dbwrap_name(ctx->db);
    1142          15 :                 dbwrap_lock_order_lock(name, ctx->lock_order);
    1143             :         }
    1144             : 
    1145         296 :         return NT_STATUS_OK;
    1146             : }
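
Usage sketch (not part of g_lock.c itself): a tevent-based caller could take an
exclusive lock through the asynchronous g_lock_lock_send()/g_lock_lock_recv()
pair roughly as follows. The helper names and the callback are invented for
illustration, the g_lock_ctx is assumed to come from g_lock_ctx_init(), and on
NT_STATUS_OK the lock stays held until g_lock_unlock() is called.

        static void example_got_lock(struct tevent_req *req);

        static struct tevent_req *example_lock_send(TALLOC_CTX *mem_ctx,
                                                    struct tevent_context *ev,
                                                    struct g_lock_ctx *ctx,
                                                    TDB_DATA key)
        {
                struct tevent_req *req = g_lock_lock_send(
                        mem_ctx, ev, ctx, key, G_LOCK_WRITE,
                        NULL,   /* cb_fn: optional, G_LOCK_WRITE only */
                        NULL);  /* cb_private */
                if (req != NULL) {
                        tevent_req_set_callback(req, example_got_lock, NULL);
                }
                return req;
        }

        static void example_got_lock(struct tevent_req *req)
        {
                NTSTATUS status = g_lock_lock_recv(req);
                TALLOC_FREE(req);
                DBG_DEBUG("g_lock_lock_recv: %s\n", nt_errstr(status));
        }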
    1147             : 
    1148             : struct g_lock_lock_simple_state {
    1149             :         struct g_lock_ctx *ctx;
    1150             :         struct server_id me;
    1151             :         enum g_lock_type type;
    1152             :         NTSTATUS status;
    1153             :         g_lock_lock_cb_fn_t cb_fn;
    1154             :         void *cb_private;
    1155             : };
    1156             : 
    1157      945864 : static void g_lock_lock_simple_fn(
    1158             :         struct db_record *rec,
    1159             :         TDB_DATA value,
    1160             :         void *private_data)
    1161             : {
    1162      945864 :         struct g_lock_lock_simple_state *state = private_data;
    1163        2315 :         struct server_id_buf buf;
    1164      945864 :         struct g_lock lck = { .exclusive.pid = 0 };
    1165     1891728 :         struct g_lock_lock_cb_state cb_state = {
    1166      945864 :                 .ctx = state->ctx,
    1167             :                 .rec = rec,
    1168             :                 .lck = &lck,
    1169      945864 :                 .cb_fn = state->cb_fn,
    1170      945864 :                 .cb_private = state->cb_private,
    1171      945864 :                 .existed = value.dsize != 0,
    1172      945864 :                 .update_mem_ctx = talloc_tos(),
    1173             :         };
    1174        2315 :         bool ok;
    1175             : 
    1176      945864 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1177      945864 :         if (!ok) {
    1178           0 :                 DBG_DEBUG("g_lock_parse failed\n");
    1179           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1180           0 :                 return;
    1181             :         }
    1182             : 
    1183      945864 :         if (lck.exclusive.pid != 0) {
    1184          25 :                 DBG_DEBUG("locked by %s\n",
    1185             :                           server_id_str_buf(lck.exclusive, &buf));
    1186          25 :                 goto not_granted;
    1187             :         }
    1188             : 
    1189      945839 :         if (state->type == G_LOCK_WRITE) {
    1190      945821 :                 if (lck.num_shared != 0) {
    1191           2 :                         DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
    1192           2 :                         goto not_granted;
    1193             :                 }
    1194      945819 :                 lck.exclusive = state->me;
    1195          18 :         } else if (state->type == G_LOCK_READ) {
    1196          18 :                 g_lock_cleanup_shared(&lck);
    1197          18 :                 cb_state.new_shared = &state->me;
    1198             :         } else {
    1199           0 :                 smb_panic(__location__);
    1200             :         }
    1201             : 
    1202      945837 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
    1203             : 
    1204             :         /*
    1205             :          * We are going to store ourselves as the owner,
    1206             :          * so we got what we were waiting for.
    1207             :          *
    1208             :          * We therefore no longer need to monitor
    1209             :          * the record.
    1210             :          */
    1211      945837 :         dbwrap_watched_watch_skip_alerting(rec);
    1212             : 
    1213      945837 :         state->status = g_lock_lock_cb_run_and_store(&cb_state);
    1214      945837 :         if (!NT_STATUS_IS_OK(state->status) &&
    1215      453268 :             !NT_STATUS_EQUAL(state->status, NT_STATUS_WAS_UNLOCKED))
    1216             :         {
    1217           0 :                 DBG_WARNING("g_lock_lock_cb_run_and_store() failed: %s\n",
    1218             :                             nt_errstr(state->status));
    1219           0 :                 return;
    1220             :         }
    1221             : 
    1222      943527 :         return;
    1223             : 
    1224          27 : not_granted:
    1225          27 :         state->status = NT_STATUS_LOCK_NOT_GRANTED;
    1226             : }
    1227             : 
    1228      945867 : NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
    1229             :                      enum g_lock_type type, struct timeval timeout,
    1230             :                      g_lock_lock_cb_fn_t cb_fn,
    1231             :                      void *cb_private)
    1232             : {
    1233        2318 :         TALLOC_CTX *frame;
    1234        2318 :         struct tevent_context *ev;
    1235        2318 :         struct tevent_req *req;
    1236        2318 :         struct timeval end;
    1237        2318 :         NTSTATUS status;
    1238             : 
    1239      945867 :         SMB_ASSERT(!ctx->busy);
    1240             : 
    1241             :         /*
    1242             :          * We allow a cb_fn only for G_LOCK_WRITE for now.
    1243             :          *
    1244             :          * It's all we currently need and it makes a few things
    1245             :          * easier to implement.
    1246             :          */
    1247      945867 :         if (unlikely(cb_fn != NULL && type != G_LOCK_WRITE)) {
    1248           0 :                 return NT_STATUS_INVALID_PARAMETER_5;
    1249             :         }
    1250             : 
    1251      945867 :         if ((type == G_LOCK_READ) || (type == G_LOCK_WRITE)) {
    1252             :                 /*
    1253             :                  * This is an abstraction violation: Normally we do
    1254             :                  * the sync wrappers around async functions with full
    1255             :                  * nested event contexts. However, this is used in
    1256             :                  * very hot code paths, so avoid the event context
    1257             :                  * creation for the good path where there's no lock
    1258             :                  * contention. My benchmark gave a factor of 2
    1259             :                  * improvement for lock/unlock.
    1260             :                  */
    1261     1891728 :                 struct g_lock_lock_simple_state state = {
    1262             :                         .ctx = ctx,
    1263      945864 :                         .me = messaging_server_id(ctx->msg),
    1264             :                         .type = type,
    1265             :                         .cb_fn = cb_fn,
    1266             :                         .cb_private = cb_private,
    1267             :                 };
    1268      945864 :                 status = dbwrap_do_locked(
    1269             :                         ctx->db, key, g_lock_lock_simple_fn, &state);
    1270      945864 :                 if (!NT_STATUS_IS_OK(status)) {
    1271           0 :                         DBG_DEBUG("dbwrap_do_locked() failed: %s\n",
    1272             :                                   nt_errstr(status));
    1273      945837 :                         return status;
    1274             :                 }
    1275             : 
    1276      945864 :                 DBG_DEBUG("status=%s, state.status=%s\n",
    1277             :                           nt_errstr(status),
    1278             :                           nt_errstr(state.status));
    1279             : 
    1280      945864 :                 if (NT_STATUS_IS_OK(state.status)) {
    1281      491100 :                         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
    1282      491071 :                                 const char *name = dbwrap_name(ctx->db);
    1283      491071 :                                 dbwrap_lock_order_lock(name, ctx->lock_order);
    1284             :                         }
    1285      491100 :                         return NT_STATUS_OK;
    1286             :                 }
    1287      454764 :                 if (NT_STATUS_EQUAL(state.status, NT_STATUS_WAS_UNLOCKED)) {
    1288             :                         /* without dbwrap_lock_order_lock() */
    1289      454737 :                         return NT_STATUS_OK;
    1290             :                 }
    1291          27 :                 if (!NT_STATUS_EQUAL(
    1292             :                             state.status, NT_STATUS_LOCK_NOT_GRANTED)) {
    1293           0 :                         return state.status;
    1294             :                 }
    1295             : 
    1296          27 :                 if (timeval_is_zero(&timeout)) {
    1297           0 :                         return NT_STATUS_LOCK_NOT_GRANTED;
    1298             :                 }
    1299             : 
    1300             :                 /*
    1301             :                  * Fall back to the full g_lock_trylock logic;
    1302             :                  * g_lock_lock_simple_fn() called above only covers
    1303             :                  * the uncontended path.
    1304             :                  */
    1305             :         }
    1306             : 
    1307          30 :         frame = talloc_stackframe();
    1308          30 :         status = NT_STATUS_NO_MEMORY;
    1309             : 
    1310          30 :         ev = samba_tevent_context_init(frame);
    1311          30 :         if (ev == NULL) {
    1312           0 :                 goto fail;
    1313             :         }
    1314          30 :         req = g_lock_lock_send(frame, ev, ctx, key, type, cb_fn, cb_private);
    1315          30 :         if (req == NULL) {
    1316           0 :                 goto fail;
    1317             :         }
    1318          30 :         end = timeval_current_ofs(timeout.tv_sec, timeout.tv_usec);
    1319          30 :         if (!tevent_req_set_endtime(req, ev, end)) {
    1320           0 :                 goto fail;
    1321             :         }
    1322          30 :         if (!tevent_req_poll_ntstatus(req, ev, &status)) {
    1323           0 :                 goto fail;
    1324             :         }
    1325          30 :         status = g_lock_lock_recv(req);
    1326          30 :  fail:
    1327          30 :         TALLOC_FREE(frame);
    1328          30 :         return status;
    1329             : }
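
Usage sketch (not part of g_lock.c itself): the synchronous wrapper is
typically called with a timeout and paired with g_lock_unlock(). The function
name and key below are invented; the g_lock_ctx is assumed to come from
g_lock_ctx_init().

        static NTSTATUS example_sync_update(struct g_lock_ctx *ctx)
        {
                TDB_DATA key = string_term_tdb_data("example-key");
                NTSTATUS status;

                /* Wait up to 10 seconds for the exclusive lock. */
                status = g_lock_lock(ctx, key, G_LOCK_WRITE,
                                     timeval_set(10, 0),
                                     NULL,      /* cb_fn */
                                     NULL);     /* cb_private */
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }

                /* ... work on the protected resource ... */

                return g_lock_unlock(ctx, key);
        }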
    1330             : 
    1331             : struct g_lock_unlock_state {
    1332             :         struct server_id self;
    1333             :         NTSTATUS status;
    1334             : };
    1335             : 
    1336      491382 : static void g_lock_unlock_fn(
    1337             :         struct db_record *rec,
    1338             :         TDB_DATA value,
    1339             :         void *private_data)
    1340             : {
    1341      491382 :         struct g_lock_unlock_state *state = private_data;
    1342         831 :         struct server_id_buf tmp1, tmp2;
    1343         831 :         struct g_lock lck;
    1344         831 :         size_t i;
    1345         831 :         bool ok, exclusive;
    1346             : 
    1347      491382 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1348      491382 :         if (!ok) {
    1349           0 :                 DBG_DEBUG("g_lock_parse() failed\n");
    1350           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1351           0 :                 return;
    1352             :         }
    1353             : 
    1354      491382 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
    1355             : 
    1356      492226 :         for (i=0; i<lck.num_shared; i++) {
    1357          19 :                 struct server_id shared;
    1358          19 :                 g_lock_get_shared(&lck, i, &shared);
    1359          19 :                 if (server_id_equal(&state->self, &shared)) {
    1360           0 :                         break;
    1361             :                 }
    1362             :         }
    1363             : 
    1364      491382 :         if (i < lck.num_shared) {
    1365           6 :                 if (exclusive) {
    1366           0 :                         DBG_DEBUG("%s both exclusive and shared (%zu)\n",
    1367             :                                   server_id_str_buf(state->self, &tmp1),
    1368             :                                   i);
    1369           0 :                         state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1370           0 :                         return;
    1371             :                 }
    1372           6 :                 g_lock_del_shared(&lck, i);
    1373             :         } else {
    1374      491376 :                 if (!exclusive) {
    1375           1 :                         DBG_DEBUG("Lock not found, self=%s, lck.exclusive=%s, "
    1376             :                                   "num_shared=%zu\n",
    1377             :                                   server_id_str_buf(state->self, &tmp1),
    1378             :                                   server_id_str_buf(lck.exclusive, &tmp2),
    1379             :                                   lck.num_shared);
    1380           1 :                         state->status = NT_STATUS_NOT_FOUND;
    1381           1 :                         return;
    1382             :                 }
    1383      491375 :                 lck.exclusive = (struct server_id) { .pid = 0 };
    1384             :         }
    1385             : 
    1386      491381 :         if ((lck.exclusive.pid == 0) &&
    1387      491381 :             (lck.num_shared == 0) &&
    1388      491374 :             (lck.datalen == 0)) {
    1389      170776 :                 state->status = dbwrap_record_delete(rec);
    1390      170776 :                 return;
    1391             :         }
    1392             : 
    1393      320605 :         if (!exclusive && lck.exclusive.pid != 0) {
    1394             :                 /*
    1395             :                  * We only had a read lock and there's
    1396             :                  * someone waiting for an exclusive lock.
    1397             :                  *
    1398             :                  * Don't alert the exclusive lock waiter
    1399             :                  * if there are still other read lock holders.
    1400             :                  */
    1401           0 :                 g_lock_cleanup_shared(&lck);
    1402           0 :                 if (lck.num_shared != 0) {
    1403           0 :                         dbwrap_watched_watch_skip_alerting(rec);
    1404             :                 }
    1405             :         }
    1406             : 
    1407      320605 :         lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
    1408             : 
    1409      320605 :         state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1410             : }
    1411             : 
    1412      491382 : NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key)
    1413             : {
    1414      491382 :         struct g_lock_unlock_state state = {
    1415      491382 :                 .self = messaging_server_id(ctx->msg),
    1416             :         };
    1417         831 :         NTSTATUS status;
    1418             : 
    1419      491382 :         SMB_ASSERT(!ctx->busy);
    1420             : 
    1421      491382 :         status = dbwrap_do_locked(ctx->db, key, g_lock_unlock_fn, &state);
    1422      491382 :         if (!NT_STATUS_IS_OK(status)) {
    1423           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1424             :                             nt_errstr(status));
    1425           0 :                 return status;
    1426             :         }
    1427      491382 :         if (!NT_STATUS_IS_OK(state.status)) {
    1428           1 :                 DBG_WARNING("g_lock_unlock_fn failed: %s\n",
    1429             :                             nt_errstr(state.status));
    1430           1 :                 return state.status;
    1431             :         }
    1432             : 
    1433      491381 :         if (ctx->lock_order != DBWRAP_LOCK_ORDER_NONE) {
    1434      491086 :                 const char *name = dbwrap_name(ctx->db);
    1435      491086 :                 dbwrap_lock_order_unlock(name, ctx->lock_order);
    1436             :         }
    1437             : 
    1438      491381 :         return NT_STATUS_OK;
    1439             : }
    1440             : 
    1441             : struct g_lock_writev_data_state {
    1442             :         TDB_DATA key;
    1443             :         struct server_id self;
    1444             :         const TDB_DATA *dbufs;
    1445             :         size_t num_dbufs;
    1446             :         NTSTATUS status;
    1447             : };
    1448             : 
    1449      172375 : static void g_lock_writev_data_fn(
    1450             :         struct db_record *rec,
    1451             :         TDB_DATA value,
    1452             :         void *private_data)
    1453             : {
    1454      172375 :         struct g_lock_writev_data_state *state = private_data;
    1455         356 :         struct g_lock lck;
    1456         356 :         bool exclusive;
    1457         356 :         bool ok;
    1458             : 
    1459             :         /*
    1460             :          * We're holding an exclusive write lock.
    1461             :          *
    1462             :          * Now we're updating the content of the record.
    1463             :          *
    1464             :          * We should not wake up any other waiters; all they
    1465             :          * would find is that we're still holding a lock they
    1466             :          * are conflicting with.
    1467             :          */
    1468      172375 :         dbwrap_watched_watch_skip_alerting(rec);
    1469             : 
    1470      172375 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1471      172375 :         if (!ok) {
    1472           0 :                 DBG_DEBUG("g_lock_parse for %s failed\n",
    1473             :                           tdb_data_dbg(state->key));
    1474           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1475           0 :                 return;
    1476             :         }
    1477             : 
    1478      172375 :         exclusive = server_id_equal(&state->self, &lck.exclusive);
    1479             : 
    1480             :         /*
    1481             :          * Make sure we're really exclusive: we are already marked as
    1482             :          * exclusive while we are still waiting for an exclusive lock.
    1483             :          */
    1484      172375 :         exclusive &= (lck.num_shared == 0);
    1485             : 
    1486      172375 :         if (!exclusive) {
    1487           1 :                 struct server_id_buf buf1, buf2;
    1488           1 :                 DBG_DEBUG("Not locked by us: self=%s, lck.exclusive=%s, "
    1489             :                           "lck.num_shared=%zu\n",
    1490             :                           server_id_str_buf(state->self, &buf1),
    1491             :                           server_id_str_buf(lck.exclusive, &buf2),
    1492             :                           lck.num_shared);
    1493           1 :                 state->status = NT_STATUS_NOT_LOCKED;
    1494           1 :                 return;
    1495             :         }
    1496             : 
    1497      172374 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1498      172374 :         lck.data = NULL;
    1499      172374 :         lck.datalen = 0;
    1500      172374 :         state->status = g_lock_store(
    1501             :                 rec, &lck, NULL, state->dbufs, state->num_dbufs);
    1502             : }
    1503             : 
    1504      172375 : NTSTATUS g_lock_writev_data(
    1505             :         struct g_lock_ctx *ctx,
    1506             :         TDB_DATA key,
    1507             :         const TDB_DATA *dbufs,
    1508             :         size_t num_dbufs)
    1509             : {
    1510      344750 :         struct g_lock_writev_data_state state = {
    1511             :                 .key = key,
    1512      172375 :                 .self = messaging_server_id(ctx->msg),
    1513             :                 .dbufs = dbufs,
    1514             :                 .num_dbufs = num_dbufs,
    1515             :         };
    1516         356 :         NTSTATUS status;
    1517             : 
    1518      172375 :         SMB_ASSERT(!ctx->busy);
    1519             : 
    1520      172375 :         status = dbwrap_do_locked(
    1521             :                 ctx->db, key, g_lock_writev_data_fn, &state);
    1522      172375 :         if (!NT_STATUS_IS_OK(status)) {
    1523           0 :                 DBG_WARNING("dbwrap_do_locked failed: %s\n",
    1524             :                             nt_errstr(status));
    1525           0 :                 return status;
    1526             :         }
    1527      172375 :         if (!NT_STATUS_IS_OK(state.status)) {
    1528           1 :                 DBG_WARNING("g_lock_writev_data_fn failed: %s\n",
    1529             :                             nt_errstr(state.status));
    1530           1 :                 return state.status;
    1531             :         }
    1532             : 
    1533      172374 :         return NT_STATUS_OK;
    1534             : }
    1535             : 
    1536           4 : NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
    1537             :                            const uint8_t *buf, size_t buflen)
    1538             : {
    1539           4 :         TDB_DATA dbuf = {
    1540             :                 .dptr = discard_const_p(uint8_t, buf),
    1541             :                 .dsize = buflen,
    1542             :         };
    1543           4 :         return g_lock_writev_data(ctx, key, &dbuf, 1);
    1544             : }
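
Usage sketch (not part of g_lock.c itself): g_lock_writev_data() replaces the
record's user data from a vector of buffers; the caller must already hold the
G_LOCK_WRITE lock on the key. The buffer contents and the helper name below
are invented.

        static NTSTATUS example_store_data(struct g_lock_ctx *ctx, TDB_DATA key)
        {
                uint8_t header[4] = { 0, 0, 0, 1 };
                const char *payload = "example payload";
                TDB_DATA dbufs[2] = {
                        { .dptr = header, .dsize = sizeof(header) },
                        { .dptr = discard_const_p(uint8_t, payload),
                          .dsize = strlen(payload) },
                };

                /* The buffers together become the new lock data. */
                return g_lock_writev_data(ctx, key, dbufs, ARRAY_SIZE(dbufs));
        }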
    1545             : 
    1546             : struct g_lock_locks_state {
    1547             :         int (*fn)(TDB_DATA key, void *private_data);
    1548             :         void *private_data;
    1549             : };
    1550             : 
    1551       18817 : static int g_lock_locks_fn(struct db_record *rec, void *priv)
    1552             : {
    1553           0 :         TDB_DATA key;
    1554       18817 :         struct g_lock_locks_state *state = (struct g_lock_locks_state *)priv;
    1555             : 
    1556       18817 :         key = dbwrap_record_get_key(rec);
    1557       18817 :         return state->fn(key, state->private_data);
    1558             : }
    1559             : 
    1560        6373 : int g_lock_locks(struct g_lock_ctx *ctx,
    1561             :                  int (*fn)(TDB_DATA key, void *private_data),
    1562             :                  void *private_data)
    1563             : {
    1564           0 :         struct g_lock_locks_state state;
    1565           0 :         NTSTATUS status;
    1566           0 :         int count;
    1567             : 
    1568        6373 :         SMB_ASSERT(!ctx->busy);
    1569             : 
    1570        6373 :         state.fn = fn;
    1571        6373 :         state.private_data = private_data;
    1572             : 
    1573        6373 :         status = dbwrap_traverse_read(ctx->db, g_lock_locks_fn, &state, &count);
    1574        6373 :         if (!NT_STATUS_IS_OK(status)) {
    1575           0 :                 return -1;
    1576             :         }
    1577        6373 :         return count;
    1578             : }
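
Usage sketch (not part of g_lock.c itself): g_lock_locks() walks all records
and hands each key to the callback. The helper names are invented, and a
non-zero return from the callback is assumed to stop the traverse, following
the usual dbwrap traverse convention.

        static int example_key_fn(TDB_DATA key, void *private_data)
        {
                size_t *count = private_data;

                *count += 1;
                return 0;
        }

        static void example_count_locks(struct g_lock_ctx *ctx)
        {
                size_t count = 0;
                int n = g_lock_locks(ctx, example_key_fn, &count);

                DBG_DEBUG("traversed %d records, counted %zu keys\n",
                          n, count);
        }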
    1579             : 
    1580             : struct g_lock_dump_state {
    1581             :         TALLOC_CTX *mem_ctx;
    1582             :         TDB_DATA key;
    1583             :         void (*fn)(struct server_id exclusive,
    1584             :                    size_t num_shared,
    1585             :                    const struct server_id *shared,
    1586             :                    const uint8_t *data,
    1587             :                    size_t datalen,
    1588             :                    void *private_data);
    1589             :         void *private_data;
    1590             :         NTSTATUS status;
    1591             :         enum dbwrap_req_state req_state;
    1592             : };
    1593             : 
    1594      262962 : static void g_lock_dump_fn(TDB_DATA key, TDB_DATA data,
    1595             :                            void *private_data)
    1596             : {
    1597      262962 :         struct g_lock_dump_state *state = private_data;
    1598      262962 :         struct g_lock lck = (struct g_lock) { .exclusive.pid = 0 };
    1599      262962 :         struct server_id *shared = NULL;
    1600        1377 :         size_t i;
    1601        1377 :         bool ok;
    1602             : 
    1603      262962 :         ok = g_lock_parse(data.dptr, data.dsize, &lck);
    1604      262962 :         if (!ok) {
    1605           0 :                 DBG_DEBUG("g_lock_parse failed for %s\n",
    1606             :                           tdb_data_dbg(state->key));
    1607           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1608           0 :                 return;
    1609             :         }
    1610             : 
    1611      262962 :         if (lck.num_shared > 0) {
    1612           7 :                 shared = talloc_array(
    1613             :                         state->mem_ctx, struct server_id, lck.num_shared);
    1614           7 :                 if (shared == NULL) {
    1615           0 :                         DBG_DEBUG("talloc failed\n");
    1616           0 :                         state->status = NT_STATUS_NO_MEMORY;
    1617           0 :                         return;
    1618             :                 }
    1619             :         }
    1620             : 
    1621      262980 :         for (i=0; i<lck.num_shared; i++) {
    1622          18 :                 g_lock_get_shared(&lck, i, &shared[i]);
    1623             :         }
    1624             : 
    1625      262962 :         state->fn(lck.exclusive,
    1626             :                   lck.num_shared,
    1627             :                   shared,
    1628      262962 :                   lck.data,
    1629             :                   lck.datalen,
    1630             :                   state->private_data);
    1631             : 
    1632      262962 :         TALLOC_FREE(shared);
    1633             : 
    1634      262962 :         state->status = NT_STATUS_OK;
    1635             : }
    1636             : 
    1637     1088193 : NTSTATUS g_lock_dump(struct g_lock_ctx *ctx, TDB_DATA key,
    1638             :                      void (*fn)(struct server_id exclusive,
    1639             :                                 size_t num_shared,
    1640             :                                 const struct server_id *shared,
    1641             :                                 const uint8_t *data,
    1642             :                                 size_t datalen,
    1643             :                                 void *private_data),
    1644             :                      void *private_data)
    1645             : {
    1646     1088193 :         struct g_lock_dump_state state = {
    1647             :                 .mem_ctx = ctx, .key = key,
    1648             :                 .fn = fn, .private_data = private_data
    1649             :         };
    1650        1812 :         NTSTATUS status;
    1651             : 
    1652     1088193 :         SMB_ASSERT(!ctx->busy);
    1653             : 
    1654     1088193 :         status = dbwrap_parse_record(ctx->db, key, g_lock_dump_fn, &state);
    1655     1088193 :         if (!NT_STATUS_IS_OK(status)) {
    1656      825231 :                 DBG_DEBUG("dbwrap_parse_record returned %s\n",
    1657             :                           nt_errstr(status));
    1658      825231 :                 return status;
    1659             :         }
    1660      262962 :         if (!NT_STATUS_IS_OK(state.status)) {
    1661           0 :                 DBG_DEBUG("g_lock_dump_fn returned %s\n",
    1662             :                           nt_errstr(state.status));
    1663           0 :                 return state.status;
    1664             :         }
    1665      262962 :         return NT_STATUS_OK;
    1666             : }
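
Usage sketch (not part of g_lock.c itself): g_lock_dump() parses one record
and reports the exclusive holder, the shared holders and the stored data to
the callback. The callback and helper below are invented and only log what
they see.

        static void example_dump_fn(struct server_id exclusive,
                                    size_t num_shared,
                                    const struct server_id *shared,
                                    const uint8_t *data,
                                    size_t datalen,
                                    void *private_data)
        {
                struct server_id_buf buf;

                DBG_DEBUG("exclusive=%s num_shared=%zu datalen=%zu\n",
                          server_id_str_buf(exclusive, &buf),
                          num_shared,
                          datalen);
        }

        static NTSTATUS example_dump(struct g_lock_ctx *ctx, TDB_DATA key)
        {
                return g_lock_dump(ctx, key, example_dump_fn, NULL);
        }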
    1667             : 
    1668             : static void g_lock_dump_done(struct tevent_req *subreq);
    1669             : 
    1670           0 : struct tevent_req *g_lock_dump_send(
    1671             :         TALLOC_CTX *mem_ctx,
    1672             :         struct tevent_context *ev,
    1673             :         struct g_lock_ctx *ctx,
    1674             :         TDB_DATA key,
    1675             :         void (*fn)(struct server_id exclusive,
    1676             :                    size_t num_shared,
    1677             :                    const struct server_id *shared,
    1678             :                    const uint8_t *data,
    1679             :                    size_t datalen,
    1680             :                    void *private_data),
    1681             :         void *private_data)
    1682             : {
    1683           0 :         struct tevent_req *req = NULL, *subreq = NULL;
    1684           0 :         struct g_lock_dump_state *state = NULL;
    1685             : 
    1686           0 :         SMB_ASSERT(!ctx->busy);
    1687             : 
    1688           0 :         req = tevent_req_create(mem_ctx, &state, struct g_lock_dump_state);
    1689           0 :         if (req == NULL) {
    1690           0 :                 return NULL;
    1691             :         }
    1692           0 :         state->mem_ctx = state;
    1693           0 :         state->key = key;
    1694           0 :         state->fn = fn;
    1695           0 :         state->private_data = private_data;
    1696             : 
    1697           0 :         SMB_ASSERT(!ctx->busy);
    1698             : 
    1699           0 :         subreq = dbwrap_parse_record_send(
    1700             :                 state,
    1701             :                 ev,
    1702             :                 ctx->db,
    1703             :                 key,
    1704             :                 g_lock_dump_fn,
    1705             :                 state,
    1706           0 :                 &state->req_state);
    1707           0 :         if (tevent_req_nomem(subreq, req)) {
    1708           0 :                 return tevent_req_post(req, ev);
    1709             :         }
    1710           0 :         tevent_req_set_callback(subreq, g_lock_dump_done, req);
    1711           0 :         return req;
    1712             : }
    1713             : 
    1714           0 : static void g_lock_dump_done(struct tevent_req *subreq)
    1715             : {
    1716           0 :         struct tevent_req *req = tevent_req_callback_data(
    1717             :                 subreq, struct tevent_req);
    1718           0 :         struct g_lock_dump_state *state = tevent_req_data(
    1719             :                 req, struct g_lock_dump_state);
    1720           0 :         NTSTATUS status;
    1721             : 
    1722           0 :         status = dbwrap_parse_record_recv(subreq);
    1723           0 :         TALLOC_FREE(subreq);
    1724           0 :         if (tevent_req_nterror(req, status) ||
    1725           0 :             tevent_req_nterror(req, state->status)) {
    1726           0 :                 return;
    1727             :         }
    1728           0 :         tevent_req_done(req);
    1729             : }
    1730             : 
    1731           0 : NTSTATUS g_lock_dump_recv(struct tevent_req *req)
    1732             : {
    1733           0 :         return tevent_req_simple_recv_ntstatus(req);
    1734             : }
    1735             : 
    1736      193157 : int g_lock_seqnum(struct g_lock_ctx *ctx)
    1737             : {
    1738      193157 :         return dbwrap_get_seqnum(ctx->db);
    1739             : }
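
Usage sketch (not part of g_lock.c itself): the database sequence number can
serve as a cheap "did anything change?" test before re-reading cached state.
The helper below is invented.

        static bool example_changed_since(struct g_lock_ctx *ctx,
                                          int *cached_seqnum)
        {
                int seqnum = g_lock_seqnum(ctx);

                if (seqnum == *cached_seqnum) {
                        return false;
                }
                *cached_seqnum = seqnum;
                return true;
        }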
    1740             : 
    1741             : struct g_lock_watch_data_state {
    1742             :         struct tevent_context *ev;
    1743             :         struct g_lock_ctx *ctx;
    1744             :         TDB_DATA key;
    1745             :         struct server_id blocker;
    1746             :         bool blockerdead;
    1747             :         uint64_t unique_lock_epoch;
    1748             :         uint64_t unique_data_epoch;
    1749             :         uint64_t watch_instance;
    1750             :         NTSTATUS status;
    1751             : };
    1752             : 
    1753             : static void g_lock_watch_data_done(struct tevent_req *subreq);
    1754             : 
    1755         653 : static void g_lock_watch_data_send_fn(
    1756             :         struct db_record *rec,
    1757             :         TDB_DATA value,
    1758             :         void *private_data)
    1759             : {
    1760         653 :         struct tevent_req *req = talloc_get_type_abort(
    1761             :                 private_data, struct tevent_req);
    1762         653 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1763             :                 req, struct g_lock_watch_data_state);
    1764         653 :         struct tevent_req *subreq = NULL;
    1765           3 :         struct g_lock lck;
    1766           3 :         bool ok;
    1767             : 
    1768         653 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1769         653 :         if (!ok) {
    1770           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1771           0 :                 return;
    1772             :         }
    1773         653 :         state->unique_lock_epoch = lck.unique_lock_epoch;
    1774         653 :         state->unique_data_epoch = lck.unique_data_epoch;
    1775             : 
    1776         653 :         DBG_DEBUG("state->unique_data_epoch=%"PRIu64"\n", state->unique_data_epoch);
    1777             : 
    1778         653 :         subreq = dbwrap_watched_watch_send(
    1779             :                 state, state->ev, rec, 0, state->blocker);
    1780         653 :         if (subreq == NULL) {
    1781           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1782           0 :                 return;
    1783             :         }
    1784         653 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1785             : 
    1786         653 :         state->status = NT_STATUS_EVENT_PENDING;
    1787             : }
    1788             : 
    1789         653 : struct tevent_req *g_lock_watch_data_send(
    1790             :         TALLOC_CTX *mem_ctx,
    1791             :         struct tevent_context *ev,
    1792             :         struct g_lock_ctx *ctx,
    1793             :         TDB_DATA key,
    1794             :         struct server_id blocker)
    1795             : {
    1796         653 :         struct tevent_req *req = NULL;
    1797         653 :         struct g_lock_watch_data_state *state = NULL;
    1798           3 :         NTSTATUS status;
    1799             : 
    1800         653 :         SMB_ASSERT(!ctx->busy);
    1801             : 
    1802         653 :         req = tevent_req_create(
    1803             :                 mem_ctx, &state, struct g_lock_watch_data_state);
    1804         653 :         if (req == NULL) {
    1805           0 :                 return NULL;
    1806             :         }
    1807         653 :         state->ev = ev;
    1808         653 :         state->ctx = ctx;
    1809         653 :         state->blocker = blocker;
    1810             : 
    1811         653 :         state->key = tdb_data_talloc_copy(state, key);
    1812         653 :         if (tevent_req_nomem(state->key.dptr, req)) {
    1813           0 :                 return tevent_req_post(req, ev);
    1814             :         }
    1815             : 
    1816         653 :         status = dbwrap_do_locked(
    1817             :                 ctx->db, key, g_lock_watch_data_send_fn, req);
    1818         653 :         if (tevent_req_nterror(req, status)) {
    1819           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1820           0 :                 return tevent_req_post(req, ev);
    1821             :         }
    1822             : 
    1823         653 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1824         650 :                 return req;
    1825             :         }
    1826           0 :         if (tevent_req_nterror(req, state->status)) {
    1827           0 :                 return tevent_req_post(req, ev);
    1828             :         }
    1829           0 :         tevent_req_done(req);
    1830           0 :         return tevent_req_post(req, ev);
    1831             : }
    1832             : 
    1833        1003 : static void g_lock_watch_data_done_fn(
    1834             :         struct db_record *rec,
    1835             :         TDB_DATA value,
    1836             :         void *private_data)
    1837             : {
    1838        1003 :         struct tevent_req *req = talloc_get_type_abort(
    1839             :                 private_data, struct tevent_req);
    1840        1003 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1841             :                 req, struct g_lock_watch_data_state);
    1842        1003 :         struct tevent_req *subreq = NULL;
    1843           3 :         struct g_lock lck;
    1844           3 :         bool ok;
    1845             : 
    1846        1003 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1847        1003 :         if (!ok) {
    1848           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1849           0 :                 state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
    1850           0 :                 return;
    1851             :         }
    1852             : 
    1853        1003 :         if (lck.unique_data_epoch != state->unique_data_epoch) {
    1854          87 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1855          87 :                 DBG_DEBUG("lck.unique_data_epoch=%"PRIu64", "
    1856             :                           "state->unique_data_epoch=%"PRIu64"\n",
    1857             :                           lck.unique_data_epoch,
    1858             :                           state->unique_data_epoch);
    1859          87 :                 state->status = NT_STATUS_OK;
    1860          87 :                 return;
    1861             :         }
    1862             : 
    1863             :         /*
    1864             :          * If the lock epoch changed, we had better
    1865             :          * remove ourselves from the waiter list
    1866             :          * (most likely the first position)
    1867             :          * and re-add ourselves at the end of the list.
    1868             :          *
    1869             :          * This gives other lock waiters a chance
    1870             :          * to make progress.
    1871             :          *
    1872             :          * Otherwise we keep our waiter instance alive
    1873             :          * and keep waiting (most likely at the first position).
    1874             :          */
    1875         916 :         if (lck.unique_lock_epoch != state->unique_lock_epoch) {
    1876         830 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1877         830 :                 state->watch_instance = dbwrap_watched_watch_add_instance(rec);
    1878         830 :                 state->unique_lock_epoch = lck.unique_lock_epoch;
    1879             :         }
    1880             : 
    1881         916 :         subreq = dbwrap_watched_watch_send(
    1882             :                 state, state->ev, rec, state->watch_instance, state->blocker);
    1883         916 :         if (subreq == NULL) {
    1884           0 :                 dbwrap_watched_watch_remove_instance(rec, state->watch_instance);
    1885           0 :                 state->status = NT_STATUS_NO_MEMORY;
    1886           0 :                 return;
    1887             :         }
    1888         916 :         tevent_req_set_callback(subreq, g_lock_watch_data_done, req);
    1889             : 
    1890         916 :         state->status = NT_STATUS_EVENT_PENDING;
    1891             : }
    1892             : 
    1893        1003 : static void g_lock_watch_data_done(struct tevent_req *subreq)
    1894             : {
    1895        1003 :         struct tevent_req *req = tevent_req_callback_data(
    1896             :                 subreq, struct tevent_req);
    1897        1003 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1898             :                 req, struct g_lock_watch_data_state);
    1899           3 :         NTSTATUS status;
    1900        1003 :         uint64_t instance = 0;
    1901             : 
    1902        1003 :         status = dbwrap_watched_watch_recv(
    1903             :                 subreq, &instance, &state->blockerdead, &state->blocker);
    1904        1003 :         TALLOC_FREE(subreq);
    1905        1003 :         if (tevent_req_nterror(req, status)) {
    1906           0 :                 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
    1907             :                           nt_errstr(status));
    1908         916 :                 return;
    1909             :         }
    1910             : 
    1911        1003 :         state->watch_instance = instance;
    1912             : 
    1913        1003 :         status = dbwrap_do_locked(
    1914        1003 :                 state->ctx->db, state->key, g_lock_watch_data_done_fn, req);
    1915        1003 :         if (tevent_req_nterror(req, status)) {
    1916           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
    1917           0 :                 return;
    1918             :         }
    1919        1003 :         if (NT_STATUS_EQUAL(state->status, NT_STATUS_EVENT_PENDING)) {
    1920         914 :                 return;
    1921             :         }
    1922          87 :         if (tevent_req_nterror(req, state->status)) {
    1923           0 :                 return;
    1924             :         }
    1925          87 :         tevent_req_done(req);
    1926             : }
    1927             : 
    1928          86 : NTSTATUS g_lock_watch_data_recv(
    1929             :         struct tevent_req *req,
    1930             :         bool *blockerdead,
    1931             :         struct server_id *blocker)
    1932             : {
    1933          86 :         struct g_lock_watch_data_state *state = tevent_req_data(
    1934             :                 req, struct g_lock_watch_data_state);
    1935           0 :         NTSTATUS status;
    1936             : 
    1937          86 :         if (tevent_req_is_nterror(req, &status)) {
    1938           0 :                 return status;
    1939             :         }
    1940          86 :         if (blockerdead != NULL) {
    1941          86 :                 *blockerdead = state->blockerdead;
    1942             :         }
    1943          86 :         if (blocker != NULL) {
    1944          86 :                 *blocker = state->blocker;
    1945             :         }
    1946             : 
    1947          86 :         return NT_STATUS_OK;
    1948             : }
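
Usage sketch (not part of g_lock.c itself): a caller interested in data
changes registers with g_lock_watch_data_send() and completes with
g_lock_watch_data_recv(). The names below are invented; passing a zeroed
server_id as the blocker is an assumption meaning "no particular blocker to
watch for".

        static void example_data_changed(struct tevent_req *subreq);

        static bool example_watch(TALLOC_CTX *mem_ctx,
                                  struct tevent_context *ev,
                                  struct g_lock_ctx *ctx,
                                  TDB_DATA key)
        {
                struct server_id nobody = { .pid = 0 };
                struct tevent_req *subreq = g_lock_watch_data_send(
                        mem_ctx, ev, ctx, key, nobody);

                if (subreq == NULL) {
                        return false;
                }
                tevent_req_set_callback(subreq, example_data_changed, NULL);
                return true;
        }

        static void example_data_changed(struct tevent_req *subreq)
        {
                bool blockerdead = false;
                struct server_id blocker = { .pid = 0 };
                NTSTATUS status = g_lock_watch_data_recv(
                        subreq, &blockerdead, &blocker);

                TALLOC_FREE(subreq);
                DBG_DEBUG("watch_data returned %s\n", nt_errstr(status));
        }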
    1949             : 
    1950        2328 : static void g_lock_wake_watchers_fn(
    1951             :         struct db_record *rec,
    1952             :         TDB_DATA value,
    1953             :         void *private_data)
    1954             : {
    1955        2328 :         struct g_lock lck = { .exclusive.pid = 0 };
    1956          10 :         NTSTATUS status;
    1957          10 :         bool ok;
    1958             : 
    1959        2328 :         ok = g_lock_parse(value.dptr, value.dsize, &lck);
    1960        2328 :         if (!ok) {
    1961           0 :                 DBG_WARNING("g_lock_parse failed\n");
    1962           0 :                 return;
    1963             :         }
    1964             : 
    1965        2328 :         lck.unique_data_epoch = generate_unique_u64(lck.unique_data_epoch);
    1966             : 
    1967        2328 :         status = g_lock_store(rec, &lck, NULL, NULL, 0);
    1968        2328 :         if (!NT_STATUS_IS_OK(status)) {
    1969           0 :                 DBG_WARNING("g_lock_store failed: %s\n", nt_errstr(status));
    1970           0 :                 return;
    1971             :         }
    1972             : }
    1973             : 
    1974        2328 : void g_lock_wake_watchers(struct g_lock_ctx *ctx, TDB_DATA key)
    1975             : {
    1976          10 :         NTSTATUS status;
    1977             : 
    1978        2328 :         SMB_ASSERT(!ctx->busy);
    1979             : 
    1980        2328 :         status = dbwrap_do_locked(ctx->db, key, g_lock_wake_watchers_fn, NULL);
    1981        2328 :         if (!NT_STATUS_IS_OK(status)) {
    1982           0 :                 DBG_DEBUG("dbwrap_do_locked returned %s\n",
    1983             :                           nt_errstr(status));
    1984             :         }
    1985        2328 : }

Generated by: LCOV version 1.14