/*
 * RPC host
 *
 * Implements samba-dcerpcd service.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This binary has two usage modes:
 *
 * In the normal case, when invoked from smbd or winbind, it is given a
 * directory to scan via --libexec-rpcds and will invoke on demand any
 * binaries it finds there starting with rpcd_ when a named pipe
 * connection is requested.
 *
 * In the second mode it can be started explicitly from system startup
 * scripts.
 *
 * When Samba is set up as an Active Directory Domain Controller the
 * normal samba binary overrides and provides DCERPC services, whilst
 * allowing samba-dcerpcd to provide the services that smbd used to
 * provide in that set-up, such as SRVSVC.
 *
 * The second mode can also be useful outside of the Samba framework,
 * for example with the Linux kernel SMB2 server ksmbd. In this mode
 * samba-dcerpcd behaves like inetd and listens on sockets on behalf
 * of RPC server implementations.
 */
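
/*
 * Illustrative invocations (a sketch only; the exact option spelling
 * is defined by main(), which is outside this excerpt):
 *
 *   # standalone, scanning a directory for rpcd_* binaries
 *   samba-dcerpcd --libexec-rpcds=<dir-with-rpcd-binaries>
 *
 *   # as spawned on demand by smbd/winbind
 *   samba-dcerpcd --libexec-rpcds=<dir> --np-helper
 */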

#include "replace.h"
#include <fnmatch.h>
#include "lib/cmdline/cmdline.h"
#include "lib/cmdline/closefrom_except.h"
#include "source3/include/includes.h"
#include "source3/include/auth.h"
#include "rpc_sock_helper.h"
#include "messages.h"
#include "lib/util_file.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/util/smb_strtox.h"
#include "lib/util/debug.h"
#include "lib/util/server_id.h"
#include "lib/util/util_tdb.h"
#include "lib/util/util_file.h"
#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/async_req/async_sock.h"
#include "librpc/rpc/dcerpc_util.h"
#include "lib/tsocket/tsocket.h"
#include "libcli/named_pipe_auth/npa_tstream.h"
#include "librpc/gen_ndr/ndr_rpc_host.h"
#include "source3/param/loadparm.h"
#include "source3/lib/global_contexts.h"
#include "lib/util/strv.h"
#include "lib/util/pidfile.h"
#include "source3/rpc_client/cli_pipe.h"
#include "librpc/gen_ndr/ndr_epmapper.h"
#include "librpc/gen_ndr/ndr_epmapper_c.h"
#include "nsswitch/winbind_client.h"
#include "libcli/security/dom_sid.h"
#include "libcli/security/security_token.h"

extern bool override_logfile;

struct rpc_server;
struct rpc_work_process;

/*
 * samba-dcerpcd state to keep track of rpcd_* servers.
 */
struct rpc_host {
	struct messaging_context *msg_ctx;
	struct rpc_server **servers;
	struct tdb_wrap *epmdb;

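	/*
	 * Lifetime-control pipe: the read end is dup'ed onto stdin of
	 * every forked rpcd_* worker (see rpc_host_exec_worker()), so
	 * workers can notice when samba-dcerpcd goes away. The
	 * worker-side stdin watching is assumed, it is not part of
	 * this file.
	 */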
	int worker_stdin[2];

	bool np_helper;

	/*
	 * If we're started with --np-helper but nobody contacts us,
	 * we need to exit after a while. This timer is deleted once
	 * the first real client connects; from then on the self-exit
	 * mechanism that kicks in when no worker processes are left
	 * takes over.
	 */
	struct tevent_timer *np_helper_shutdown;
};

/*
 * Map an RPC interface to a name. Used when filling the endpoint
 * mapper database.
 */
struct rpc_host_iface_name {
	struct ndr_syntax_id iface;
	char *name;
};

/*
 * rpc_host representation for listening sockets. ncacn_ip_tcp might
 * listen on multiple explicit IPs, all with the same port.
 */
struct rpc_host_endpoint {
	struct rpc_server *server;
	struct dcerpc_binding *binding;
	struct ndr_syntax_id *interfaces;
	int *fds;
	size_t num_fds;
};

/*
 * Staging area until we send the socket plus bind to the helper
 */
struct rpc_host_pending_client {
	struct rpc_host_pending_client *prev, *next;

	/*
	 * Pointer for the destructor to remove us from the list of
	 * pending clients
	 */
	struct rpc_server *server;

	/*
	 * Waiter for client exit before a helper accepted the request
	 */
	struct tevent_req *hangup_wait;

	/*
	 * Info to pick the worker
	 */
	struct ncacn_packet *bind_pkt;

	/*
	 * This is what we send down to the worker
	 */
	int sock;
	struct rpc_host_client *client;
};

/*
 * Representation of one worker process. For each rpcd_* executable
 * there will be one or more of these.
 */
struct rpc_work_process {
	pid_t pid;

	/*
	 * !available means either:
	 *
	 * The worker forked but did not send its initial status yet
	 * (not yet initialized), or
	 *
	 * the worker died, but we did not receive SIGCHLD yet. We
	 * noticed it because we couldn't send it a message.
	 */
	bool available;

	/*
	 * Incremented by us when sending a client, decremented by
	 * MSG_RPC_HOST_WORKER_STATUS sent by workers whenever a
	 * client exits.
	 */
	uint32_t num_associations;
	uint32_t num_connections;

	/*
	 * Send SHUTDOWN to an idle child after a while
	 */
	struct tevent_timer *exit_timer;
};
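
/*
 * Note: an association group is one logical client, which may
 * multiplex several transport connections. The load-balancing code
 * below therefore prefers comparing association counts over raw
 * connection counts.
 */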

/*
 * State for a set of running instances of an rpcd_* server executable
 */
struct rpc_server {
	struct rpc_host *host;
	/*
	 * Index into the rpc_host_state->servers array
	 */
	uint32_t server_index;

	const char *rpc_server_exe;

	struct rpc_host_endpoint **endpoints;
	struct rpc_host_iface_name *iface_names;

	size_t max_workers;
	size_t idle_seconds;

	/*
	 * "workers" can be larger than "max_workers": Internal
	 * connections require an idle worker to avoid deadlocks
	 * between RPC servers: netlogon requires samr, everybody
	 * requires winreg. And if a deep call in netlogon asks for a
	 * samr connection, this must never end up in the same
	 * process. named_pipe_auth_req_info8->need_idle_server is set
	 * in those cases.
	 */
	struct rpc_work_process *workers;

	struct rpc_host_pending_client *pending_clients;
};

struct rpc_server_get_endpoints_state {
	char **argl;
	char *ncalrpc_endpoint;
	enum dcerpc_transport_t only_transport;

	struct rpc_host_iface_name *iface_names;
	struct rpc_host_endpoint **endpoints;

	unsigned long num_workers;
	unsigned long idle_seconds;
};

static void rpc_server_get_endpoints_done(struct tevent_req *subreq);

/**
 * @brief Query interfaces from an rpcd helper
 *
 * Spawn an rpcd helper, ask it for the interfaces it serves via
 * --list-interfaces, parse the output
 *
 * @param[in] mem_ctx Memory context for the tevent_req
 * @param[in] ev Event context to run this on
 * @param[in] rpc_server_exe Binary to ask with --list-interfaces
 * @param[in] only_transport Filter out anything but this
 * @return The tevent_req representing this process
 */

static struct tevent_req *rpc_server_get_endpoints_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	const char *rpc_server_exe,
	enum dcerpc_transport_t only_transport)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_server_get_endpoints_state *state = NULL;
	const char *progname = NULL;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_server_get_endpoints_state);
	if (req == NULL) {
		return NULL;
	}
	state->only_transport = only_transport;

	progname = strrchr(rpc_server_exe, '/');
	if (progname != NULL) {
		progname += 1;
	} else {
		progname = rpc_server_exe;
	}

	state->ncalrpc_endpoint = talloc_strdup(state, progname);
	if (tevent_req_nomem(state->ncalrpc_endpoint, req)) {
		return tevent_req_post(req, ev);
	}

	state->argl = str_list_make_empty(state);
	str_list_add_printf(&state->argl, "%s", rpc_server_exe);
	str_list_add_printf(&state->argl, "--list-interfaces");
	str_list_add_printf(
		&state->argl, "--configfile=%s", get_dyn_CONFIGFILE());

	if (tevent_req_nomem(state->argl, req)) {
		return tevent_req_post(req, ev);
	}

	subreq = file_ploadv_send(state, ev, state->argl, 65536);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_server_get_endpoints_done, req);
	return req;
}

/*
 * Parse a line of the format
 *
 * 338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *
 * and add it to the "piface_names" array.
 */

static struct rpc_host_iface_name *rpc_exe_parse_iface_line(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_iface_name **piface_names,
	const char *line)
{
	struct rpc_host_iface_name *iface_names = *piface_names;
	struct rpc_host_iface_name *tmp = NULL, *result = NULL;
	size_t i, num_ifaces = talloc_array_length(iface_names);
	struct ndr_syntax_id iface;
	char *name = NULL;
	bool ok;

	ok = ndr_syntax_id_from_string(line, &iface);
	if (!ok) {
		DBG_WARNING("ndr_syntax_id_from_string() failed for: [%s]\n",
			    line);
		return NULL;
	}

	name = strchr(line, ' ');
	if (name == NULL) {
		return NULL;
	}
	name += 1;

	for (i=0; i<num_ifaces; i++) {
		result = &iface_names[i];

		if (ndr_syntax_id_equal(&result->iface, &iface)) {
			return result;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return NULL;
	}

	name = talloc_strdup(mem_ctx, name);
	if (name == NULL) {
		return NULL;
	}

	tmp = talloc_realloc(
		mem_ctx,
		iface_names,
		struct rpc_host_iface_name,
		num_ifaces + 1);
	if (tmp == NULL) {
		TALLOC_FREE(name);
		return NULL;
	}
	iface_names = tmp;

	result = &iface_names[num_ifaces];

	*result = (struct rpc_host_iface_name) {
		.iface = iface,
		.name = talloc_move(iface_names, &name),
	};

	*piface_names = iface_names;

	return result;
}

static struct rpc_host_iface_name *rpc_host_iface_names_find(
	struct rpc_host_iface_name *iface_names,
	const struct ndr_syntax_id *iface)
{
	size_t i, num_iface_names = talloc_array_length(iface_names);

	for (i=0; i<num_iface_names; i++) {
		struct rpc_host_iface_name *iface_name = &iface_names[i];

		if (ndr_syntax_id_equal(iface, &iface_name->iface)) {
			return iface_name;
		}
	}

	return NULL;
}
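
/*
 * Two bindings count as the same listening endpoint if they share
 * the transport and the "endpoint" binding option (e.g. pipe name or
 * TCP port); two bindings without any endpoint option also match.
 */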
static bool dcerpc_binding_same_endpoint(
	const struct dcerpc_binding *b1, const struct dcerpc_binding *b2)
{
	enum dcerpc_transport_t t1 = dcerpc_binding_get_transport(b1);
	enum dcerpc_transport_t t2 = dcerpc_binding_get_transport(b2);
	const char *e1 = NULL, *e2 = NULL;
	int cmp;

	if (t1 != t2) {
		return false;
	}

	e1 = dcerpc_binding_get_string_option(b1, "endpoint");
	e2 = dcerpc_binding_get_string_option(b2, "endpoint");

	if ((e1 == NULL) && (e2 == NULL)) {
		return true;
	}
	if ((e1 == NULL) || (e2 == NULL)) {
		return false;
	}
	cmp = strcmp(e1, e2);
	return (cmp == 0);
}

/**
 * @brief Filter whether we want to serve an endpoint
 *
 * samba-dcerpcd might want to serve all endpoints an rpcd reported to
 * us via --list-interfaces.
 *
 * In member mode, we only serve named pipes. This is indicated by
 * NCACN_NP passed in via "only_transport".
 *
 * @param[in] binding Which binding is in question?
 * @param[in] only_transport Exclusive transport to serve
 * @return Do we want to serve "binding" from samba-dcerpcd?
 */

static bool rpc_host_serve_endpoint(
	struct dcerpc_binding *binding,
	enum dcerpc_transport_t only_transport)
{
	enum dcerpc_transport_t transport =
		dcerpc_binding_get_transport(binding);

	if (only_transport == NCA_UNKNOWN) {
		/* no filter around */
		return true;
	}

	if (transport != only_transport) {
		/* filter out */
		return false;
	}

	return true;
}

static struct rpc_host_endpoint *rpc_host_endpoint_find(
	struct rpc_server_get_endpoints_state *state,
	const char *binding_string)
{
	size_t i, num_endpoints = talloc_array_length(state->endpoints);
	struct rpc_host_endpoint **tmp = NULL, *ep = NULL;
	enum dcerpc_transport_t transport;
	NTSTATUS status;
	bool serve_this;

	ep = talloc_zero(state, struct rpc_host_endpoint);
	if (ep == NULL) {
		goto fail;
	}

	status = dcerpc_parse_binding(ep, binding_string, &ep->binding);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_parse_binding(%s) failed: %s\n",
			  binding_string,
			  nt_errstr(status));
		goto fail;
	}

	serve_this = rpc_host_serve_endpoint(
		ep->binding, state->only_transport);
	if (!serve_this) {
		goto fail;
	}

	transport = dcerpc_binding_get_transport(ep->binding);

	if (transport == NCALRPC) {
		const char *ncalrpc_sock = dcerpc_binding_get_string_option(
			ep->binding, "endpoint");

		if (ncalrpc_sock == NULL) {
			/*
			 * generic ncalrpc:, set program-specific
			 * socket name. epmapper will redirect clients
			 * properly.
			 */
			status = dcerpc_binding_set_string_option(
				ep->binding,
				"endpoint",
				state->ncalrpc_endpoint);
			if (!NT_STATUS_IS_OK(status)) {
				DBG_DEBUG("dcerpc_binding_set_string_option "
					  "failed: %s\n",
					  nt_errstr(status));
				goto fail;
			}
		}
	}

	for (i=0; i<num_endpoints; i++) {

		bool ok = dcerpc_binding_same_endpoint(
			ep->binding, state->endpoints[i]->binding);

		if (ok) {
			TALLOC_FREE(ep);
			return state->endpoints[i];
		}
	}

	if (num_endpoints + 1 < num_endpoints) {
		goto fail;
	}

	tmp = talloc_realloc(
		state,
		state->endpoints,
		struct rpc_host_endpoint *,
		num_endpoints + 1);
	if (tmp == NULL) {
		goto fail;
	}
	state->endpoints = tmp;
	state->endpoints[num_endpoints] = talloc_move(state->endpoints, &ep);

	return state->endpoints[num_endpoints];
fail:
	TALLOC_FREE(ep);
	return NULL;
}

static bool ndr_interfaces_add_unique(
	TALLOC_CTX *mem_ctx,
	struct ndr_syntax_id **pifaces,
	const struct ndr_syntax_id *iface)
{
	struct ndr_syntax_id *ifaces = *pifaces;
	size_t i, num_ifaces = talloc_array_length(ifaces);

	for (i=0; i<num_ifaces; i++) {
		if (ndr_syntax_id_equal(iface, &ifaces[i])) {
			return true;
		}
	}

	if (num_ifaces + 1 < num_ifaces) {
		return false;
	}
	ifaces = talloc_realloc(
		mem_ctx,
		ifaces,
		struct ndr_syntax_id,
		num_ifaces + 1);
	if (ifaces == NULL) {
		return false;
	}
	ifaces[num_ifaces] = *iface;

	*pifaces = ifaces;
	return true;
}

/*
 * Read the text reply from the rpcd_* process telling us what
 * endpoints it will serve when asked with --list-interfaces.
 */
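/*
 * Illustrative reply layout (a sketch; the exact endpoint strings
 * depend on the rpcd binary and smb.conf):
 *
 *   1                                                <- num_workers
 *   300                                              <- idle_seconds
 *   338cd001-2244-31f1-aaaa-900038001003/0x00000001 winreg
 *    ncacn_np:[winreg]
 *    ncalrpc:
 *
 * Interface lines start in column 0, endpoint lines are indented by
 * one space, see the parser below.
 */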
static void rpc_server_get_endpoints_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	struct rpc_host_iface_name *iface = NULL;
	uint8_t *buf = NULL;
	size_t buflen;
	char **lines = NULL;
	int ret, i, num_lines;

	ret = file_ploadv_recv(subreq, state, &buf);
	TALLOC_FREE(subreq);
	if (tevent_req_error(req, ret)) {
		return;
	}

	buflen = talloc_get_size(buf);
	if (buflen == 0) {
		tevent_req_done(req);
		return;
	}

	lines = file_lines_parse((char *)buf, buflen, &num_lines, state);
	if (tevent_req_nomem(lines, req)) {
		return;
	}

	if (num_lines < 2) {
		DBG_DEBUG("Got %d lines, expected at least 2\n", num_lines);
		tevent_req_error(req, EINVAL);
		return;
	}

	state->num_workers = smb_strtoul(
		lines[0], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse num_workers(%s): %s\n",
			  lines[0],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}
	/*
	 * Limit the number of workers so that a worker index fits
	 * into 16 bits, which leaves a 16-bit association group
	 * space per worker.
	 */
	if (state->num_workers > 65536) {
		state->num_workers = 65536;
	}

	state->idle_seconds = smb_strtoul(
		lines[1], NULL, 10, &ret, SMB_STR_FULL_STR_CONV);
	if (ret != 0) {
		DBG_DEBUG("Could not parse idle_seconds (%s): %s\n",
			  lines[1],
			  strerror(ret));
		tevent_req_error(req, ret);
		return;
	}

	DBG_DEBUG("num_workers=%lu, idle_seconds=%lu for %s\n",
		  state->num_workers,
		  state->idle_seconds,
		  state->argl[0]);

	for (i=2; i<num_lines; i++) {
		char *line = lines[i];
		struct rpc_host_endpoint *endpoint = NULL;
		bool ok;

		if (line[0] != ' ') {
			iface = rpc_exe_parse_iface_line(
				state, &state->iface_names, line);
			if (iface == NULL) {
				DBG_WARNING(
					"rpc_exe_parse_iface_line failed "
					"for: [%s] from %s\n",
					line,
					state->argl[0]);
				tevent_req_oom(req);
				return;
			}
			continue;
		}

		if (iface == NULL) {
			DBG_DEBUG("Interface GUID line missing\n");
			tevent_req_error(req, EINVAL);
			return;
		}

		endpoint = rpc_host_endpoint_find(state, line+1);
		if (endpoint == NULL) {
			DBG_DEBUG("rpc_host_endpoint_find for %s failed\n",
				  line+1);
			continue;
		}

		ok = ndr_interfaces_add_unique(
			endpoint,
			&endpoint->interfaces,
			&iface->iface);
		if (!ok) {
			DBG_DEBUG("ndr_interfaces_add_unique failed\n");
			tevent_req_oom(req);
			return;
		}
	}

	tevent_req_done(req);
}

/**
 * @brief Receive output from --list-interfaces
 *
 * @param[in] req The async req that just finished
 * @param[in] mem_ctx Where to put the output on
 * @param[out] endpoints The endpoints to be listened on
 * @param[out] iface_names Annotation for epm_Lookup's epm_entry_t
 * @return 0/errno
 */
static int rpc_server_get_endpoints_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint ***endpoints,
	struct rpc_host_iface_name **iface_names,
	size_t *num_workers,
	size_t *idle_seconds)
{
	struct rpc_server_get_endpoints_state *state = tevent_req_data(
		req, struct rpc_server_get_endpoints_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*endpoints = talloc_move(mem_ctx, &state->endpoints);
	*iface_names = talloc_move(mem_ctx, &state->iface_names);
	*num_workers = state->num_workers;
	*idle_seconds = state->idle_seconds;
	tevent_req_received(req);
	return 0;
}

/*
 * For NCACN_NP we get the named pipe auth info from smbd; if a client
 * comes in via TCP or NCALRPC we need to invent it ourselves with
 * anonymous session info.
 */
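/*
 * Summary of what gets filled in below (a sketch of the flow, not a
 * spec of named_pipe_auth_req_info8): an anonymous session_info,
 * remote/local address and name strings, the TCP ports where
 * applicable, and for ncalrpc connections from our own uid the magic
 * "system" path so gensec treats the peer as root.
 */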

static NTSTATUS rpc_host_generate_npa_info8_from_sock(
	TALLOC_CTX *mem_ctx,
	enum dcerpc_transport_t transport,
	int sock,
	const struct samba_sockaddr *peer_addr,
	struct named_pipe_auth_req_info8 **pinfo8)
{
	struct named_pipe_auth_req_info8 *info8 = NULL;
	struct samba_sockaddr local_addr = {
		.sa_socklen = sizeof(struct sockaddr_storage),
	};
	struct tsocket_address *taddr = NULL;
	char *remote_client_name = NULL;
	char *remote_client_addr = NULL;
	char *local_server_name = NULL;
	char *local_server_addr = NULL;
	char *(*tsocket_address_to_name_fn)(
		const struct tsocket_address *addr,
		TALLOC_CTX *mem_ctx) = NULL;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	int ret;

	/*
	 * For NCACN_NP we get the npa info from smbd
	 */
	SMB_ASSERT((transport == NCACN_IP_TCP) || (transport == NCALRPC));

	tsocket_address_to_name_fn = (transport == NCACN_IP_TCP) ?
		tsocket_address_inet_addr_string : tsocket_address_unix_path;

	info8 = talloc_zero(mem_ctx, struct named_pipe_auth_req_info8);
	if (info8 == NULL) {
		goto fail;
	}
	info8->session_info =
		talloc_zero(info8, struct auth_session_info_transport);
	if (info8->session_info == NULL) {
		goto fail;
	}

	status = make_session_info_anonymous(
		info8->session_info,
		&info8->session_info->session_info);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("make_session_info_anonymous failed: %s\n",
			  nt_errstr(status));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      peer_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	remote_client_addr = tsocket_address_to_name_fn(taddr, info8);
	if (remote_client_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	remote_client_name = talloc_strdup(info8, remote_client_addr);
	if (remote_client_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(peer_addr,
						  &info8->remote_client_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	ret = getsockname(sock, &local_addr.u.sa, &local_addr.sa_socklen);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("getsockname failed: %s\n", strerror(errno));
		goto fail;
	}

	ret = tsocket_address_bsd_from_samba_sockaddr(info8,
						      &local_addr,
						      &taddr);
	if (ret == -1) {
		status = map_nt_error_from_unix(errno);
		DBG_DEBUG("tsocket_address_bsd_from_samba_sockaddr failed: "
			  "%s\n",
			  strerror(errno));
		goto fail;
	}
	local_server_addr = tsocket_address_to_name_fn(taddr, info8);
	if (local_server_addr == NULL) {
		DBG_DEBUG("tsocket_address_to_name_fn failed\n");
		goto nomem;
	}
	TALLOC_FREE(taddr);

	local_server_name = talloc_strdup(info8, local_server_addr);
	if (local_server_name == NULL) {
		DBG_DEBUG("talloc_strdup failed\n");
		goto nomem;
	}

	if (transport == NCACN_IP_TCP) {
		bool ok = samba_sockaddr_get_port(&local_addr,
						  &info8->local_server_port);
		if (!ok) {
			DBG_DEBUG("samba_sockaddr_get_port failed\n");
			status = NT_STATUS_INVALID_PARAMETER;
			goto fail;
		}
	}

	if (transport == NCALRPC) {
		uid_t uid;
		gid_t gid;

		ret = getpeereid(sock, &uid, &gid);
		if (ret < 0) {
			status = map_nt_error_from_unix(errno);
			DBG_DEBUG("getpeereid failed: %s\n", strerror(errno));
			goto fail;
		}

		if (uid == sec_initial_uid()) {

			/*
			 * Indicate "root" to gensec
			 */

			TALLOC_FREE(remote_client_addr);
			TALLOC_FREE(remote_client_name);

			ret = tsocket_address_unix_from_path(
				info8,
				AS_SYSTEM_MAGIC_PATH_TOKEN,
				&taddr);
			if (ret == -1) {
				DBG_DEBUG("tsocket_address_unix_from_path "
					  "failed\n");
				goto nomem;
			}

			remote_client_addr =
				tsocket_address_unix_path(taddr, info8);
			if (remote_client_addr == NULL) {
				DBG_DEBUG("tsocket_address_unix_path "
					  "failed\n");
				goto nomem;
			}
			remote_client_name =
				talloc_strdup(info8, remote_client_addr);
			if (remote_client_name == NULL) {
				DBG_DEBUG("talloc_strdup failed\n");
				goto nomem;
			}
		}
	}

	info8->remote_client_addr = remote_client_addr;
	info8->remote_client_name = remote_client_name;
	info8->local_server_addr = local_server_addr;
	info8->local_server_name = local_server_name;

	*pinfo8 = info8;
	return NT_STATUS_OK;

nomem:
	status = NT_STATUS_NO_MEMORY;
fail:
	TALLOC_FREE(info8);
	return status;
}

struct rpc_host_bind_read_state {
	struct tevent_context *ev;

	int sock;
	struct tstream_context *plain;
	struct tstream_context *npa_stream;

	struct ncacn_packet *pkt;
	struct rpc_host_client *client;
};

static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
static void rpc_host_bind_read_got_npa(struct tevent_req *subreq);
static void rpc_host_bind_read_got_bind(struct tevent_req *subreq);

/*
 * Wait for a bind packet from a client.
 */
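/*
 * Two paths, as implemented below: for NCACN_NP we first run the
 * named-pipe-auth handshake against the tstream smbd handed us and
 * take the npa info from there; for TCP and ncalrpc we synthesize the
 * npa info ourselves and read the bind PDU directly off the plain
 * stream.
 */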
static struct tevent_req *rpc_host_bind_read_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	enum dcerpc_transport_t transport,
	int *psock,
	const struct samba_sockaddr *peer_addr)
{
	struct tevent_req *req = NULL, *subreq = NULL;
	struct rpc_host_bind_read_state *state = NULL;
	int rc, sock_dup;
	NTSTATUS status;

	req = tevent_req_create(
		mem_ctx, &state, struct rpc_host_bind_read_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;

	state->sock = *psock;
	*psock = -1;

	tevent_req_set_cleanup_fn(req, rpc_host_bind_read_cleanup);

	state->client = talloc_zero(state, struct rpc_host_client);
	if (tevent_req_nomem(state->client, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * Dup the socket to read the first RPC packet:
	 * tstream_bsd_existing_socket() takes ownership with
	 * autoclose, but we need to send "sock" down to our worker
	 * process later.
	 */
	sock_dup = dup(state->sock);
	if (sock_dup == -1) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	rc = tstream_bsd_existing_socket(state, sock_dup, &state->plain);
	if (rc == -1) {
		DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n",
			  strerror(errno));
		tevent_req_error(req, errno);
		close(sock_dup);
		return tevent_req_post(req, ev);
	}
	/* as server we want to fail early */
	tstream_bsd_fail_readv_first_error(state->plain, true);

	if (transport == NCACN_NP) {
		subreq = tstream_npa_accept_existing_send(
			state,
			ev,
			state->plain,
			FILE_TYPE_MESSAGE_MODE_PIPE,
			0xff | 0x0400 | 0x0100,
			4096);
		if (tevent_req_nomem(subreq, req)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_set_callback(
			subreq, rpc_host_bind_read_got_npa, req);
		return req;
	}

	status = rpc_host_generate_npa_info8_from_sock(
		state->client,
		transport,
		state->sock,
		peer_addr,
		&state->client->npa_info8);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}

	subreq = dcerpc_read_ncacn_packet_send(state, ev, state->plain);
	if (tevent_req_nomem(subreq, req)) {
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
	return req;
}

static void rpc_host_bind_read_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);

	if ((req_state == TEVENT_REQ_RECEIVED) && (state->sock != -1)) {
		close(state->sock);
		state->sock = -1;
	}
}

static void rpc_host_bind_read_got_npa(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct named_pipe_auth_req_info8 *info8 = NULL;
	int ret, err;

	ret = tstream_npa_accept_existing_recv(subreq,
					       &err,
					       state,
					       &state->npa_stream,
					       &info8,
					       NULL, /* transport */
					       NULL, /* remote_client_addr */
					       NULL, /* remote_client_name */
					       NULL, /* local_server_addr */
					       NULL, /* local_server_name */
					       NULL); /* session_info */
	if (ret == -1) {
		tevent_req_error(req, err);
		return;
	}

	state->client->npa_info8 = talloc_move(state->client, &info8);

	subreq = dcerpc_read_ncacn_packet_send(
		state, state->ev, state->npa_stream);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, rpc_host_bind_read_got_bind, req);
}

static void rpc_host_bind_read_got_bind(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	struct ncacn_packet *pkt = NULL;
	NTSTATUS status;

	status = dcerpc_read_ncacn_packet_recv(
		subreq,
		state->client,
		&pkt,
		&state->client->bind_packet);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("dcerpc_read_ncacn_packet_recv failed: %s\n",
			  nt_errstr(status));
		tevent_req_error(req, EINVAL); /* TODO */
		return;
	}
	state->pkt = talloc_move(state, &pkt);

	tevent_req_done(req);
}

static int rpc_host_bind_read_recv(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	int *sock,
	struct rpc_host_client **client,
	struct ncacn_packet **bind_pkt)
{
	struct rpc_host_bind_read_state *state = tevent_req_data(
		req, struct rpc_host_bind_read_state);
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	*sock = state->sock;
	state->sock = -1;

	*client = talloc_move(mem_ctx, &state->client);
	*bind_pkt = talloc_move(mem_ctx, &state->pkt);
	tevent_req_received(req);
	return 0;
}

/*
 * Start the given rpcd_* binary.
 */
static int rpc_host_exec_worker(struct rpc_server *server, size_t idx)
{
	struct rpc_work_process *worker = &server->workers[idx];
	char **argv = NULL;
	int ret = ENOMEM;

	argv = str_list_make_empty(server);
	str_list_add_printf(
		&argv, "%s", server->rpc_server_exe);
	str_list_add_printf(
		&argv, "--configfile=%s", get_dyn_CONFIGFILE());
	str_list_add_printf(
		&argv, "--worker-group=%"PRIu32, server->server_index);
	str_list_add_printf(
		&argv, "--worker-index=%zu", idx);
	str_list_add_printf(
		&argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV));
	if (!is_default_dyn_LOGFILEBASE()) {
		str_list_add_printf(
			&argv, "--log-basename=%s", get_dyn_LOGFILEBASE());
	}
	if (argv == NULL) {
		ret = ENOMEM;
		goto fail;
	}

	worker->pid = fork();
	if (worker->pid == -1) {
		ret = errno;
		goto fail;
	}
	if (worker->pid == 0) {
		/* Child. */
		close(server->host->worker_stdin[1]);
		ret = dup2(server->host->worker_stdin[0], 0);
		if (ret != 0) {
			exit(1);
		}
		execv(argv[0], argv);
		_exit(1);
	}

	DBG_DEBUG("Creating worker %s for index %zu: pid=%d\n",
		  server->rpc_server_exe,
		  idx,
		  (int)worker->pid);

	ret = 0;
fail:
	TALLOC_FREE(argv);
	return ret;
}

/*
 * Find an rpcd_* worker for an external client, respect server->max_workers
 */
static struct rpc_work_process *rpc_host_find_worker(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_work_process *perfect_worker = NULL;
	struct rpc_work_process *best_worker = NULL;
	size_t empty_slot = SIZE_MAX;
	size_t i;

	for (i=0; i<server->max_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_associations == 0) {
			/*
			 * We have an idle worker...
			 */
			perfect_worker = worker;
			break;
		}
		if (best_worker == NULL) {
			/*
			 * It's busy, but the best so far...
			 */
			best_worker = worker;
			continue;
		}
		if (worker->num_associations < best_worker->num_associations) {
			/*
			 * It's also busy, but has fewer association
			 * groups (logical clients)
			 */
			best_worker = worker;
			continue;
		}
		if (worker->num_associations > best_worker->num_associations) {
			/*
			 * It's not better
			 */
			continue;
		}
		/*
		 * Ok, with the same number of association groups
		 * we pick the one with the lowest number of connections
		 */
		if (worker->num_connections < best_worker->num_connections) {
			best_worker = worker;
			continue;
		}
	}

	if (perfect_worker != NULL) {
		return perfect_worker;
	}

	if (empty_slot < SIZE_MAX) {
		int ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	if (best_worker != NULL) {
		return best_worker;
	}

	return NULL;
}

/*
 * Find an rpcd_* worker for an internal connection, possibly going
 * beyond server->max_workers
 */
static struct rpc_work_process *rpc_host_find_idle_worker(
	struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL, *tmp = NULL;
	size_t i, num_workers = talloc_array_length(server->workers);
	size_t empty_slot = SIZE_MAX;
	int ret;

	for (i=server->max_workers; i<num_workers; i++) {
		worker = &server->workers[i];

		if (worker->pid == -1) {
			empty_slot = MIN(empty_slot, i);
			continue;
		}
		if (!worker->available) {
			continue;
		}
		if (worker->num_associations == 0) {
			return &server->workers[i];
		}
	}

	if (empty_slot < SIZE_MAX) {
		ret = rpc_host_exec_worker(server, empty_slot);
		if (ret != 0) {
			DBG_WARNING("Could not fork worker: %s\n",
				    strerror(ret));
		}
		return NULL;
	}

	/*
	 * All workers are busy. We need to expand the number of
	 * workers because we were asked for an idle worker.
	 */
	if (num_workers >= UINT16_MAX) {
		/*
		 * The worker index would not fit into 16 bits
		 */
		return NULL;
	}
	tmp = talloc_realloc(
		server,
		server->workers,
		struct rpc_work_process,
		num_workers+1);
	if (tmp == NULL) {
		return NULL;
	}
	server->workers = tmp;

	server->workers[num_workers] = (struct rpc_work_process) { .pid=-1, };

	ret = rpc_host_exec_worker(server, num_workers);
	if (ret != 0) {
		DBG_WARNING("Could not exec worker: %s\n", strerror(ret));
	}

	return NULL;
}

/*
 * Find an rpcd_* process to talk to. Start a new one if necessary.
 */
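/*
 * Worker selection in a nutshell: a nonzero bind-time assoc_group_id
 * carries the worker index in its upper 16 bits (see the >>16 below
 * and the 16-bit association group space per worker mentioned above),
 * so follow-up binds are routed back to the worker owning the group.
 * Fresh clients either get an idle worker (internal connections) or
 * the least loaded one.
 */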
static void rpc_host_distribute_clients(struct rpc_server *server)
{
	struct rpc_work_process *worker = NULL;
	struct rpc_host_pending_client *pending_client = NULL;
	uint32_t assoc_group_id;
	DATA_BLOB blob;
	struct iovec iov;
	enum ndr_err_code ndr_err;
	NTSTATUS status;
	const char *client_type = NULL;

again:
	pending_client = server->pending_clients;
	if (pending_client == NULL) {
		DBG_DEBUG("No pending clients\n");
		return;
	}

	assoc_group_id = pending_client->bind_pkt->u.bind.assoc_group_id;

	if (assoc_group_id != 0) {
		size_t num_workers = talloc_array_length(server->workers);
		uint16_t worker_index = assoc_group_id >> 16;

		client_type = "associated";

		if (worker_index >= num_workers) {
			DBG_DEBUG("Invalid assoc group id %"PRIu32"\n",
				  assoc_group_id);
			goto done;
		}
		worker = &server->workers[worker_index];

		if ((worker->pid == -1) || !worker->available) {
			DBG_DEBUG("Requested worker index %"PRIu16": "
				  "pid=%d, available=%d\n",
				  worker_index,
				  (int)worker->pid,
				  (int)worker->available);
			/*
			 * Pick a random one for a proper bind nack
			 */
			client_type = "associated+lost";
			worker = rpc_host_find_worker(server);
		}
	} else {
		struct auth_session_info_transport *session_info =
			pending_client->client->npa_info8->session_info;
		uint32_t flags = 0;
		bool found;

		client_type = "new";

		found = security_token_find_npa_flags(
			session_info->session_info->security_token,
			&flags);

		/* fresh assoc group requested */
		if (found && (flags & SAMBA_NPA_FLAGS_NEED_IDLE)) {
			client_type = "new+exclusive";
			worker = rpc_host_find_idle_worker(server);
		} else {
			client_type = "new";
			worker = rpc_host_find_worker(server);
		}
	}

	if (worker == NULL) {
		DBG_DEBUG("No worker found for %s client\n", client_type);
		return;
	}

	DLIST_REMOVE(server->pending_clients, pending_client);

	ndr_err = ndr_push_struct_blob(
		&blob,
		pending_client,
		pending_client->client,
		(ndr_push_flags_fn_t)ndr_push_rpc_host_client);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DBG_WARNING("ndr_push_rpc_host_client failed: %s\n",
			    ndr_errstr(ndr_err));
		goto done;
	}

	DBG_INFO("Sending %s client %s to %d with "
		 "%"PRIu32" associations and %"PRIu32" connections\n",
		 client_type,
		 server->rpc_server_exe,
		 worker->pid,
		 worker->num_associations,
		 worker->num_connections);

	iov = (struct iovec) {
		.iov_base = blob.data, .iov_len = blob.length,
	};

	status = messaging_send_iov(
		server->host->msg_ctx,
		pid_to_procid(worker->pid),
		MSG_RPC_HOST_NEW_CLIENT,
		&iov,
		1,
		&pending_client->sock,
		1);
	if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
		DBG_DEBUG("worker %d died, sigchld not yet received?\n",
			  worker->pid);
		DLIST_ADD(server->pending_clients, pending_client);
		worker->available = false;
		goto again;
	}
	if (!NT_STATUS_IS_OK(status)) {
		DBG_DEBUG("messaging_send_iov failed: %s\n",
			  nt_errstr(status));
		goto done;
	}
	if (assoc_group_id == 0) {
		worker->num_associations += 1;
	}
	worker->num_connections += 1;
	TALLOC_FREE(worker->exit_timer);

	TALLOC_FREE(server->host->np_helper_shutdown);

done:
	TALLOC_FREE(pending_client);
}

static int rpc_host_pending_client_destructor(
	struct rpc_host_pending_client *p)
{
	TALLOC_FREE(p->hangup_wait);
	if (p->sock != -1) {
		close(p->sock);
		p->sock = -1;
	}
	DLIST_REMOVE(p->server->pending_clients, p);
	return 0;
}

/*
 * Exception condition handler before the rpcd_* worker
 * is handling the socket. Either the client exited or
 * sent unexpected data after the initial bind.
 */
static void rpc_host_client_exited(struct tevent_req *subreq)
{
	struct rpc_host_pending_client *pending = tevent_req_callback_data(
		subreq, struct rpc_host_pending_client);
	bool ok;
	int err;

	ok = wait_for_read_recv(subreq, &err);

	TALLOC_FREE(subreq);
	pending->hangup_wait = NULL;

	if (ok) {
		DBG_DEBUG("client on sock %d sent data\n", pending->sock);
	} else {
		DBG_DEBUG("client exited with %s\n", strerror(err));
	}
	TALLOC_FREE(pending);
}

struct rpc_iface_binding_map {
	struct ndr_syntax_id iface;
	char *bindings;
};

static bool rpc_iface_binding_map_add_endpoint(
	TALLOC_CTX *mem_ctx,
	const struct rpc_host_endpoint *ep,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pmaps)
{
	const struct ndr_syntax_id mgmt_iface = {
		{0xafa8bd80,
		 0x7d8a,
		 0x11c9,
		 {0xbe,0xf4},
		 {0x08,0x00,0x2b,0x10,0x29,0x89}
		},
		1.0};

	struct rpc_iface_binding_map *maps = *pmaps;
	size_t i, num_ifaces = talloc_array_length(ep->interfaces);
	char *binding_string = NULL;
	bool ok = false;

	binding_string = dcerpc_binding_string(mem_ctx, ep->binding);
	if (binding_string == NULL) {
		return false;
	}

	for (i=0; i<num_ifaces; i++) {
		const struct ndr_syntax_id *iface = &ep->interfaces[i];
		size_t j, num_maps = talloc_array_length(maps);
		struct rpc_iface_binding_map *map = NULL;
		char *p = NULL;

		if (ndr_syntax_id_equal(iface, &mgmt_iface)) {
			/*
			 * mgmt is offered everywhere, don't put it
			 * into epmdb.tdb.
			 */
			continue;
		}

		for (j=0; j<num_maps; j++) {
			map = &maps[j];
			if (ndr_syntax_id_equal(&map->iface, iface)) {
				break;
			}
		}

		if (j == num_maps) {
			struct rpc_iface_binding_map *tmp = NULL;
			struct rpc_host_iface_name *iface_name = NULL;

			iface_name = rpc_host_iface_names_find(
				iface_names, iface);
			if (iface_name == NULL) {
				goto fail;
			}

			tmp = talloc_realloc(
				mem_ctx,
				maps,
				struct rpc_iface_binding_map,
				num_maps+1);
			if (tmp == NULL) {
				goto fail;
			}
			maps = tmp;

			map = &maps[num_maps];
			*map = (struct rpc_iface_binding_map) {
				.iface = *iface,
				.bindings = talloc_move(
					maps, &iface_name->name),
			};
		}

		p = strv_find(map->bindings, binding_string);
		if (p == NULL) {
			int ret = strv_add(
				maps, &map->bindings, binding_string);
			if (ret != 0) {
				goto fail;
			}
		}
	}

	ok = true;
fail:
	*pmaps = maps;
	return ok;
}

static bool rpc_iface_binding_map_add_endpoints(
	TALLOC_CTX *mem_ctx,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names,
	struct rpc_iface_binding_map **pbinding_maps)
{
	size_t i, num_endpoints = talloc_array_length(endpoints);

	for (i=0; i<num_endpoints; i++) {
		bool ok = rpc_iface_binding_map_add_endpoint(
			mem_ctx, endpoints[i], iface_names, pbinding_maps);
		if (!ok) {
			return false;
		}
	}
	return true;
}

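/*
 * Persist the interface -> binding map into epmdb.tdb. Key is the
 * interface syntax id as a string, value is an strv: the interface
 * name followed by the binding strings, e.g. (values illustrative):
 *
 *   "338cd001-...-900038001003/0x00000001" =>
 *       "winreg\0ncacn_np:[winreg]\0ncalrpc:[rpcd_winreg]\0"
 */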
static bool rpc_host_fill_epm_db(
	struct tdb_wrap *db,
	struct rpc_host_endpoint **endpoints,
	struct rpc_host_iface_name *iface_names)
{
	struct rpc_iface_binding_map *maps = NULL;
	size_t i, num_maps;
	bool ret = false;
	bool ok;

	ok = rpc_iface_binding_map_add_endpoints(
		talloc_tos(), endpoints, iface_names, &maps);
	if (!ok) {
		goto fail;
	}

	num_maps = talloc_array_length(maps);

	for (i=0; i<num_maps; i++) {
		struct rpc_iface_binding_map *map = &maps[i];
		struct ndr_syntax_id_buf buf;
		char *keystr = ndr_syntax_id_buf_string(&map->iface, &buf);
		TDB_DATA value = {
			.dptr = (uint8_t *)map->bindings,
			.dsize = talloc_array_length(map->bindings),
		};
		int rc;

		rc = tdb_store(
			db->tdb, string_term_tdb_data(keystr), value, 0);
		if (rc == -1) {
			DBG_DEBUG("tdb_store() failed: %s\n",
				  tdb_errorstr(db->tdb));
			goto fail;
		}
	}

	ret = true;
fail:
	TALLOC_FREE(maps);
	return ret;
}

struct rpc_server_setup_state {
	struct rpc_server *server;
};

static void rpc_server_setup_got_endpoints(struct tevent_req *subreq);

/*
 * Async initialize state for all possible rpcd_* servers.
 * Note this does not start them.
 */
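/*
 * Setup sequence, as coded below: run the rpcd with
 * --list-interfaces, parse the reply, create and listen on the
 * binding sockets, and record the interfaces in epmdb.tdb. Workers
 * themselves are only forked later, when the first client shows up.
 */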
1643 768 : static struct tevent_req *rpc_server_setup_send(
1644 : TALLOC_CTX *mem_ctx,
1645 : struct tevent_context *ev,
1646 : struct rpc_host *host,
1647 : const char *rpc_server_exe)
1648 : {
1649 768 : struct tevent_req *req = NULL, *subreq = NULL;
1650 768 : struct rpc_server_setup_state *state = NULL;
1651 768 : struct rpc_server *server = NULL;
1652 :
1653 768 : req = tevent_req_create(
1654 : mem_ctx, &state, struct rpc_server_setup_state);
1655 768 : if (req == NULL) {
1656 0 : return NULL;
1657 : }
1658 768 : state->server = talloc_zero(state, struct rpc_server);
1659 768 : if (tevent_req_nomem(state->server, req)) {
1660 0 : return tevent_req_post(req, ev);
1661 : }
1662 :
1663 768 : server = state->server;
1664 :
1665 768 : *server = (struct rpc_server) {
1666 : .host = host,
1667 : .server_index = UINT32_MAX,
1668 768 : .rpc_server_exe = talloc_strdup(server, rpc_server_exe),
1669 : };
1670 768 : if (tevent_req_nomem(server->rpc_server_exe, req)) {
1671 0 : return tevent_req_post(req, ev);
1672 : }
1673 :
1674 768 : subreq = rpc_server_get_endpoints_send(
1675 : state,
1676 : ev,
1677 : rpc_server_exe,
1678 768 : host->np_helper ? NCACN_NP : NCA_UNKNOWN);
1679 768 : if (tevent_req_nomem(subreq, req)) {
1680 0 : return tevent_req_post(req, ev);
1681 : }
1682 768 : tevent_req_set_callback(subreq, rpc_server_setup_got_endpoints, req);
1683 768 : return req;
1684 : }
1685 :
1686 768 : static void rpc_server_setup_got_endpoints(struct tevent_req *subreq)
1687 : {
1688 768 : struct tevent_req *req = tevent_req_callback_data(
1689 : subreq, struct tevent_req);
1690 768 : struct rpc_server_setup_state *state = tevent_req_data(
1691 : req, struct rpc_server_setup_state);
1692 768 : struct rpc_server *server = state->server;
1693 : int ret;
1694 : size_t i, num_endpoints;
1695 : bool ok;
1696 :
1697 768 : ret = rpc_server_get_endpoints_recv(
1698 : subreq,
1699 : server,
1700 : &server->endpoints,
1701 : &server->iface_names,
1702 : &server->max_workers,
1703 : &server->idle_seconds);
1704 768 : TALLOC_FREE(subreq);
1705 768 : if (ret != 0) {
1706 0 : tevent_req_nterror(req, map_nt_error_from_unix(ret));
1707 0 : return;
1708 : }
1709 :
1710 768 : server->workers = talloc_array(
1711 : server, struct rpc_work_process, server->max_workers);
1712 768 : if (tevent_req_nomem(server->workers, req)) {
1713 0 : return;
1714 : }
1715 :
1716 3840 : for (i=0; i<server->max_workers; i++) {
1717 : /* mark as not yet created */
1718 3072 : server->workers[i] = (struct rpc_work_process) { .pid=-1, };
1719 : }
1720 :
1721 768 : num_endpoints = talloc_array_length(server->endpoints);
1722 :
1723 2499 : for (i=0; i<num_endpoints; i++) {
1724 1731 : struct rpc_host_endpoint *e = server->endpoints[i];
1725 : NTSTATUS status;
1726 : size_t j;
1727 :
1728 1731 : e->server = server;
1729 :
1730 1731 : status = dcesrv_create_binding_sockets(
1731 : e->binding, e, &e->num_fds, &e->fds);
1732 1731 : if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_SUPPORTED)) {
1733 16 : continue;
1734 : }
1735 1715 : if (tevent_req_nterror(req, status)) {
1736 0 : DBG_DEBUG("dcesrv_create_binding_sockets failed: %s\n",
1737 : nt_errstr(status));
1738 0 : return;
1739 : }
1740 :
1741 3510 : for (j=0; j<e->num_fds; j++) {
1742 1795 : ret = listen(e->fds[j], 256);
1743 1795 : if (ret == -1) {
1744 0 : tevent_req_nterror(
1745 : req, map_nt_error_from_unix(errno));
1746 0 : return;
1747 : }
1748 : }
1749 : }
1750 :
1751 768 : ok = rpc_host_fill_epm_db(
1752 768 : server->host->epmdb, server->endpoints, server->iface_names);
1753 768 : if (!ok) {
1754 0 : DBG_DEBUG("rpc_host_fill_epm_db failed\n");
1755 : }
1756 :
1757 768 : tevent_req_done(req);
1758 : }
1759 :
1760 768 : static NTSTATUS rpc_server_setup_recv(
1761 : struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_server **server)
1762 : {
1763 768 : struct rpc_server_setup_state *state = tevent_req_data(
1764 : req, struct rpc_server_setup_state);
1765 : NTSTATUS status;
1766 :
1767 768 : if (tevent_req_is_nterror(req, &status)) {
1768 0 : tevent_req_received(req);
1769 0 : return status;
1770 : }
1771 :
1772 768 : *server = talloc_move(mem_ctx, &state->server);
1773 768 : tevent_req_received(req);
1774 768 : return NT_STATUS_OK;
1775 : }
1776 :
1777 : /*
1778 : * rpcd_* died. Called from SIGCHLD handler.
1779 : */
1780 703 : static void rpc_worker_exited(struct rpc_host *host, pid_t pid)
1781 : {
1782 703 : size_t i, num_servers = talloc_array_length(host->servers);
1783 703 : struct rpc_work_process *worker = NULL;
1784 703 : bool found_pid = false;
1785 703 : bool have_active_worker = false;
1786 :
1787 6327 : for (i=0; i<num_servers; i++) {
1788 5624 : struct rpc_server *server = host->servers[i];
1789 : size_t j, num_workers;
1790 :
1791 5624 : if (server == NULL) {
1792 : /* SIGCHLD for --list-interfaces run */
1793 792 : continue;
1794 : }
1795 :
1796 4832 : num_workers = talloc_array_length(server->workers);
1797 :
1798 28468 : for (j=0; j<num_workers; j++) {
1799 23636 : worker = &server->workers[j];
1800 23636 : if (worker->pid == pid) {
1801 544 : found_pid = true;
1802 544 : worker->pid = -1;
1803 544 : worker->available = false;
1804 : }
1805 :
1806 23636 : if (worker->pid != -1) {
1807 2977 : have_active_worker = true;
1808 : }
1809 : }
1810 : }
1811 :
1812 703 : if (!found_pid) {
1813 159 : DBG_WARNING("No worker with PID %d\n", (int)pid);
1814 159 : return;
1815 : }
1816 :
1817 544 : if (!have_active_worker && host->np_helper) {
1818 : /*
1819 : * We have nothing left to do as an np_helper.
1820 : * Terminate ourselves (samba-dcerpcd). We will
1821 : * be restarted on demand anyway.
1822 : */
1823 76 : DBG_DEBUG("Exiting idle np helper\n");
1824 76 : exit(0);
1825 : }
1826 : }
1827 :
1828 : /*
1829 : * rpcd_* died.
1830 : */
1831 1151 : static void rpc_host_sigchld(
1832 : struct tevent_context *ev,
1833 : struct tevent_signal *se,
1834 : int signum,
1835 : int count,
1836 : void *siginfo,
1837 : void *private_data)
1838 : {
1839 1151 : struct rpc_host *state = talloc_get_type_abort(
1840 : private_data, struct rpc_host);
1841 : pid_t pid;
1842 : int wstatus;
1843 :
1844 1778 : while ((pid = waitpid(-1, &wstatus, WNOHANG)) > 0) {
1845 703 : DBG_DEBUG("pid=%d, wstatus=%d\n", (int)pid, wstatus);
1846 703 : rpc_worker_exited(state, pid);
1847 : }
1848 1075 : }
1849 :
1850 : /*
1851 : * Idle timer fired for a rcpd_* worker. Ask it to terminate.
1852 : */
1853 544 : static void rpc_host_exit_worker(
1854 : struct tevent_context *ev,
1855 : struct tevent_timer *te,
1856 : struct timeval current_time,
1857 : void *private_data)
1858 : {
1859 544 : struct rpc_server *server = talloc_get_type_abort(
1860 : private_data, struct rpc_server);
1861 544 : size_t i, num_workers = talloc_array_length(server->workers);
1862 :
1863 : /*
1864 : * Scan for the right worker. We don't have too many of those,
1865 : * and maintaining an index would be more data structure effort.
1866 : */
1867 :
1868 3014 : for (i=0; i<num_workers; i++) {
1869 3014 : struct rpc_work_process *w = &server->workers[i];
1870 : NTSTATUS status;
1871 :
1872 3014 : if (w->exit_timer != te) {
1873 2470 : continue;
1874 : }
1875 544 : w->exit_timer = NULL;
1876 :
1877 544 : SMB_ASSERT(w->num_associations == 0);
1878 :
1879 544 : status = messaging_send(
1880 544 : server->host->msg_ctx,
1881 : pid_to_procid(w->pid),
1882 : MSG_SHUTDOWN,
1883 : NULL);
1884 544 : if (!NT_STATUS_IS_OK(status)) {
1885 0 : DBG_DEBUG("Could not send SHUTDOWN msg: %s\n",
1886 : nt_errstr(status));
1887 : }
1888 :
1889 544 : w->available = false;
1890 544 : break;
1891 : }
1892 544 : }
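
/*
 * Sketch, not taken from this file: an rpcd_* worker could honor the
 * MSG_SHUTDOWN sent above with a handler along these lines. The names
 * rpc_worker_msg_shutdown and rpc_worker_register_shutdown are
 * hypothetical; the real worker-side code lives in rpc_worker.c.
 */
static void rpc_worker_msg_shutdown(
	struct messaging_context *msg,
	void *private_data,
	uint32_t msg_type,
	struct server_id server_id,
	DATA_BLOB *data)
{
	/* The host only sends this when we are idle, just exit */
	exit(0);
}

static NTSTATUS rpc_worker_register_shutdown(
	struct messaging_context *msg_ctx)
{
	return messaging_register(
		msg_ctx, NULL, MSG_SHUTDOWN, rpc_worker_msg_shutdown);
}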
1893 :
1894 : /*
1895 : * rpcd_* worker replied with its status.
1896 : */
1897 36701 : static void rpc_host_child_status_recv(
1898 : struct messaging_context *msg,
1899 : void *private_data,
1900 : uint32_t msg_type,
1901 : struct server_id server_id,
1902 : DATA_BLOB *data)
1903 : {
1904 36701 : struct rpc_host *host = talloc_get_type_abort(
1905 : private_data, struct rpc_host);
1906 36701 : size_t num_servers = talloc_array_length(host->servers);
1907 36701 : struct rpc_server *server = NULL;
1908 : size_t num_workers;
1909 36701 : pid_t src_pid = procid_to_pid(&server_id);
1910 36701 : struct rpc_work_process *worker = NULL;
1911 : struct rpc_worker_status status_message;
1912 : enum ndr_err_code ndr_err;
1913 :
1914 36701 : ndr_err = ndr_pull_struct_blob_all_noalloc(
1915 : data,
1916 : &status_message,
1917 : (ndr_pull_flags_fn_t)ndr_pull_rpc_worker_status);
1918 36701 : if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1919 : struct server_id_buf buf;
1920 0 : DBG_WARNING("Got invalid message from pid %s\n",
1921 : server_id_str_buf(server_id, &buf));
1922 0 : return;
1923 : }
1924 36701 : if (DEBUGLEVEL >= 10) {
1925 0 : NDR_PRINT_DEBUG(rpc_worker_status, &status_message);
1926 : }
1927 :
1928 36701 : if (status_message.server_index >= num_servers) {
1929 0 : DBG_WARNING("Got invalid server_index=%"PRIu32", "
1930 : "num_servers=%zu\n",
1931 : status_message.server_index,
1932 : num_servers);
1933 0 : return;
1934 : }
1935 :
1936 36701 : server = host->servers[status_message.server_index];
1937 :
1938 36701 : num_workers = talloc_array_length(server->workers);
1939 36701 : if (status_message.worker_index >= num_workers) {
1940 0 : DBG_WARNING("Got invalid worker_index=%"PRIu32", "
1941 : "num_workers=%zu\n",
1942 : status_message.worker_index,
1943 : num_workers);
1944 0 : return;
1945 : }
1946 36701 : worker = &server->workers[status_message.worker_index];
1947 :
1948 36701 : if (src_pid != worker->pid) {
1949 0 : DBG_WARNING("Got idx=%"PRIu32" from %d, expected %d\n",
1950 : status_message.worker_index,
1951 : (int)src_pid,
1952 : worker->pid);
1953 0 : return;
1954 : }
1955 :
1956 36701 : worker->available = true;
1957 36701 : worker->num_associations = status_message.num_association_groups;
1958 36701 : worker->num_connections = status_message.num_connections;
1959 :
1960 36701 : if (worker->num_associations != 0) {
1961 322 : TALLOC_FREE(worker->exit_timer);
1962 : } else {
1963 36379 : worker->exit_timer = tevent_add_timer(
1964 : messaging_tevent_context(msg),
1965 : server->workers,
1966 : tevent_timeval_current_ofs(server->idle_seconds, 0),
1967 : rpc_host_exit_worker,
1968 : server);
1969 : /* No NULL check, it's not fatal if this does not work */
1970 : }
1971 :
1972 36701 : rpc_host_distribute_clients(server);
1973 : }
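
/*
 * Sketch, not taken from this file: the worker-side counterpart that
 * produces the MSG_RPC_WORKER_STATUS message parsed above. The
 * function name and parameters are illustrative; the real sender
 * lives in rpc_worker.c.
 */
static void rpc_worker_report_status_sketch(
	struct messaging_context *msg_ctx,
	struct server_id dcerpcd,
	uint32_t server_index,
	uint32_t worker_index,
	uint32_t num_association_groups,
	uint32_t num_connections)
{
	struct rpc_worker_status msg = {
		.server_index = server_index,
		.worker_index = worker_index,
		.num_association_groups = num_association_groups,
		.num_connections = num_connections,
	};
	DATA_BLOB blob = { .data = NULL, };
	enum ndr_err_code ndr_err;

	/* Marshall the status struct the same way the host unpacks it */
	ndr_err = ndr_push_struct_blob(
		&blob,
		msg_ctx,
		&msg,
		(ndr_push_flags_fn_t)ndr_push_rpc_worker_status);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		return;
	}
	messaging_send(msg_ctx, dcerpcd, MSG_RPC_WORKER_STATUS, &blob);
	data_blob_free(&blob);
}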
1974 :
1975 : /*
1976 : * samba-dcerpcd has been asked to shutdown.
1977 : * Mark the initial tevent_req as done so we
1978 : * exit the event loop.
1979 : */
1980 0 : static void rpc_host_msg_shutdown(
1981 : struct messaging_context *msg,
1982 : void *private_data,
1983 : uint32_t msg_type,
1984 : struct server_id server_id,
1985 : DATA_BLOB *data)
1986 : {
1987 0 : struct tevent_req *req = talloc_get_type_abort(
1988 : private_data, struct tevent_req);
1989 0 : tevent_req_done(req);
1990 0 : }
1991 :
1992 : /*
1993 : * Only match directory entries whose names start with rpcd_ (e.g. rpcd_lsad)
1994 : */
1995 4532 : static int rpcd_filter(const struct dirent *d)
1996 : {
1997 4532 : int match = fnmatch("rpcd_*", d->d_name, 0);
1998 4532 : return (match == 0) ? 1 : 0;
1999 : }
2000 :
2001 : /*
2002 : * Scan the given libexecdir for rpcd_* services
2003 : * and return them as a strv list.
2004 : */
2005 16 : static int rpc_host_list_servers(
2006 : const char *libexecdir, TALLOC_CTX *mem_ctx, char **pservers)
2007 : {
2008 16 : char *servers = NULL;
2009 16 : struct dirent **namelist = NULL;
2010 : int i, num_servers;
2011 16 : int ret = ENOMEM;
2012 :
2013 16 : num_servers = scandir(libexecdir, &namelist, rpcd_filter, alphasort);
2014 16 : if (num_servers == -1) {
2015 0 : DBG_DEBUG("scandir failed: %s\n", strerror(errno));
2016 0 : return errno;
2017 : }
2018 :
2019 144 : for (i=0; i<num_servers; i++) {
2020 128 : char *exe = talloc_asprintf(
2021 128 : mem_ctx, "%s/%s", libexecdir, namelist[i]->d_name);
2022 128 : if (exe == NULL) {
2023 0 : goto fail;
2024 : }
2025 :
2026 128 : ret = strv_add(mem_ctx, &servers, exe);
2027 128 : TALLOC_FREE(exe);
2028 128 : if (ret != 0) {
2029 0 : goto fail;
2030 : }
2031 : }
2032 16 : fail:
2033 144 : for (i=0; i<num_servers; i++) {
2034 128 : SAFE_FREE(namelist[i]);
2035 : }
2036 16 : SAFE_FREE(namelist);
2037 :
2038 16 : if (ret != 0) {
2039 0 : TALLOC_FREE(servers);
2040 0 : return ret;
2041 : }
2042 16 : *pservers = servers;
2043 16 : return 0;
2044 : }
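
/*
 * Usage sketch (hypothetical helper): the strv returned above is a
 * packed list of strings; strv_next() with a NULL cursor yields the
 * first entry. rpc_host_send() below walks the list the same way.
 */
static void print_rpcds_sketch(char *servers)
{
	char *exe = NULL;

	while ((exe = strv_next(servers, exe)) != NULL) {
		printf("found rpcd: %s\n", exe);
	}
}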
2045 :
2046 : struct rpc_host_endpoint_accept_state {
2047 : struct tevent_context *ev;
2048 : struct rpc_host_endpoint *endpoint;
2049 : };
2050 :
2051 : static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq);
2052 : static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq);
2053 :
2054 : /*
2055 : * Asynchronously wait for a DCERPC connection from a client.
2056 : */
2057 1731 : static struct tevent_req *rpc_host_endpoint_accept_send(
2058 : TALLOC_CTX *mem_ctx,
2059 : struct tevent_context *ev,
2060 : struct rpc_host_endpoint *endpoint)
2061 : {
2062 1731 : struct tevent_req *req = NULL;
2063 1731 : struct rpc_host_endpoint_accept_state *state = NULL;
2064 : size_t i;
2065 :
2066 1731 : req = tevent_req_create(
2067 : mem_ctx, &state, struct rpc_host_endpoint_accept_state);
2068 1731 : if (req == NULL) {
2069 0 : return NULL;
2070 : }
2071 1731 : state->ev = ev;
2072 1731 : state->endpoint = endpoint;
2073 :
2074 3526 : for (i=0; i<endpoint->num_fds; i++) {
2075 1795 : struct tevent_req *subreq = NULL;
2076 :
2077 1795 : subreq = accept_send(state, ev, endpoint->fds[i]);
2078 1795 : if (tevent_req_nomem(subreq, req)) {
2079 0 : return tevent_req_post(req, ev);
2080 : }
2081 1795 : tevent_req_set_callback(
2082 : subreq, rpc_host_endpoint_accept_accepted, req);
2083 : }
2084 :
2085 1731 : return req;
2086 : }
2087 :
2088 : /*
2089 : * Accept a DCERPC connection from a client.
2090 : */
2091 36200 : static void rpc_host_endpoint_accept_accepted(struct tevent_req *subreq)
2092 : {
2093 36200 : struct tevent_req *req = tevent_req_callback_data(
2094 : subreq, struct tevent_req);
2095 36200 : struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2096 : req, struct rpc_host_endpoint_accept_state);
2097 36200 : struct rpc_host_endpoint *endpoint = state->endpoint;
2098 : int sock, listen_sock, err;
2099 : struct samba_sockaddr peer_addr;
2100 :
2101 36200 : sock = accept_recv(subreq, &listen_sock, &peer_addr, &err);
2102 36200 : TALLOC_FREE(subreq);
2103 36200 : if (sock == -1) {
2104 : /* What to do here? Just ignore the error and retry? */
2105 0 : DBG_DEBUG("accept_recv failed: %s\n", strerror(err));
2106 0 : tevent_req_error(req, err);
2107 0 : return;
2108 : }
2109 :
2110 36200 : subreq = accept_send(state, state->ev, listen_sock);
2111 36200 : if (tevent_req_nomem(subreq, req)) {
2112 0 : close(sock);
2113 0 : sock = -1;
2114 0 : return;
2115 : }
2116 36200 : tevent_req_set_callback(
2117 : subreq, rpc_host_endpoint_accept_accepted, req);
2118 :
2119 36200 : subreq = rpc_host_bind_read_send(
2120 : state,
2121 : state->ev,
2122 36200 : dcerpc_binding_get_transport(endpoint->binding),
2123 : &sock,
2124 : &peer_addr);
2125 36200 : if (tevent_req_nomem(subreq, req)) {
2126 0 : return;
2127 : }
2128 36200 : tevent_req_set_callback(
2129 : subreq, rpc_host_endpoint_accept_got_bind, req);
2130 : }
2131 :
2132 : /*
2133 : * Client sent us a DCERPC bind packet.
2134 : */
2135 36200 : static void rpc_host_endpoint_accept_got_bind(struct tevent_req *subreq)
2136 : {
2137 36200 : struct tevent_req *req = tevent_req_callback_data(
2138 : subreq, struct tevent_req);
2139 36200 : struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2140 : req, struct rpc_host_endpoint_accept_state);
2141 36200 : struct rpc_host_endpoint *endpoint = state->endpoint;
2142 36200 : struct rpc_server *server = endpoint->server;
2143 36200 : struct rpc_host_pending_client *pending = NULL;
2144 36200 : struct rpc_host_client *client = NULL;
2145 36200 : struct ncacn_packet *bind_pkt = NULL;
2146 : int ret;
2147 36200 : int sock=-1;
2148 :
2149 36200 : ret = rpc_host_bind_read_recv(
2150 : subreq, state, &sock, &client, &bind_pkt);
2151 36200 : TALLOC_FREE(subreq);
2152 36200 : if (ret != 0) {
2153 84 : DBG_DEBUG("rpc_host_bind_read_recv returned %s\n",
2154 : strerror(ret));
2155 84 : goto fail;
2156 : }
2157 :
2158 36116 : client->binding = dcerpc_binding_string(client, endpoint->binding);
2159 36116 : if (client->binding == NULL) {
2160 0 : DBG_WARNING("dcerpc_binding_string failed, dropping client\n");
2161 0 : goto fail;
2162 : }
2163 :
2164 36116 : pending = talloc_zero(server, struct rpc_host_pending_client);
2165 36116 : if (pending == NULL) {
2166 0 : DBG_WARNING("talloc failed, dropping client\n");
2167 0 : goto fail;
2168 : }
2169 36116 : pending->server = server;
2170 36116 : pending->sock = sock;
2171 36116 : pending->bind_pkt = talloc_move(pending, &bind_pkt);
2172 36116 : pending->client = talloc_move(pending, &client);
2173 36116 : talloc_set_destructor(pending, rpc_host_pending_client_destructor);
2174 36116 : sock = -1;
2175 :
2176 36116 : pending->hangup_wait = wait_for_read_send(
2177 : pending, state->ev, pending->sock, true);
2178 36116 : if (pending->hangup_wait == NULL) {
2179 0 : DBG_WARNING("wait_for_read_send failed, dropping client\n");
2180 0 : TALLOC_FREE(pending);
2181 36116 : return;
2182 : }
2183 36116 : tevent_req_set_callback(
2184 : pending->hangup_wait, rpc_host_client_exited, pending);
2185 :
2186 36116 : DLIST_ADD_END(server->pending_clients, pending);
2187 36116 : rpc_host_distribute_clients(server);
2188 36116 : return;
2189 :
2190 84 : fail:
2191 84 : TALLOC_FREE(client);
2192 84 : if (sock != -1) {
2193 0 : close(sock);
2194 : }
2195 : }
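
/*
 * A note on the lifecycle set up above: a pending client stays on
 * server->pending_clients until rpc_host_distribute_clients() hands
 * its socket to a worker. The wait_for_read_send() watcher only
 * serves to notice clients that hang up while still queued, so that
 * rpc_host_client_exited() can discard them instead of bothering a
 * worker.
 */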
2196 :
2197 0 : static int rpc_host_endpoint_accept_recv(
2198 : struct tevent_req *req, struct rpc_host_endpoint **ep)
2199 : {
2200 0 : struct rpc_host_endpoint_accept_state *state = tevent_req_data(
2201 : req, struct rpc_host_endpoint_accept_state);
2202 :
2203 0 : *ep = state->endpoint;
2204 :
2205 0 : return tevent_req_simple_recv_unix(req);
2206 : }
2207 :
2208 : /*
2209 : * Full state for samba-dcerpcd. Everything else
2210 : * is hung off this.
2211 : */
2212 : struct rpc_host_state {
2213 : struct tevent_context *ev;
2214 : struct rpc_host *host;
2215 :
2216 : bool is_ready;
2217 : const char *daemon_ready_progname;
2218 : struct tevent_immediate *ready_signal_immediate;
2219 : int *ready_signal_fds;
2220 :
2221 : size_t num_servers;
2222 : size_t num_prepared;
2223 : };
2224 :
2225 : /*
2226 : * Tell whoever invoked samba-dcerpcd we're ready to
2227 : * serve.
2228 : */
2229 161 : static void rpc_host_report_readiness(
2230 : struct tevent_context *ev,
2231 : struct tevent_immediate *im,
2232 : void *private_data)
2233 : {
2234 161 : struct rpc_host_state *state = talloc_get_type_abort(
2235 : private_data, struct rpc_host_state);
2236 161 : size_t i, num_fds = talloc_array_length(state->ready_signal_fds);
2237 :
2238 161 : if (!state->is_ready) {
2239 0 : DBG_DEBUG("Not yet ready\n");
2240 0 : return;
2241 : }
2242 :
2243 306 : for (i=0; i<num_fds; i++) {
2244 145 : uint8_t byte = 0;
2245 : ssize_t nwritten;
2246 :
2247 : do {
2248 145 : nwritten = write(
2249 145 : state->ready_signal_fds[i],
2250 : (void *)&byte,
2251 : sizeof(byte));
2252 145 : } while ((nwritten == -1) && (errno == EINTR));
2253 :
2254 145 : close(state->ready_signal_fds[i]);
2255 : }
2256 :
2257 161 : TALLOC_FREE(state->ready_signal_fds);
2258 : }
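
/*
 * Sketch, not taken from this file: what a launching process might do
 * to consume the readiness byte written above. It passes the write
 * end of a pipe via --ready-signal-fd and blocks on the read end;
 * EOF without a byte means samba-dcerpcd died before becoming ready.
 */
static bool wait_for_dcerpcd_ready_sketch(int readfd)
{
	uint8_t byte;
	ssize_t nread;

	do {
		nread = read(readfd, &byte, sizeof(byte));
	} while ((nread == -1) && (errno == EINTR));

	return (nread == 1);
}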
2259 :
2260 : /*
2261 : * Respond to an "are you ready" message.
2262 : */
2263 402 : static bool rpc_host_ready_signal_filter(
2264 : struct messaging_rec *rec, void *private_data)
2265 : {
2266 402 : struct rpc_host_state *state = talloc_get_type_abort(
2267 : private_data, struct rpc_host_state);
2268 402 : size_t num_fds = talloc_array_length(state->ready_signal_fds);
2269 402 : int *tmp = NULL;
2270 :
2271 402 : if (rec->msg_type != MSG_DAEMON_READY_FD) {
2272 337 : return false;
2273 : }
2274 65 : if (rec->num_fds != 1) {
2275 0 : DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
2276 0 : return false;
2277 : }
2278 :
2279 65 : if (num_fds + 1 < num_fds) {
2280 0 : return false;
2281 : }
2282 65 : tmp = talloc_realloc(state, state->ready_signal_fds, int, num_fds+1);
2283 65 : if (tmp == NULL) {
2284 0 : return false;
2285 : }
2286 65 : state->ready_signal_fds = tmp;
2287 :
2288 65 : state->ready_signal_fds[num_fds] = rec->fds[0];
2289 65 : rec->fds[0] = -1;
2290 :
2291 65 : tevent_schedule_immediate(
2292 : state->ready_signal_immediate,
2293 : state->ev,
2294 : rpc_host_report_readiness,
2295 : state);
2296 :
2297 65 : return false;
2298 : }
2299 :
2300 : /*
2301 : * Respond to a "what is your status" message.
2302 : */
2303 402 : static bool rpc_host_dump_status_filter(
2304 : struct messaging_rec *rec, void *private_data)
2305 : {
2306 402 : struct rpc_host_state *state = talloc_get_type_abort(
2307 : private_data, struct rpc_host_state);
2308 402 : struct rpc_host *host = state->host;
2309 402 : struct rpc_server **servers = host->servers;
2310 402 : size_t i, num_servers = talloc_array_length(servers);
2311 402 : FILE *f = NULL;
2312 :
2313 402 : if (rec->msg_type != MSG_RPC_DUMP_STATUS) {
2314 402 : return false;
2315 : }
2316 0 : if (rec->num_fds != 1) {
2317 0 : DBG_DEBUG("Got %"PRIu8" fds\n", rec->num_fds);
2318 0 : return false;
2319 : }
2320 :
2321 0 : f = fdopen_keepfd(rec->fds[0], "w");
2322 0 : if (f == NULL) {
2323 0 : DBG_DEBUG("fdopen failed: %s\n", strerror(errno));
2324 0 : return false;
2325 : }
2326 :
2327 0 : for (i=0; i<num_servers; i++) {
2328 0 : struct rpc_server *server = servers[i];
2329 0 : size_t j, num_workers = talloc_array_length(server->workers);
2330 0 : size_t active_workers = 0;
2331 :
2332 0 : for (j=0; j<num_workers; j++) {
2333 0 : if (server->workers[j].pid != -1) {
2334 0 : active_workers += 1;
2335 : }
2336 : }
2337 :
2338 0 : fprintf(f,
2339 : "%s: active_workers=%zu\n",
2340 : server->rpc_server_exe,
2341 : active_workers);
2342 :
2343 0 : for (j=0; j<num_workers; j++) {
2344 0 : struct rpc_work_process *w = &server->workers[j];
2345 :
2346 0 : if (w->pid == (pid_t)-1) {
2347 0 : continue;
2348 : }
2349 :
2350 0 : fprintf(f,
2351 : " worker[%zu]: pid=%d, num_associations=%"PRIu32", num_connections=%"PRIu32"\n",
2352 : j,
2353 0 : (int)w->pid,
2354 : w->num_associations,
2355 : w->num_connections);
2356 : }
2357 : }
2358 :
2359 0 : fclose(f);
2360 :
2361 0 : return false;
2362 : }
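
/*
 * Sketch, not taken from this file: how a diagnostic tool might
 * request the dump produced above. "dcerpcd" is the server_id of the
 * running samba-dcerpcd; all names here are illustrative.
 */
static void dump_dcerpcd_status_sketch(
	struct messaging_context *msg_ctx, struct server_id dcerpcd)
{
	int fds[2];
	char buf[1024];
	ssize_t nread;
	NTSTATUS status;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == -1) {
		return;
	}

	/* Hand one end of the socketpair to samba-dcerpcd */
	status = messaging_send_iov(
		msg_ctx, dcerpcd, MSG_RPC_DUMP_STATUS, NULL, 0, &fds[1], 1);
	close(fds[1]);
	if (!NT_STATUS_IS_OK(status)) {
		close(fds[0]);
		return;
	}

	/* Read the report until samba-dcerpcd closes its end */
	while ((nread = read(fds[0], buf, sizeof(buf))) > 0) {
		fwrite(buf, 1, (size_t)nread, stdout);
	}
	close(fds[0]);
}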
2363 :
2364 : static void rpc_host_server_setup_done(struct tevent_req *subreq);
2365 : static void rpc_host_endpoint_failed(struct tevent_req *subreq);
2366 :
2367 : /*
2368 : * Async startup for samba-dcerpcd.
2369 : */
2370 96 : static struct tevent_req *rpc_host_send(
2371 : TALLOC_CTX *mem_ctx,
2372 : struct tevent_context *ev,
2373 : struct messaging_context *msg_ctx,
2374 : char *servers,
2375 : int ready_signal_fd,
2376 : const char *daemon_ready_progname,
2377 : bool is_np_helper)
2378 : {
2379 96 : struct tevent_req *req = NULL, *subreq = NULL;
2380 96 : struct rpc_host_state *state = NULL;
2381 96 : struct rpc_host *host = NULL;
2382 96 : struct tevent_signal *se = NULL;
2383 96 : char *epmdb_path = NULL;
2384 96 : char *exe = NULL;
2385 96 : size_t i, num_servers = strv_count(servers);
2386 : NTSTATUS status;
2387 : int ret;
2388 :
2389 96 : req = tevent_req_create(mem_ctx, &state, struct rpc_host_state);
2390 96 : if (req == NULL) {
2391 0 : return NULL;
2392 : }
2393 96 : state->ev = ev;
2394 96 : state->daemon_ready_progname = daemon_ready_progname;
2395 :
2396 96 : state->ready_signal_immediate = tevent_create_immediate(state);
2397 96 : if (tevent_req_nomem(state->ready_signal_immediate, req)) {
2398 0 : return tevent_req_post(req, ev);
2399 : }
2400 :
2401 96 : if (ready_signal_fd != -1) {
2402 80 : state->ready_signal_fds = talloc_array(state, int, 1);
2403 80 : if (tevent_req_nomem(state->ready_signal_fds, req)) {
2404 0 : return tevent_req_post(req, ev);
2405 : }
2406 80 : state->ready_signal_fds[0] = ready_signal_fd;
2407 : }
2408 :
2409 96 : state->host = talloc_zero(state, struct rpc_host);
2410 96 : if (tevent_req_nomem(state->host, req)) {
2411 0 : return tevent_req_post(req, ev);
2412 : }
2413 96 : host = state->host;
2414 :
2415 96 : host->msg_ctx = msg_ctx;
2416 96 : host->np_helper = is_np_helper;
2417 :
2418 96 : ret = pipe(host->worker_stdin);
2419 96 : if (ret == -1) {
2420 0 : tevent_req_nterror(req, map_nt_error_from_unix(errno));
2421 0 : return tevent_req_post(req, ev);
2422 : }
2423 :
2424 96 : host->servers = talloc_zero_array(
2425 : host, struct rpc_server *, num_servers);
2426 96 : if (tevent_req_nomem(host->servers, req)) {
2427 0 : return tevent_req_post(req, ev);
2428 : }
2429 :
2430 96 : se = tevent_add_signal(ev, state, SIGCHLD, 0, rpc_host_sigchld, host);
2431 96 : if (tevent_req_nomem(se, req)) {
2432 0 : return tevent_req_post(req, ev);
2433 : }
2434 96 : BlockSignals(false, SIGCHLD);
2435 :
2436 96 : status = messaging_register(
2437 : msg_ctx,
2438 : host,
2439 : MSG_RPC_WORKER_STATUS,
2440 : rpc_host_child_status_recv);
2441 96 : if (tevent_req_nterror(req, status)) {
2442 0 : return tevent_req_post(req, ev);
2443 : }
2444 :
2445 96 : status = messaging_register(
2446 : msg_ctx, req, MSG_SHUTDOWN, rpc_host_msg_shutdown);
2447 96 : if (tevent_req_nterror(req, status)) {
2448 0 : return tevent_req_post(req, ev);
2449 : }
2450 :
2451 96 : subreq = messaging_filtered_read_send(
2452 : state, ev, msg_ctx, rpc_host_ready_signal_filter, state);
2453 96 : if (tevent_req_nomem(subreq, req)) {
2454 0 : return tevent_req_post(req, ev);
2455 : }
2456 :
2457 96 : subreq = messaging_filtered_read_send(
2458 : state, ev, msg_ctx, rpc_host_dump_status_filter, state);
2459 96 : if (tevent_req_nomem(subreq, req)) {
2460 0 : return tevent_req_post(req, ev);
2461 : }
2462 :
2463 96 : epmdb_path = lock_path(state, "epmdb.tdb");
2464 96 : if (tevent_req_nomem(epmdb_path, req)) {
2465 0 : return tevent_req_post(req, ev);
2466 : }
2467 :
2468 96 : host->epmdb = tdb_wrap_open(
2469 : host,
2470 : epmdb_path,
2471 : 0,
2472 : TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH,
2473 : O_RDWR|O_CREAT,
2474 : 0644);
2475 96 : if (host->epmdb == NULL) {
2476 0 : DBG_DEBUG("tdb_wrap_open(%s) failed: %s\n",
2477 : epmdb_path,
2478 : strerror(errno));
2479 0 : tevent_req_nterror(req, map_nt_error_from_unix(errno));
2480 0 : return tevent_req_post(req, ev);
2481 : }
2482 96 : TALLOC_FREE(epmdb_path);
2483 :
2484 96 : for (exe = strv_next(servers, exe), i = 0;
2485 864 : exe != NULL;
2486 768 : exe = strv_next(servers, exe), i++) {
2487 :
2488 768 : DBG_DEBUG("server_setup for %s index %zu\n", exe, i);
2489 :
2490 768 : subreq = rpc_server_setup_send(
2491 : state,
2492 : ev,
2493 : host,
2494 : exe);
2495 768 : if (tevent_req_nomem(subreq, req)) {
2496 0 : return tevent_req_post(req, ev);
2497 : }
2498 768 : tevent_req_set_callback(
2499 : subreq, rpc_host_server_setup_done, req);
2500 : }
2501 :
2502 96 : return req;
2503 : }
2504 :
2505 : /*
2506 : * Timer function called after we were initialized but no one
2507 : * connected. Shut down.
2508 : */
2509 4 : static void rpc_host_shutdown(
2510 : struct tevent_context *ev,
2511 : struct tevent_timer *te,
2512 : struct timeval current_time,
2513 : void *private_data)
2514 : {
2515 4 : struct tevent_req *req = talloc_get_type_abort(
2516 : private_data, struct tevent_req);
2517 4 : DBG_DEBUG("Nobody connected -- shutting down\n");
2518 4 : tevent_req_done(req);
2519 4 : }
2520 :
2521 768 : static void rpc_host_server_setup_done(struct tevent_req *subreq)
2522 : {
2523 768 : struct tevent_req *req = tevent_req_callback_data(
2524 : subreq, struct tevent_req);
2525 768 : struct rpc_host_state *state = tevent_req_data(
2526 : req, struct rpc_host_state);
2527 768 : struct rpc_server *server = NULL;
2528 768 : struct rpc_host *host = state->host;
2529 768 : size_t i, num_servers = talloc_array_length(host->servers);
2530 : NTSTATUS status;
2531 :
2532 768 : status = rpc_server_setup_recv(subreq, host, &server);
2533 768 : TALLOC_FREE(subreq);
2534 768 : if (!NT_STATUS_IS_OK(status)) {
2535 0 : DBG_DEBUG("rpc_server_setup_recv returned %s, ignoring\n",
2536 : nt_errstr(status));
2537 0 : host->servers = talloc_realloc(
2538 : host,
2539 : host->servers,
2540 : struct rpc_server *,
2541 : num_servers-1);
2542 672 : return;
2543 : }
2544 :
2545 768 : server->server_index = state->num_prepared;
2546 768 : host->servers[state->num_prepared] = server;
2547 :
2548 768 : state->num_prepared += 1;
2549 :
2550 768 : if (state->num_prepared < num_servers) {
2551 672 : return;
2552 : }
2553 :
2554 864 : for (i=0; i<num_servers; i++) {
2555 : size_t j, num_endpoints;
2556 :
2557 768 : server = host->servers[i];
2558 768 : num_endpoints = talloc_array_length(server->endpoints);
2559 :
2560 2499 : for (j=0; j<num_endpoints; j++) {
2561 1731 : subreq = rpc_host_endpoint_accept_send(
2562 1731 : state, state->ev, server->endpoints[j]);
2563 1731 : if (tevent_req_nomem(subreq, req)) {
2564 0 : return;
2565 : }
2566 1731 : tevent_req_set_callback(
2567 : subreq, rpc_host_endpoint_failed, req);
2568 : }
2569 : }
2570 :
2571 96 : state->is_ready = true;
2572 :
2573 96 : if (state->daemon_ready_progname != NULL) {
2574 16 : daemon_ready(state->daemon_ready_progname);
2575 : }
2576 :
2577 96 : if (host->np_helper) {
2578 : /*
2579 : * If we're started as an np helper, and no one talks to
2580 : * us within 10 seconds, just shut ourselves down.
2581 : */
2582 80 : host->np_helper_shutdown = tevent_add_timer(
2583 : state->ev,
2584 : state,
2585 : timeval_current_ofs(10, 0),
2586 : rpc_host_shutdown,
2587 : req);
2588 80 : if (tevent_req_nomem(host->np_helper_shutdown, req)) {
2589 0 : return;
2590 : }
2591 : }
2592 :
2593 96 : tevent_schedule_immediate(
2594 : state->ready_signal_immediate,
2595 : state->ev,
2596 : rpc_host_report_readiness,
2597 : state);
2598 : }
2599 :
2600 : /*
2601 : * Log an accept failure on an endpoint.
2602 : */
2603 0 : static void rpc_host_endpoint_failed(struct tevent_req *subreq)
2604 : {
2605 0 : struct tevent_req *req = tevent_req_callback_data(
2606 : subreq, struct tevent_req);
2607 0 : struct rpc_host_state *state = tevent_req_data(
2608 : req, struct rpc_host_state);
2609 0 : struct rpc_host_endpoint *endpoint = NULL;
2610 0 : char *binding_string = NULL;
2611 : int ret;
2612 :
2613 0 : ret = rpc_host_endpoint_accept_recv(subreq, &endpoint);
2614 0 : TALLOC_FREE(subreq);
2615 :
2616 0 : binding_string = dcerpc_binding_string(state, endpoint->binding);
2617 0 : DBG_DEBUG("rpc_host_endpoint_accept_recv for %s returned %s\n",
2618 : binding_string,
2619 : strerror(ret));
2620 0 : TALLOC_FREE(binding_string);
2621 0 : }
2622 :
2623 20 : static NTSTATUS rpc_host_recv(struct tevent_req *req)
2624 : {
2625 20 : return tevent_req_simple_recv_ntstatus(req);
2626 : }
2627 :
2628 163 : static int rpc_host_pidfile_create(
2629 : struct messaging_context *msg_ctx,
2630 : const char *progname,
2631 : int ready_signal_fd)
2632 163 : {
2633 163 : const char *piddir = lp_pid_directory();
2634 163 : size_t len = strlen(piddir) + strlen(progname) + 6;
2635 163 : char pidFile[len];
2636 : pid_t existing_pid;
2637 : int fd, ret;
2638 :
2639 163 : snprintf(pidFile,
2640 : sizeof(pidFile),
2641 : "%s/%s.pid",
2642 : piddir, progname);
2643 :
2644 163 : ret = pidfile_path_create(pidFile, &fd, &existing_pid);
2645 163 : if (ret == 0) {
2646 : /* Intentionally leak fd: it holds the pidfile lock as long as we live */
2647 96 : return 0;
2648 : }
2649 :
2650 67 : if (ret != EAGAIN) {
2651 0 : DBG_DEBUG("pidfile_path_create() failed: %s\n",
2652 : strerror(ret));
2653 0 : return ret;
2654 : }
2655 :
2656 67 : DBG_DEBUG("%s pid %d exists\n", progname, (int)existing_pid);
2657 :
2658 67 : if (ready_signal_fd != -1) {
2659 67 : NTSTATUS status = messaging_send_iov(
2660 : msg_ctx,
2661 : pid_to_procid(existing_pid),
2662 : MSG_DAEMON_READY_FD,
2663 : NULL,
2664 : 0,
2665 : &ready_signal_fd,
2666 : 1);
2667 67 : if (!NT_STATUS_IS_OK(status)) {
2668 0 : DBG_DEBUG("Could not send ready_signal_fd: %s\n",
2669 : nt_errstr(status));
2670 : }
2671 : }
2672 :
2673 67 : return EAGAIN;
2674 : }
2675 :
2676 16 : static void samba_dcerpcd_stdin_handler(
2677 : struct tevent_context *ev,
2678 : struct tevent_fd *fde,
2679 : uint16_t flags,
2680 : void *private_data)
2681 : {
2682 16 : struct tevent_req *req = talloc_get_type_abort(
2683 : private_data, struct tevent_req);
2684 : char c;
2685 :
2686 16 : if (read(0, &c, 1) != 1) {
2687 : /* We have reached EOF on stdin, which means the
2688 : parent has exited. Shut down the server */
2689 16 : tevent_req_done(req);
2690 : }
2691 16 : }
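
/*
 * Sketch, not taken from this file: how a parent process can make use
 * of the stdin EOF handling above. It gives samba-dcerpcd a pipe as
 * stdin and keeps the write end open; when the parent dies, the child
 * sees EOF and shuts down. The path and arguments are illustrative.
 */
static pid_t spawn_dcerpcd_sketch(void)
{
	int p[2];
	pid_t pid;

	if (pipe(p) == -1) {
		return -1;
	}

	pid = fork();
	if (pid == 0) {
		char *argv[] = {
			"samba-dcerpcd", "--libexec-rpcds", NULL,
		};
		dup2(p[0], 0);	/* child reads parent liveness on stdin */
		close(p[0]);
		close(p[1]);
		execv("/usr/libexec/samba/samba-dcerpcd", argv);
		_exit(127);
	}

	close(p[0]);
	/* Keep p[1] open for as long as we live */
	return pid;
}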
2692 :
2693 : /*
2694 : * samba-dcerpcd service startup.
2695 : */
2696 16 : int main(int argc, const char *argv[])
2697 : {
2698 : const struct loadparm_substitution *lp_sub =
2699 16 : loadparm_s3_global_substitution();
2700 16 : const char *progname = getprogname();
2701 16 : TALLOC_CTX *frame = NULL;
2702 16 : struct tevent_context *ev_ctx = NULL;
2703 16 : struct messaging_context *msg_ctx = NULL;
2704 16 : struct tevent_req *req = NULL;
2705 16 : char *servers = NULL;
2706 16 : const char *arg = NULL;
2707 : size_t num_servers;
2708 : poptContext pc;
2709 : int ret, err;
2710 : NTSTATUS status;
2711 : bool log_stdout;
2712 : bool ok;
2713 :
2714 16 : int libexec_rpcds = 0;
2715 16 : int np_helper = 0;
2716 16 : int ready_signal_fd = -1;
2717 :
2718 16 : struct samba_cmdline_daemon_cfg *cmdline_daemon_cfg = NULL;
2719 64 : struct poptOption long_options[] = {
2720 : POPT_AUTOHELP
2721 : {
2722 : .longName = "libexec-rpcds",
2723 : .argInfo = POPT_ARG_NONE,
2724 : .arg = &libexec_rpcds,
2725 : .descrip = "Use all rpcds in libexec",
2726 : },
2727 : {
2728 : .longName = "ready-signal-fd",
2729 : .argInfo = POPT_ARG_INT,
2730 : .arg = &ready_signal_fd,
2731 : .descrip = "fd to close when initialized",
2732 : },
2733 : {
2734 : .longName = "np-helper",
2735 : .argInfo = POPT_ARG_NONE,
2736 : .arg = &np_helper,
2737 : .descrip = "Internal named pipe server",
2738 : },
2739 16 : POPT_COMMON_SAMBA
2740 16 : POPT_COMMON_DAEMON
2741 16 : POPT_COMMON_VERSION
2742 : POPT_TABLEEND
2743 : };
2744 :
2745 : {
2746 16 : const char *fd_params[] = { "ready-signal-fd", };
2747 :
2748 16 : closefrom_except_fd_params(
2749 : 3, ARRAY_SIZE(fd_params), fd_params, argc, argv);
2750 : }
2751 :
2752 16 : talloc_enable_null_tracking();
2753 16 : frame = talloc_stackframe();
2754 16 : umask(0);
2755 16 : sec_init();
2756 16 : smb_init_locale();
2757 :
2758 16 : ok = samba_cmdline_init(frame,
2759 : SAMBA_CMDLINE_CONFIG_SERVER,
2760 : true /* require_smbconf */);
2761 16 : if (!ok) {
2762 0 : DBG_ERR("Failed to init cmdline parser!\n");
2763 0 : TALLOC_FREE(frame);
2764 0 : exit(ENOMEM);
2765 : }
2766 :
2767 16 : pc = samba_popt_get_context(getprogname(),
2768 : argc,
2769 : argv,
2770 : long_options,
2771 : 0);
2772 16 : if (pc == NULL) {
2773 0 : DBG_ERR("Failed to setup popt context!\n");
2774 0 : TALLOC_FREE(frame);
2775 0 : exit(1);
2776 : }
2777 :
2778 16 : poptSetOtherOptionHelp(
2779 : pc, "[OPTIONS] [SERVICE_1 SERVICE_2 .. SERVICE_N]");
2780 :
2781 16 : ret = poptGetNextOpt(pc);
2782 :
2783 16 : if (ret != -1) {
2784 0 : if (ret >= 0) {
2785 0 : fprintf(stderr,
2786 : "\nGot unexpected option %d\n",
2787 : ret);
2788 0 : } else if (ret == POPT_ERROR_BADOPT) {
2789 0 : fprintf(stderr,
2790 : "\nInvalid option %s: %s\n\n",
2791 : poptBadOption(pc, 0),
2792 : poptStrerror(ret));
2793 : } else {
2794 0 : fprintf(stderr,
2795 : "\npoptGetNextOpt returned %s\n",
2796 : poptStrerror(ret));
2797 : }
2798 :
2799 0 : poptFreeContext(pc);
2800 0 : TALLOC_FREE(frame);
2801 0 : exit(1);
2802 : }
2803 :
2804 16 : while ((arg = poptGetArg(pc)) != NULL) {
2805 0 : ret = strv_add(frame, &servers, arg);
2806 0 : if (ret != 0) {
2807 0 : DBG_ERR("strv_add() failed\n");
2808 0 : poptFreeContext(pc);
2809 0 : TALLOC_FREE(frame);
2810 0 : exit(1);
2811 : }
2812 : }
2813 :
2814 16 : log_stdout = (debug_get_log_type() == DEBUG_STDOUT);
2815 16 : if (log_stdout) {
2816 16 : setup_logging(progname, DEBUG_STDOUT);
2817 : } else {
2818 0 : setup_logging(progname, DEBUG_FILE);
2819 : }
2820 :
2821 : /*
2822 : * If "rpc start on demand helpers = true" in smb.conf we must
2823 : * not start as standalone, only on demand from
2824 : * local_np_connect() functions. Log an error message telling
2825 : * the admin how to fix and then exit.
2826 : */
2827 16 : if (lp_rpc_start_on_demand_helpers() && np_helper == 0) {
2828 0 : DBG_ERR("Cannot start in standalone mode if smb.conf "
2829 : "[global] setting "
2830 : "\"rpc start on demand helpers = true\" - "
2831 : "exiting\n");
2832 0 : TALLOC_FREE(frame);
2833 0 : exit(1);
2834 : }
2835 :
2836 16 : if (libexec_rpcds != 0) {
2837 16 : ret = rpc_host_list_servers(
2838 : dyn_SAMBA_LIBEXECDIR, frame, &servers);
2839 16 : if (ret != 0) {
2840 0 : DBG_ERR("Could not list libexec: %s\n",
2841 : strerror(ret));
2842 0 : poptFreeContext(pc);
2843 0 : TALLOC_FREE(frame);
2844 0 : exit(1);
2845 : }
2846 : }
2847 :
2848 16 : num_servers = strv_count(servers);
2849 16 : if (num_servers == 0) {
2850 0 : poptPrintUsage(pc, stderr, 0);
2851 0 : poptFreeContext(pc);
2852 0 : TALLOC_FREE(frame);
2853 0 : exit(1);
2854 : }
2855 :
2856 16 : poptFreeContext(pc);
2857 :
2858 16 : cmdline_daemon_cfg = samba_cmdline_get_daemon_cfg();
2859 :
2860 16 : if (log_stdout && cmdline_daemon_cfg->fork) {
2861 0 : DBG_ERR("Can't log to stdout unless in foreground\n");
2862 0 : TALLOC_FREE(frame);
2863 0 : exit(1);
2864 : }
2865 :
2866 16 : msg_ctx = global_messaging_context();
2867 16 : if (msg_ctx == NULL) {
2868 0 : DBG_ERR("messaging_init() failed\n");
2869 0 : TALLOC_FREE(frame);
2870 0 : exit(1);
2871 : }
2872 16 : ev_ctx = messaging_tevent_context(msg_ctx);
2873 :
2874 16 : if (cmdline_daemon_cfg->fork) {
2875 0 : become_daemon(
2876 : true,
2877 0 : cmdline_daemon_cfg->no_process_group,
2878 : log_stdout);
2879 :
2880 147 : status = reinit_after_fork(msg_ctx, ev_ctx, false);
2881 147 : if (!NT_STATUS_IS_OK(status)) {
2882 0 : exit_daemon("reinit_after_fork() failed",
2883 : map_errno_from_nt_status(status));
2884 : }
2885 : } else {
2886 16 : DBG_DEBUG("Calling daemon_status\n");
2887 16 : daemon_status(progname, "Starting process ... ");
2888 : }
2889 :
2890 163 : BlockSignals(true, SIGPIPE);
2891 :
2892 163 : dump_core_setup(progname, lp_logfile(frame, lp_sub));
2893 :
2894 163 : reopen_logs();
2895 :
2896 163 : DBG_STARTUP_NOTICE("%s version %s started.\n%s\n",
2897 : progname,
2898 : samba_version_string(),
2899 : samba_copyright_string());
2900 :
2901 163 : (void)winbind_off();
2902 163 : ok = init_guest_session_info(frame);
2903 163 : (void)winbind_on();
2904 163 : if (!ok) {
2905 0 : DBG_ERR("init_guest_session_info failed\n");
2906 0 : global_messaging_context_free();
2907 0 : TALLOC_FREE(frame);
2908 0 : exit(1);
2909 : }
2910 :
2911 163 : ret = rpc_host_pidfile_create(msg_ctx, progname, ready_signal_fd);
2912 163 : if (ret != 0) {
2913 67 : DBG_DEBUG("rpc_host_pidfile_create failed: %s\n",
2914 : strerror(ret));
2915 67 : global_messaging_context_free();
2916 67 : TALLOC_FREE(frame);
2917 67 : exit(1);
2918 : }
2919 :
2920 96 : req = rpc_host_send(
2921 : ev_ctx,
2922 : ev_ctx,
2923 : msg_ctx,
2924 : servers,
2925 : ready_signal_fd,
2926 96 : cmdline_daemon_cfg->fork ? NULL : progname,
2927 : np_helper != 0);
2928 96 : if (req == NULL) {
2929 0 : DBG_ERR("rpc_host_send failed\n");
2930 0 : global_messaging_context_free();
2931 0 : TALLOC_FREE(frame);
2932 0 : exit(1);
2933 : }
2934 :
2935 96 : if (!cmdline_daemon_cfg->fork) {
2936 : struct stat st;
2937 16 : if (fstat(0, &st) != 0) {
2938 0 : DBG_DEBUG("fstat(0) failed: %s\n",
2939 : strerror(errno));
2940 0 : global_messaging_context_free();
2941 0 : TALLOC_FREE(frame);
2942 0 : exit(1);
2943 : }
2944 16 : if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
2945 16 : tevent_add_fd(
2946 : ev_ctx,
2947 : ev_ctx,
2948 : 0,
2949 : TEVENT_FD_READ,
2950 : samba_dcerpcd_stdin_handler,
2951 : req);
2952 : }
2953 : }
2954 :
2955 96 : ok = tevent_req_poll_unix(req, ev_ctx, &err);
2956 20 : if (!ok) {
2957 0 : DBG_ERR("tevent_req_poll_unix failed: %s\n",
2958 : strerror(err));
2959 0 : global_messaging_context_free();
2960 0 : TALLOC_FREE(frame);
2961 0 : exit(1);
2962 : }
2963 :
2964 20 : status = rpc_host_recv(req);
2965 20 : if (!NT_STATUS_IS_OK(status)) {
2966 0 : DBG_ERR("rpc_host_recv returned %s\n", nt_errstr(status));
2967 0 : global_messaging_context_free();
2968 0 : TALLOC_FREE(frame);
2969 0 : exit(1);
2970 : }
2971 :
2972 20 : TALLOC_FREE(frame);
2973 :
2974 20 : return 0;
2975 : }
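
/*
 * Illustrative invocations, derived from the option table above
 * (paths are examples only):
 *
 *   samba-dcerpcd --libexec-rpcds
 *       pick up every rpcd_* binary installed in Samba's libexecdir
 *
 *   samba-dcerpcd /usr/libexec/samba/rpcd_winreg
 *       serve an explicitly listed set of rpcd services
 */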