~nickbp/kapiti: kapiti/src/runner.rs
ref: 0dfbb8de73f6b881a91eac29e61e6b8d5e5a3462 (Nick Parker, "Implement polling-based background filter updates (#9)", 9 months ago)

use std::fs;
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

use anyhow::{bail, Context, Result};
use async_lock::Barrier;
use async_net::{TcpListener, TcpStream, UdpSocket};
use bytes::BytesMut;
use smol::Task;
use tracing::{self, debug, info, warn};

use crate::filter::{filter, reader};
use crate::{cache, client, config, hyper_smol, listen_tcp, listen_udp, lookup};

/// TCP size header is 16 bits, so max theoretical size is 64k
static MAX_TCP_BYTES: u16 = 65535;

/// Hardcoded hostnames that we should block for technical reasons
static HARDCODED_BLOCKED_HOSTS: &[&str] = &[
    // Placeholder domain for users to check that Kapiti is working
    "test-blocked.kapiti.io",
    // See https://support.mozilla.org/en-US/kb/canary-domain-use-application-dnsnet
    "use-application-dns.net",
];

/// Runs the server. Separate from main.rs to simplify testing in benchmarks
pub struct Runner {
    config: config::Config,
    storage_dir: PathBuf,
    tcp_listener: TcpListener,
    udp_sock: UdpSocket,
}

/// The data associated with an incoming request,
/// either a bytes payload for UDP or a stream (with response socket) for TCP
#[derive(Debug)]
pub enum RequestData {
    Udp(BytesMut),
    Tcp(TcpStream),
}

/// The request and the source that sent the request.
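///
/// A construction sketch, roughly as the listeners would produce these
/// (`src`, `query_bytes`, and `accepted_stream` are hypothetical values):
///
/// ```ignore
/// let udp_msg = RequestMsg { src, data: RequestData::Udp(BytesMut::from(&query_bytes[..])) };
/// let tcp_msg = RequestMsg { src, data: RequestData::Tcp(accepted_stream) };
/// ```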
#[derive(Debug)]
pub struct RequestMsg {
    pub src: SocketAddr,
    pub data: RequestData,
}

impl Runner {
    /// Creates a new `Runner` instance after setting up any listen sockets.
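    ///
    /// A usage sketch (illustrative only, not compiled as a doctest; producing
    /// the `config::Config` value is left to the caller, and `parse_config` is
    /// a hypothetical helper):
    ///
    /// ```ignore
    /// let config: config::Config = parse_config("kapiti.toml")?; // hypothetical helper
    /// let runner = Runner::new("kapiti.toml".to_string(), config).await?;
    /// info!("DNS listener ready at {:?}", runner.get_udp_endpoint()?);
    /// ```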
    pub async fn new(config_path: String, config: config::Config) -> Result<Runner> {
        // Initialize listen socket up-front so that the caller can quickly downgrade the process to non-root if needed.
        let dns_listen_host = config.listen_dns.trim();
        let dns_listen_addr = dns_listen_host
            .to_socket_addrs()?
            .next()
            .with_context(|| format!("Invalid listen_dns address: {}", dns_listen_host))?;

        let storage_dir = PathBuf::from(config.storage.trim());
        if !storage_dir.exists() {
            fs::create_dir(&storage_dir).with_context(|| {
                format!("Failed to create storage directory: {:?}", storage_dir)
            })?;
        } else if storage_dir.is_file() {
            bail!(
                "Specified .storage path in {} is a regular file: {:?}",
                config_path,
                storage_dir
            );
        }

        // Set up sockets up-front. This is mainly to support listening on an ephemeral port (:0),
        // where tcp_addr/udp_addr are unknown until the listeners have been initialized.
        let tcp_listener = TcpListener::bind(dns_listen_addr)
            .await
            .with_context(|| format!("Failed to listen on TCP {}", dns_listen_addr))?;
        let udp_sock = UdpSocket::bind(dns_listen_addr)
            .await
            .with_context(|| format!("Failed to listen on UDP {}", dns_listen_addr))?;

        Ok(Runner {
            config,
            storage_dir,
            tcp_listener,
            udp_sock,
        })
    }

    /// Returns the listen endpoint for the TCP socket.
    /// This is for test cases where an ephemeral listen port is being used.
    pub fn get_tcp_endpoint(&self) -> Result<SocketAddr> {
        self.tcp_listener
            .local_addr()
            .with_context(|| "Couldn't get local TCP socket address")
    }

    /// Returns the listen endpoint for the UDP socket.
    /// This is for test cases where an ephemeral listen port is being used.
    pub fn get_udp_endpoint(&self) -> Result<SocketAddr> {
        self.udp_sock
            .local_addr()
            .with_context(|| "Couldn't get local UDP socket address")
    }

    /// Runs (and consumes) the server. This should block until one of the following occurs:
    /// - A fatal error
    /// - The `stop` barrier has been released, i.e. its other participants have called `wait`
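    ///
    /// A minimal stop-signal sketch, assuming two barrier participants (the
    /// server and a controller); illustrative only, not compiled as a doctest:
    ///
    /// ```ignore
    /// let stop = Arc::new(Barrier::new(2));
    /// let server_task = smol::spawn(runner.run(stop.clone()));
    /// // ...later, from the controller: releasing the barrier unblocks run()
    /// // and begins an orderly shutdown.
    /// stop.wait().await;
    /// server_task.await?;
    /// ```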
    pub async fn run(self, stop: Arc<Barrier>) -> Result<()> {
        // Set up a channel for handling cache lookups/updates.
        // We use the channel pattern here to avoid a shared/mutexed cache object, which
        // internally may need to make await calls while holding the lock guard: holding a
        // mutex guard across an await point is a classic async Rust pitfall.
        let (cache_tx, cache_task) = cache::task::start_cache(&self.config)?;

        // Set up a channel for receiving requests from listen sockets and making them available to worker threads.
        let (mut server_query_tx, server_query_rx): (
            async_channel::Sender<RequestMsg>,
            async_channel::Receiver<RequestMsg>,
        ) = async_channel::bounded(32);
        // Guard the receive end with a mutex: it is shared by the worker threads
        let server_query_rx = Arc::new(Mutex::new(server_query_rx));

        // Spawn task to listen for incoming TCP requests
        let tcp_listener_task: Task<()>;
        {
            let mut tcp_listener_move = self.tcp_listener;
            let mut server_query_tx_copy = server_query_tx.clone();
            tcp_listener_task = smol::spawn(async move {
                listen_tcp::listen_tcp(&mut tcp_listener_move, &mut server_query_tx_copy)
                    .await
                    .expect("TCP listen failed");
            });
        }

        // Spawn task to listen for incoming UDP requests
        let udp_listener_task: Task<()>;
        let udp_endpoint = self.udp_sock.local_addr()?;
        {
            let mut udp_sock_copy = self.udp_sock.clone();
            udp_listener_task = smol::spawn(async move {
                // Move the original server_query_tx rather than creating another clone.
                // This ensures that the queue will close when the listeners are gone.
                listen_udp::listen_udp(&mut udp_sock_copy, &mut server_query_tx)
                    .await
                    .expect("UDP listen failed");
            });
        }

        let resolver = client::upstream::parse_upstreams(cache_tx.clone(), &self.config.upstreams)?;
        let filters_dir = self.storage_dir.join("filters");
        if !filters_dir.exists() {
            fs::create_dir(&filters_dir).with_context(|| {
                format!(
                    "Failed to create filter download directory: {:?}",
                    filters_dir
                )
            })?;
        } else if filters_dir.is_file() {
            bail!(
                "Filter download directory configured storage path is a regular file: {:?}",
                filters_dir
            );
        }

        let mut filter = filter::Filter::new();
        // Set up the hardcoded values first - for now they take priority over any manual configuration.
        // There isn't a good reason for a user to override a Kapiti test domain, for example.
        filter.set_hardcoded_block(HARDCODED_BLOCKED_HOSTS.into())?;
        let filter = Arc::new(Mutex::new(filter));

        let mut thread_handles = Vec::new();

        // Set up a thread for periodically reloading filters.
        // Avoids using an async task, since async tasks interact poorly with blocking mutexes.
        let config_filters_copy = self.config.filters.clone();
        let config_filter_refresh = Duration::from_secs(self.config.filter_refresh_seconds);
        let filter_copy = filter.clone();
        let thread_fn = move || {
            smol::block_on(async move {
                let fetch_client = hyper_smol::client_kapiti(resolver, false, false, 4096);
                let span = tracing::info_span!("update-filters");
                let _enter = span.enter();
                // TODO(#9) for local paths, run inotify-based updates in a separate thread from this one
                loop {
                    for (_name, conf) in &config_filters_copy {
                        let mut filters = Vec::new();
                        for entry in &conf.overrides {
                            if let Ok(download_path_str) = filter::update_url(
                                &fetch_client,
                                &filters_dir,
                                &entry,
                                10000,
                            ).await {
                                if let Ok(filter) = reader::read(
                                    reader::FilterType::OVERRIDE,
                                    reader::FileInfo {
                                        source_path: entry.clone(),
                                        local_path: download_path_str
                                    }
                                ) {
                                    filters.push(filter);
                                }
                            }
                        }
                        for entry in &conf.blocks {
                            if let Ok(download_path_str) = filter::update_url(
                                &fetch_client,
                                &filters_dir,
                                &entry,
                                10000,
                            ).await {
                                if let Ok(filter) = reader::read(
                                    reader::FilterType::BLOCK,
                                    reader::FileInfo {
                                        source_path: entry.clone(),
                                        local_path: download_path_str,
                                    }
                                ) {
                                    filters.push(filter);
                                }
                            }
                        }
                        if let Ok(mut filter_locked) = filter_copy.lock() {
                            filter_locked.update_entries(filters);
                        } else {
                            warn!("Failed to lock filter for entry update");
                        }
                    }
                    if config_filter_refresh.as_secs() == 0 {
                        info!("Exiting filter refresh: filter_refresh_seconds=0");
                        break;
                    }
                    info!("Next filter refresh in {:?}", config_filter_refresh);
                    thread::sleep(config_filter_refresh);
                }
            });
        };
        thread_handles.push(
            thread::Builder::new()
                .name("filter-reload".to_string())
                .spawn(thread_fn)?,
        );

        // Start independent threads to handle received requests, query upstreams, and send back responses
        let response_timeout = Duration::from_millis(1000);
        for i in 0..10 {
            let mut server = lookup::Lookup::new(
                client::upstream::parse_upstreams(cache_tx.clone(), &self.config.upstreams)?,
                filter.clone(),
            );
            let server_query_rx_copy = server_query_rx.clone();
            let udp_sock_copy = self.udp_sock.clone();
            let thread_fn = move || {
                smol::block_on(async move {
                    let mut tcp_buf = BytesMut::with_capacity(MAX_TCP_BYTES as usize);
                    loop {
                        let request: RequestMsg;
                        match server_query_rx_copy.lock() {
                            Err(e) => {
                                warn!("Failed to lock receive queue, trying again: {:?}", e);
                                continue;
                            }
                            Ok(server_query_rx_lock) => {
                                // Grab a request from the queue, then release the lock
                                if let Ok(got_request) = server_query_rx_lock.recv().await {
                                    request = got_request;
                                } else {
                                    info!("Exiting thread: request queue has closed.");
                                    return;
                                }
                            }
                        }

                        let span = tracing::info_span!("handle-query");
                        let _enter = span.enter();
                        match request.data {
                            RequestData::Udp(buf) => {
                                listen_udp::handle_udp_request(&mut server, request.src, buf, &udp_sock_copy)
                                    .await
                            }
                            RequestData::Tcp(tcp_stream) => {
                                listen_tcp::handle_tcp_request(
                                    &mut server,
                                    &response_timeout,
                                    request.src,
                                    tcp_stream,
                                    &mut tcp_buf,
                                )
                                .await;
                                // Reset buffer size afterwards (capacity should stay the same)
                                tcp_buf.resize(MAX_TCP_BYTES as usize, 0);
                            }
                        }
                    }
                });
            };
            thread_handles.push(
                thread::Builder::new()
                    .name(format!("query-exec-{}", i))
                    .spawn(thread_fn)?,
            );
        }

        // We just log the UDP endpoint - it should be the same as the TCP endpoint unless port 0 was used
        info!("Waiting for clients at {:?}", udp_endpoint);

        // Wait indefinitely for the stop barrier to receive an event or be closed
        stop.wait().await;
        info!("Shutting down: stop signal received");

        // First, drop the task handles for the TCP/UDP listeners so that they stop.
        drop(tcp_listener_task);
        drop(udp_listener_task);

        // Now that the listeners have stopped, the processing queue should eventually empty.
        // Once empty, the queue should return None to the threads since its inputs (the listeners) have all been dropped.
        // At that point the threads should exit on their own.
        for thread in thread_handles {
            let thread_str = format!("{:?}", &thread);
            debug!("Waiting for thread to exit: {}", thread_str);
            // Can't use with_context here: join()'s error type (Box<dyn Any + Send>) doesn't implement Error.
            if let Err(e) = thread.join() {
                bail!("Failed to wait for thread to exit: {} {:?}", thread_str, e);
            }
        }

        // Finally shut down the cache once nobody should be using it.
        drop(cache_task);

        info!("Shutdown complete");
        Ok(())
    }
}