~siborgium/prelockdpp

f72557e2b403667d87e91adcf0ac49d02d79676d — Sergey Smirnykh 2 years ago 368e662
Finally get rid of fstreams, fix intr node migration error

std::fstream internally allocates std::filebuf, which handles all file work
and has a rather large buffer. We would create one each iteration, effectively
messing up our no-alloc strategy. It is possible to `reopen` a file, but we
have no access to the file buffer (sadly), which in turn forces us to allocate
one, which is not nice either.

Now, with C files & linebuf, buffer handling is more transparent.
1 file changed, 25 insertions(+), 16 deletions(-)

M main.cpp
M main.cpp => main.cpp +25 -16
@@ 1,6 1,5 @@

#include <iostream>
#include <fstream>
#include <limits>
#include <charconv>
#include <string>


@@ 61,9 60,11 @@ auto concat(auto&& ... xs) {
};

struct errno_exception: public std::runtime_error {
    int err;

    errno_exception(std::string_view reason, int err):
        // FIXME: possible strerror invalidation
        std::runtime_error{ concat(reason, ": ", std::strerror(err)) }
        std::runtime_error{ concat(reason, ": ", std::strerror(err)) }, err{ err }
    {
        // intentionally left blank
    }


@@ 265,8 266,8 @@ struct context {
    char*       pid_end;

    // buffers used to avoid re-allocating the same space
    linebuf                         line_buffer;
    std::string                     stat_buffer;
    linebuf line_buffer;
    linebuf stat_buffer;

    // names (comms, actually, see comm in proc(5), /proc/[pid]/stat) of
    // programs we should lock


@@ 361,11 362,14 @@ struct context {
    auto with_uniq_id(const char* stat_path, auto && callback) {
        using namespace std::string_view_literals;
        try {
            // TODO: can we do reading stat cheaper?
            std::ifstream stat{ stat_path };
            std::getline(stat, stat_buffer);
            auto stat = fopen(stat_path, "r");
            if (!stat) {
                throw errno_exception("fopen(/proc/PID/stat) failed", errno);
            }
            DEFER{ fclose(stat); };
            auto stat_len = stat_buffer.getline(stat);

            std::string_view stat_view{ stat_buffer };
            std::string_view stat_view( stat_buffer.ptr, stat_len );
            if (stat_view.ends_with(" 0 0 0")) {
                // ignore kthreads
                return;


@@ 432,9 436,8 @@ struct context {

            callback(start_time_);

        } catch (const std::system_error& e) {
            auto code = e.code();
            if (code.value() != ENOENT) {
        } catch (const errno_exception& e) {
            if (e.err != ENOENT && e.err != EPERM) {
                throw;
            }
        }


@@ 453,8 456,8 @@ struct context {
                    vec.emplace_back(begin, line_buffer.ptr + n - begin);
                }
            }
        } catch (const std::system_error& e) {
            if (e.code().value() != ENOENT) {
        } catch (const errno_exception& e) {
            if (e.err != ENOENT && e.err != EPERM) {
                throw;
            }
        }


@@ 464,8 467,11 @@ struct context {

        // this set will contain ids (see later)
        // that are alive at this iteration
        // FIXME: we could use a list, actually
        //        any_hooks are pain though
        // NOTE: I thought using list would be desirable here,
        //       but actually using set is fine since we get
        //       migration for free, no need to sort uniqs
        //       when inserting to `current_uniqs_set`, just = move(new_ids)
        //       plus any_hooks are pain
        uniq_alive_set new_ids;

        auto m0 = get_clock_time(CLOCK_MONOTONIC);


@@ 508,8 514,11 @@ struct context {
                    if (iter != current_uniqs_set.end()) {
                        // the id is still alive from the previous iteration
                        // just move it to new_ids
                        new_ids.insert(*iter);
                        // this part is a bit tricky: we have to erase BEFORE insert
                        // just keep in mind that erasure does not invalidate refs
                        auto& uniq = *iter;
                        current_uniqs_set.erase(iter);
                        new_ids.insert(uniq);
                    } else {
                        auto colony_iter = ids.emplace(source);
                        set_self_iter(*colony_iter, colony_iter);