~rjarry/aerc

9fdc7acf5b4842b95ab2b53c9baf69ab085b9e79 — Tim Culverhouse 17 days ago a91009e
cache: fetch flags from UI

When cached headers are fetched, an action is posted back to the Worker
to immediately fetch the flags for the message from the server (we can't
know the flags state, therefore it's not cached). When scrolling, a lag
occurs when loading cached headers because the n+1 message has to wait
for the flag request to return before the cached headers are retrieved.

Collect the message UIDs in the UI that need flags, and fetch them based
on a debounce timer in a single request. Post the action from the UI to
eliminate an (ugly) goroutine in the worker.

Signed-off-by: Tim Culverhouse <tim@timculverhouse.com>
Acked-by: Robin Jarry <robin@jarry.cc>
3 files changed, 29 insertions(+), 11 deletions(-)

M lib/msgstore.go
M worker/imap/cache.go
M worker/types/messages.go
M lib/msgstore.go => lib/msgstore.go +23 -0
@@ 48,6 48,10 @@ type MessageStore struct {
	pendingHeaders map[uint32]interface{}
	worker         *types.Worker

	needsFlags         []uint32
	fetchFlagsDebounce *time.Timer
	fetchFlagsDelay    time.Duration

	triggerNewEmail        func(*models.MessageInfo)
	triggerDirectoryChange func()



@@ 91,6 95,9 @@ func NewMessageStore(worker *types.Worker,
		pendingHeaders: make(map[uint32]interface{}),
		worker:         worker,

		needsFlags:      []uint32{},
		fetchFlagsDelay: 50 * time.Millisecond,

		triggerNewEmail:        triggerNewEmail,
		triggerDirectoryChange: triggerDirectoryChange,



@@ 251,6 258,10 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
		} else if msg.Info.Envelope != nil {
			store.Messages[msg.Info.Uid] = msg.Info
		}
		if msg.NeedsFlags {
			store.needsFlags = append(store.needsFlags, msg.Info.Uid)
			store.fetchFlags()
		}
		seen := false
		recent := false
		for _, flag := range msg.Info.Flags {


@@ 752,3 763,15 @@ func (store *MessageStore) Capabilities() *models.Capabilities {
// SelectedIndex returns the index of the currently selected message
// (store.selectedUid) within the store's message list, as computed by
// FindIndexByUid. NOTE(review): the not-found return value is whatever
// FindIndexByUid yields (presumably -1) — confirm against its definition.
func (store *MessageStore) SelectedIndex() int {
	return store.FindIndexByUid(store.selectedUid)
}

// fetchFlags schedules a single FetchMessageFlags request for every UID
// accumulated in store.needsFlags, debounced by store.fetchFlagsDelay so
// that a burst of cached-header loads (e.g. while scrolling) produces one
// batched request instead of one request per message.
func (store *MessageStore) fetchFlags() {
	if store.fetchFlagsDebounce != nil {
		// Restart the debounce window: cancel the previously scheduled
		// fetch so only the last call within the delay fires.
		// NOTE(review): Timer.Stop does not guarantee the function has
		// not already started running — confirm that is acceptable here.
		store.fetchFlagsDebounce.Stop()
	}
	store.fetchFlagsDebounce = time.AfterFunc(store.fetchFlagsDelay, func() {
		// NOTE(review): this closure runs on the timer's own goroutine
		// while needsFlags is appended to from Update — confirm both are
		// serialized on the same event loop; otherwise needsFlags needs
		// a mutex.
		store.worker.PostAction(&types.FetchMessageFlags{
			Uids: store.needsFlags,
		}, nil)
		// Reset the batch for the next accumulation window.
		store.needsFlags = []uint32{}
	})
}

M worker/imap/cache.go => worker/imap/cache.go +4 -10
@@ 85,7 85,7 @@ func (w *IMAPWorker) cacheHeader(mi *models.MessageInfo) {

func (w *IMAPWorker) getCachedHeaders(msg *types.FetchMessageHeaders) []uint32 {
	logging.Debugf("Retrieving headers from cache: %v", msg.Uids)
	var need, found []uint32
	var need []uint32
	uv := fmt.Sprintf("%d", w.selected.UidValidity)
	for _, uid := range msg.Uids {
		u := fmt.Sprintf("%d", uid)


@@ 118,17 118,11 @@ func (w *IMAPWorker) getCachedHeaders(msg *types.FetchMessageHeaders) []uint32 {
			Uid:           ch.Uid,
			RFC822Headers: hdr,
		}
		found = append(found, uid)
		logging.Debugf("located cached header %s.%s", uv, u)
		w.worker.PostMessage(&types.MessageInfo{
			Message: types.RespondTo(msg),
			Info:    mi,
		}, nil)
	}
	if len(found) > 0 {
		// Post in a separate goroutine to prevent deadlocking
		go w.worker.PostAction(&types.FetchMessageFlags{
			Uids: found,
			Message:    types.RespondTo(msg),
			Info:       mi,
			NeedsFlags: true,
		}, nil)
	}
	return need

M worker/types/messages.go => worker/types/messages.go +2 -1
@@ 212,7 212,8 @@ type SearchResults struct {

type MessageInfo struct {
	Message
	Info *models.MessageInfo
	Info       *models.MessageInfo
	NeedsFlags bool
}

type FullMessage struct {