Diffstat (limited to 'lib/msgstore.go')
-rw-r--r--  lib/msgstore.go | 196
1 file changed, 168 insertions(+), 28 deletions(-)
diff --git a/lib/msgstore.go b/lib/msgstore.go
index e2e968b..ccde2c2 100644
--- a/lib/msgstore.go
+++ b/lib/msgstore.go
@@ -1,12 +1,15 @@
package lib
import (
+ "fmt"
"io"
+ "sort"
"time"
- "git.sr.ht/~sircmpwn/aerc/lib/sort"
+ aercSort "git.sr.ht/~sircmpwn/aerc/lib/sort"
"git.sr.ht/~sircmpwn/aerc/models"
"git.sr.ht/~sircmpwn/aerc/worker/types"
+ "github.com/emersion/go-message"
)
// Accesses to fields must be guarded by MessageStore.Lock/Unlock
@@ -15,15 +18,17 @@ type MessageStore struct {
DirInfo models.DirectoryInfo
Messages map[uint32]*models.MessageInfo
// Ordered list of known UIDs
- uids []uint32
- Threads []*types.Thread
+ uids []uint32
selected int
bodyCallbacks map[uint32][]func(io.Reader)
headerCallbacks map[uint32][]func(*types.MessageInfo)
// If set, messages in the mailbox will be threaded
- thread bool
+ ThreadRoot *types.Thread
+ FlatThreads []*types.Thread
+ thread bool
+ threadRefs map[string]*types.Thread
// Search/filter results
results []uint32
@@ -57,8 +62,9 @@ func NewMessageStore(worker *types.Worker,
selected: 0,
bodyCallbacks: make(map[uint32][]func(io.Reader)),
headerCallbacks: make(map[uint32][]func(*types.MessageInfo)),
-
- thread: thread,
+ ThreadRoot: &types.Thread{Uid: 0, Dummy: true},
+ thread: thread,
+ threadRefs: make(map[string]*types.Thread),
defaultSortCriteria: defaultSortCriteria,
@@ -175,36 +181,47 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
}
update = true
case *types.DirectoryThreaded:
- var uids []uint32
+ var (
+ uids []uint32
+ flattened []*types.Thread
+ )
newMap := make(map[uint32]*models.MessageInfo)
- for i := len(msg.Threads) - 1; i >= 0; i-- {
- msg.Threads[i].FormatThread(func(t *types.Thread, x []rune) bool {
- uid := t.Uid
- uids = append([]uint32{uid}, uids...)
- if msg, ok := store.Messages[uid]; ok {
- newMap[uid] = msg
- } else {
- newMap[uid] = nil
- directoryChange = true
- }
- return false
- })
- }
+ msg.ThreadRoot.Traverse(false, func(t *types.Thread) bool {
+ uid := t.Uid
+ uids = append([]uint32{uid}, uids...)
+ flattened = append([]*types.Thread{t}, flattened...)
+ if msg, ok := store.Messages[uid]; ok {
+ newMap[uid] = msg
+ } else {
+ newMap[uid] = nil
+ directoryChange = true
+ }
+ return false
+ })
store.Messages = newMap
store.uids = uids
- store.Threads = msg.Threads
+ store.FlatThreads = flattened
+ store.ThreadRoot = msg.ThreadRoot
update = true
case *types.DirectoryContents:
+ var needsHeaders []uint32
newMap := make(map[uint32]*models.MessageInfo)
for _, uid := range msg.Uids {
if msg, ok := store.Messages[uid]; ok {
newMap[uid] = msg
} else {
newMap[uid] = nil
+ needsHeaders = append(needsHeaders, uid)
directoryChange = true
}
}
+ if store.thread {
+ // We need the message headers in order to thread by references. Fetch
+ // them all for now; we could probably be smarter here, but let's get
+ // something working first.
+ store.FetchHeaders(needsHeaders, nil)
+ }
store.Messages = newMap
store.uids = msg.Uids
update = true
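The DirectoryThreaded case above relies on types.Thread.Traverse, which is not part of this diff. A minimal sketch of what such a depth-first walk could look like, assuming Thread carries the Uid, Dummy, Parent and Children fields the diff uses, that the first argument reverses child order, and that a callback returning true aborts the walk (hence the return false above to keep going):

// Sketch only, not aerc code: a depth-first walk in the shape this diff
// assumes for (*types.Thread).Traverse. The early-stop semantics of the
// callback's bool return and the meaning of the reverse flag are assumptions.
package sketch

type Thread struct {
	Uid      uint32
	Dummy    bool
	Parent   *Thread
	Children []*Thread
}

func (t *Thread) Traverse(reverse bool, fn func(*Thread) bool) bool {
	if fn(t) {
		return true // callback asked to stop the walk
	}
	n := len(t.Children)
	for i := 0; i < n; i++ {
		child := t.Children[i]
		if reverse {
			child = t.Children[n-1-i]
		}
		if child.Traverse(reverse, fn) {
			return true
		}
	}
	return false
}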
@@ -234,6 +251,9 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
}
}
}
+ if store.thread {
+ store.threadMessage(msg.Info)
+ }
update = true
case *types.FullMessage:
if _, ok := store.pendingBodies[msg.Content.Uid]; ok {
@@ -248,19 +268,49 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
case *types.MessagesDeleted:
toDelete := make(map[uint32]interface{})
for _, uid := range msg.Uids {
+ if store.thread {
+ if needle := store.ThreadRoot.Find(uid); needle != nil {
+ _msg := store.Messages[uid]
+ delete(store.threadRefs, _msg.Envelope.MessageId)
+ needle.Dummy = true
+ }
+ }
toDelete[uid] = nil
delete(store.Messages, uid)
delete(store.Deleted, uid)
}
uids := make([]uint32, len(store.uids)-len(msg.Uids))
- j := 0
- for _, uid := range store.uids {
- if _, deleted := toDelete[uid]; !deleted && j < len(uids) {
- uids[j] = uid
- j += 1
+ if store.thread {
+ flattened := make([]*types.Thread, len(store.FlatThreads)-len(msg.Uids))
+ j := 0
+ for _, uid := range store.uids {
+ if _, deleted := toDelete[uid]; !deleted && j < len(uids) {
+ uids[j] = uid
+ j += 1
+ }
+ }
+ j = 0
+ for _, t := range store.FlatThreads {
+ uid := t.Uid
+ if _, deleted := toDelete[uid]; !deleted && j < len(flattened) {
+ flattened[j] = t
+ j += 1
+ }
}
+ fmt.Printf("DELETE UID: prev: %d, new: %d\n", len(store.uids), len(uids))
+ fmt.Printf("DELETE FLAT: prev: %d, new: %d\n", len(store.FlatThreads), len(flattened))
+ store.uids = uids
+ store.FlatThreads = flattened
+ } else {
+ j := 0
+ for _, uid := range store.uids {
+ if _, deleted := toDelete[uid]; !deleted && j < len(uids) {
+ uids[j] = uid
+ j += 1
+ }
+ }
+ store.uids = uids
}
- store.uids = uids
update = true
}
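The deletion path above filters store.uids and store.FlatThreads with two near-identical loops, each preallocating a result slice of length len(old) - len(msg.Uids), which only holds when every deleted UID is actually present. A sketch of a helper that avoids that assumption (not part of the patch; the same shape works for []*types.Thread keyed on t.Uid):

// Sketch only: keep the uids that are not in toDelete, without assuming how
// many of the deleted uids are actually present in the slice.
package sketch

func filterDeleted(uids []uint32, toDelete map[uint32]interface{}) []uint32 {
	kept := make([]uint32, 0, len(uids))
	for _, uid := range uids {
		if _, deleted := toDelete[uid]; !deleted {
			kept = append(kept, uid)
		}
	}
	return kept
}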
@@ -273,6 +323,96 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
}
}
+func (store *MessageStore) threadMessage(msg *models.MessageInfo) {
+ var (
+ fields message.HeaderFields
+
+ childThread *types.Thread
+ irt *types.Thread
+ roots []*types.Thread
+ )
+ if msg.Envelope == nil {
+ return
+ }
+
+ newRefs := make(map[string]*types.Thread)
+
+ if thread, ok := store.threadRefs[msg.Envelope.MessageId]; ok {
+ // Are we in the references table as someone else's parent?
+ thread.Dummy = false
+ thread.Uid = msg.Uid
+ childThread = thread
+ } else {
+ // Otherwise, create a new thread for this message
+ childThread = &types.Thread{Uid: msg.Uid}
+ }
+
+ newRefs[msg.Envelope.MessageId] = childThread
+
+ fields = msg.RFC822Headers.FieldsByKey("In-Reply-To")
+ if fields.Next() {
+ inReplyHeader, err := fields.Text()
+ if err != nil {
+ return
+ }
+ if p, ok := store.threadRefs[inReplyHeader]; ok {
+ irt = p
+ } else {
+ irt = &types.Thread{Uid: 0, Dummy: true}
+ }
+ childThread.Parent = irt
+ irt.Children = append(irt.Children, childThread)
+ newRefs[inReplyHeader] = irt
+ }
+
+ for r, t := range store.threadRefs {
+ if _, ok := newRefs[r]; !ok {
+ newRefs[r] = t
+ }
+ }
+
+ for _, t := range newRefs {
+ if t.Parent == nil || t.Parent == store.ThreadRoot {
+ roots = append(roots, t)
+ t.Parent = store.ThreadRoot
+ }
+ }
+ store.ThreadRoot.Children = roots
+
+ var (
+ uids []uint32
+ flattened []*types.Thread
+ )
+
+ if len(store.pendingHeaders) == 0 {
+ // Sort the root of the tree
+ children := store.ThreadRoot.Children
+ sort.Slice(children, func(i, j int) bool {
+ ci, cj := children[i], children[j]
+ if ci.Dummy {
+ ci = ci.Children[0]
+ }
+ if cj.Dummy {
+ cj = cj.Children[0]
+ }
+ mi, mj := store.Messages[ci.Uid], store.Messages[cj.Uid]
+ return mi.InternalDate.After(mj.InternalDate)
+ })
+
+ // Linearize tree
+ store.ThreadRoot.Traverse(false, func(t *types.Thread) bool {
+ uid := t.Uid
+ uids = append([]uint32{uid}, uids...)
+ flattened = append(flattened, t)
+ return false
+ })
+ }
+
+ store.FlatThreads = flattened
+ store.threadRefs = newRefs
+ store.uids = uids
+}
+
func (store *MessageStore) OnUpdate(fn func(store *MessageStore)) {
store.onUpdate = fn
}
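threadMessage above keys the threadRefs map by Message-Id so that a reply can attach to its parent even when the parent has not been seen yet: a dummy placeholder is inserted and later claimed by the real message. A self-contained sketch of that idea, using illustrative names rather than aerc's types:

// Sketch of reference-based threading with placeholder parents, mirroring
// the idea behind threadRefs above. All names here are illustrative, not
// aerc's.
package main

import "fmt"

type node struct {
	id       string
	dummy    bool
	parent   *node
	children []*node
}

func thread(refs map[string]*node, msgID, inReplyTo string) *node {
	n, ok := refs[msgID]
	if !ok {
		n = &node{id: msgID}
		refs[msgID] = n
	}
	n.dummy = false
	if inReplyTo == "" || n.parent != nil {
		return n
	}
	p, ok := refs[inReplyTo]
	if !ok {
		// Parent not seen yet: insert a dummy that a later arrival can claim.
		p = &node{id: inReplyTo, dummy: true}
		refs[inReplyTo] = p
	}
	n.parent = p
	p.children = append(p.children, n)
	return n
}

func main() {
	refs := make(map[string]*node)
	thread(refs, "<reply@b>", "<root@a>") // reply arrives first, creates a dummy parent
	thread(refs, "<root@a>", "")          // the real parent claims the dummy
	fmt.Println(len(refs["<root@a>"].children)) // 1
}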
@@ -418,7 +558,7 @@ func (store *MessageStore) Search(args []string, cb func([]uint32)) {
}, func(msg types.WorkerMessage) {
switch msg := msg.(type) {
case *types.SearchResults:
- sort.SortBy(msg.Uids, store.uids)
+ aercSort.SortBy(msg.Uids, store.uids)
cb(msg.Uids)
}
})
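aercSort.SortBy (the aliased aerc lib/sort package) is presumably used here to reorder the search results so they match the store's current uid order. A sketch of that idea, not aerc's implementation:

// Sketch only: order toSort according to the position of each uid in order.
package main

import (
	"fmt"
	"sort"
)

func sortBy(toSort []uint32, order []uint32) {
	pos := make(map[uint32]int, len(order))
	for i, uid := range order {
		pos[uid] = i
	}
	sort.Slice(toSort, func(i, j int) bool {
		return pos[toSort[i]] < pos[toSort[j]]
	})
}

func main() {
	results := []uint32{7, 3, 9}
	order := []uint32{9, 8, 7, 3}
	sortBy(results, order)
	fmt.Println(results) // [9 7 3]
}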