Commit b0168fd4 authored by louiz’

mam: Send “fin complete” only when appropriate

Also simplify how we did the whole “limit + 1”
And fix one bad interpretation of the XEP for the case where the query has
no after or before restriction.

fix #3349
parent 0887be6e
Pipeline #1251 passed with stages
in 30 minutes and 2 seconds
......@@ -1005,7 +1005,8 @@ void Bridge::send_room_history(const std::string& hostname, std::string chan_nam
auto limit = coptions.col<Database::MaxHistoryLength>();
if (history_limit.stanzas >= 0 && history_limit.stanzas < limit)
limit = history_limit.stanzas;
const auto lines = Database::get_muc_logs(this->user_jid, chan_name, hostname, limit, history_limit.since, {}, Id::unset_value, Database::Paging::last);
const auto result = Database::get_muc_logs(this->user_jid, chan_name, hostname, limit, history_limit.since, {}, Id::unset_value, Database::Paging::last);
const auto& lines = std::get<1>(result);
chan_name.append(utils::empty_if_fixed_server("%" + hostname));
for (const auto& line: lines)
{
......
......@@ -190,12 +190,9 @@ std::string Database::store_muc_message(const std::string& owner, const std::str
return uuid;
}
std::vector<Database::MucLogLine> Database::get_muc_logs(const std::string& owner, const std::string& chan_name, const std::string& server,
int limit, const std::string& start, const std::string& end, const Id::real_type reference_record_id, Database::Paging paging)
std::tuple<bool, std::vector<Database::MucLogLine>> Database::get_muc_logs(const std::string& owner, const std::string& chan_name, const std::string& server,
std::size_t limit, const std::string& start, const std::string& end, const Id::real_type reference_record_id, Database::Paging paging)
{
if (limit == 0)
return {};
auto request = select(Database::muc_log_lines);
request.where() << Database::Owner{} << "=" << owner << \
" and " << Database::IrcChanName{} << "=" << chan_name << \
......@@ -228,15 +225,26 @@ std::vector<Database::MucLogLine> Database::get_muc_logs(const std::string& owne
else
request.order_by() << Id{} << " DESC ";
if (limit >= 0)
request.limit() << limit;
// Just a simple trick: to know whether we got the totality of the
// possible results matching this query (except for the limit), we just
// ask one more element. If we get that additional element, this means
// we don’t have everything. And then we just discard it. If we don’t
// have more, this means we have everything.
request.limit() << limit + 1;
auto result = request.execute(*Database::db);
bool complete = true;
if (result.size() == limit + 1)
{
complete = false;
result.erase(std::prev(result.end()));
}
if (paging == Database::Paging::first)
return result;
return {complete, result};
else
return {result.crbegin(), result.crend()};
return {complete, {result.crbegin(), result.crend()}};
}
Database::MucLogLine Database::get_muc_log(const std::string& owner, const std::string& chan_name, const std::string& server,
......
......@@ -131,8 +131,8 @@ class Database
* Get all the lines between (optional) start and end dates, with a (optional) limit.
* If after_id is set, only the records after it will be returned.
*/
static std::vector<MucLogLine> get_muc_logs(const std::string& owner, const std::string& chan_name, const std::string& server,
int limit=-1, const std::string& start="", const std::string& end="",
static std::tuple<bool, std::vector<MucLogLine>> get_muc_logs(const std::string& owner, const std::string& chan_name, const std::string& server,
std::size_t limit, const std::string& start="", const std::string& end="",
const Id::real_type reference_record_id=Id::unset_value, Paging=Paging::first);
/**
......
......@@ -743,19 +743,15 @@ bool BiboumiComponent::handle_mam_request(const Stanza& stanza)
}
// Do not send more than 100 messages, even if the client asked for more,
// or if it didn’t specify any limit.
// 101 is just a trick to know if there are more available messages.
// If our query returns 101 message, we know it’s incomplete, but we
// still send only 100
if ((limit == -1 && start.empty() && end.empty())
|| limit > 100)
limit = 101;
auto lines = Database::get_muc_logs(from.bare(), iid.get_local(), iid.get_server(), limit, start, end, reference_record_id, paging_order);
bool complete = true;
if (lines.size() > 100)
{
complete = false;
lines.erase(lines.begin(), std::prev(lines.end(), 100));
}
if (limit < 0 || limit > 100)
limit = 100;
auto result = Database::get_muc_logs(from.bare(), iid.get_local(), iid.get_server(),
limit,
start, end,
reference_record_id, paging_order);
bool complete = std::get<bool>(result);
auto& lines = std::get<1>(result);
for (const Database::MucLogLine& line: lines)
{
if (!line.col<Database::Nick>().empty())
......
......@@ -1961,7 +1961,7 @@ if __name__ == '__main__':
("/iq[@type='result'][@id='id3'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set")),
# Retrieve a limited archive
# Retrieve the whole archive, but limit the response to one element
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id4'><query xmlns='urn:xmpp:mam:2' queryid='qid4'><set xmlns='http://jabber.org/protocol/rsm'><max>1</max></set></query></iq>"),
partial(expect_stanza,
......@@ -1971,7 +1971,7 @@ if __name__ == '__main__':
partial(expect_stanza,
("/iq[@type='result'][@id='id4'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set")),
"!/iq/mam:fin[@complete='true']/rsm:set")),
]),
Scenario("mam_with_timestamps",
......@@ -2205,11 +2205,9 @@ if __name__ == '__main__':
] * 150 + [
# Retrieve the archive, without any restriction
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id1'><query xmlns='urn:xmpp:mam:2' queryid='qid1' /></iq>"),
# Since we should only receive the last 100 messages from the archive,
# it should start with message "1"
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='1']")
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='0']")
),
] + [
# followed by 98 more messages
......@@ -2221,7 +2219,7 @@ if __name__ == '__main__':
# and finally the message "99"
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='100']"),
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='99']"),
after = partial(save_value, "last_uuid", partial(extract_attribute, "/message/mam:result", "id"))
),
# And it should not be marked as complete
......@@ -2236,9 +2234,9 @@ if __name__ == '__main__':
partial(expect_stanza,
("/message/mam:result[@queryid='qid2']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid2']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='101']")
"/message/mam:result[@queryid='qid2']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='100']")
),
] + 47 * [
] + 48 * [
partial(expect_stanza,
("/message/mam:result[@queryid='qid2']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid2']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body")
......@@ -2297,8 +2295,7 @@ if __name__ == '__main__':
partial(expect_stanza,
("/iq[@type='result'][@id='id4'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin/rsm:set/rsm:last[text()='{last_uuid}']",
"/iq/mam:fin[@complete='true']",
"/iq/mam:fin")),
"!/iq/mam:fin[@complete='true']",)),
]),
Scenario("channel_history_on_fixed_server",
[
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment