forked from mirror/grapevine

Use self instead of going through services()

Lambda 2024-09-01 11:35:02 +00:00
parent f52cf53931
commit 341f4213d0
8 changed files with 26 additions and 60 deletions
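
Every hunk below makes the same mechanical change: a method on a service struct was reaching back through the global services() accessor to fetch the very service it is defined on, and now it calls the sibling method on self directly. A minimal sketch of the pattern, using illustrative names (Services, AdminService, get_admin_room) rather than Grapevine's actual types:

```rust
// Hypothetical stand-ins for Grapevine's service registry; only the
// before/after shape of the call is taken from the commit itself.

struct AdminService;

impl AdminService {
    fn get_admin_room(&self) -> Option<&'static str> {
        Some("!admins:example.org") // stub
    }

    // Before: the method looked its own service back up through the
    // global registry before calling a sibling method.
    fn notify_old(&self) {
        if let Some(room) = services().admin.get_admin_room() {
            println!("admin room: {room}");
        }
    }

    // After: the same call goes through `self`, skipping the global
    // lookup entirely.
    fn notify_new(&self) {
        if let Some(room) = self.get_admin_room() {
            println!("admin room: {room}");
        }
    }
}

struct Services {
    admin: AdminService,
}

// Global accessor standing in for Grapevine's `services()` function.
fn services() -> &'static Services {
    static SERVICES: Services = Services { admin: AdminService };
    &SERVICES
}

fn main() {
    services().admin.notify_old();
    services().admin.notify_new();
}
```

Besides removing the indirection, routing calls through self makes the &self receiver meaningfully used again, which is why the #[allow(clippy::unused_self)] in the paginate_relations_with_filter hunk below can be dropped.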

View file

@@ -236,8 +236,7 @@ impl Service {
         tokio::spawn(async move {
             let mut receiver = self2.receiver.lock().await;
 
-            let Ok(Some(grapevine_room)) = services().admin.get_admin_room()
-            else {
+            let Ok(Some(grapevine_room)) = self2.get_admin_room() else {
                 return;
             };
@@ -1470,7 +1469,7 @@ impl Service {
         user_id: &UserId,
         displayname: String,
     ) -> Result<()> {
-        if let Some(room_id) = services().admin.get_admin_room()? {
+        if let Some(room_id) = self.get_admin_room()? {
             let room_token = services()
                 .globals
                 .roomid_mutex_state

View file

@@ -11,7 +11,7 @@ use ruma::{
 };
 use tokio::sync::RwLock;
 
-use crate::{services, Result};
+use crate::Result;
 
 /// Compiled regular expressions for a namespace.
 #[derive(Clone, Debug)]
@@ -160,15 +160,9 @@ impl Service {
         &self,
         service_name: &str,
     ) -> Result<()> {
-        services()
-            .appservice
-            .registration_info
-            .write()
-            .await
-            .remove(service_name)
-            .ok_or_else(|| {
-                crate::Error::AdminCommand("Appservice not found")
-            })?;
+        self.registration_info.write().await.remove(service_name).ok_or_else(
+            || crate::Error::AdminCommand("Appservice not found"),
+        )?;
 
         self.db.unregister_appservice(service_name)
     }

View file

@@ -36,7 +36,6 @@ use crate::{
     api::server_server::FedDest,
     observability::FilterReloadHandles,
     service::media::MediaFileKey,
-    services,
     utils::on_demand_hashmap::{OnDemandHashMap, TokenSet},
     Config, Error, Result,
 };
@@ -517,7 +516,7 @@ impl Service {
     pub(crate) fn shutdown(&self) {
         self.shutdown.store(true, atomic::Ordering::Relaxed);
-        services().globals.rotate.fire();
+        self.rotate.fire();
     }
 }

View file

@@ -70,10 +70,8 @@ impl Service {
             let chunk_key: Vec<u64> =
                 chunk.iter().map(|(short, _)| short).copied().collect();
 
-            if let Some(cached) = services()
-                .rooms
-                .auth_chain
-                .get_cached_eventid_authchain(&chunk_key)?
+            if let Some(cached) =
+                self.get_cached_eventid_authchain(&chunk_key)?
             {
                 hits += 1;
                 full_auth_chain.extend(cached.iter().copied());
@@ -86,10 +84,8 @@ impl Service {
             let mut misses2 = 0;
             let mut i = 0;
             for (sevent_id, event_id) in chunk {
-                if let Some(cached) = services()
-                    .rooms
-                    .auth_chain
-                    .get_cached_eventid_authchain(&[sevent_id])?
+                if let Some(cached) =
+                    self.get_cached_eventid_authchain(&[sevent_id])?
                 {
                     hits2 += 1;
                     chunk_cache.extend(cached.iter().copied());
@@ -98,7 +94,7 @@ impl Service {
                     let auth_chain = Arc::new(
                         self.get_auth_chain_inner(room_id, &event_id)?,
                     );
-                    services().rooms.auth_chain.cache_auth_chain(
+                    self.cache_auth_chain(
                         vec![sevent_id],
                         Arc::clone(&auth_chain),
                     )?;
@@ -122,10 +118,7 @@ impl Service {
                 "Chunk missed",
             );
 
             let chunk_cache = Arc::new(chunk_cache);
-            services()
-                .rooms
-                .auth_chain
-                .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?;
+            self.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?;
             full_auth_chain.extend(chunk_cache.iter());
         }

View file

@@ -105,7 +105,7 @@ impl Service {
             ));
         }
 
-        services().rooms.event_handler.acl_check(origin, room_id)?;
+        self.acl_check(origin, room_id)?;
 
         // 1. Skip the PDU if we already have it as a timeline event
         if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
@@ -289,9 +289,7 @@ impl Service {
             .write()
             .await
             .insert(room_id.to_owned(), (event_id.to_owned(), start_time));
-        let r = services()
-            .rooms
-            .event_handler
+        let r = self
             .upgrade_outlier_to_timeline_pdu(
                 incoming_pdu,
                 val,

View file

@@ -45,12 +45,7 @@ impl Service {
         }
     }
 
-    #[allow(
-        clippy::too_many_arguments,
-        clippy::too_many_lines,
-        // Allowed because this function uses `services()`
-        clippy::unused_self,
-    )]
+    #[allow(clippy::too_many_arguments, clippy::too_many_lines)]
     #[tracing::instrument(skip(self))]
     pub(crate) fn paginate_relations_with_filter(
         &self,
@@ -69,9 +64,7 @@ impl Service {
         match ruma::api::Direction::Backward {
             ruma::api::Direction::Forward => {
                 // TODO: should be relations_after
-                let events_after: Vec<_> = services()
-                    .rooms
-                    .pdu_metadata
+                let events_after: Vec<_> = self
                     .relations_until(sender_user, room_id, target, from)?
                     .filter(|r| {
                         r.as_ref().map_or(true, |(_, pdu)| {
@@ -126,9 +119,7 @@ impl Service {
                 })
             }
             ruma::api::Direction::Backward => {
-                let events_before: Vec<_> = services()
-                    .rooms
-                    .pdu_metadata
+                let events_before: Vec<_> = self
                     .relations_until(sender_user, room_id, target, from)?
                     .filter(|r| {
                         r.as_ref().map_or(true, |(_, pdu)| {

View file

@@ -397,8 +397,7 @@ impl Service {
         state_key: Option<&str>,
         content: &serde_json::value::RawValue,
     ) -> Result<StateMap<Arc<PduEvent>>> {
-        let Some(shortstatehash) =
-            services().rooms.state.get_room_shortstatehash(room_id)?
+        let Some(shortstatehash) = self.get_room_shortstatehash(room_id)?
         else {
             return Ok(HashMap::new());
         };

View file

@@ -575,10 +575,8 @@ impl Service {
         if let Ok(content) =
             serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
         {
-            if let Some(related_pducount) = services()
-                .rooms
-                .timeline
-                .get_pdu_count(&content.relates_to.event_id)?
+            if let Some(related_pducount) =
+                self.get_pdu_count(&content.relates_to.event_id)?
             {
                 services()
                     .rooms
@@ -596,10 +594,8 @@ impl Service {
                 } => {
                     // We need to do it again here, because replies don't have
                     // event_id as a top level field
-                    if let Some(related_pducount) = services()
-                        .rooms
-                        .timeline
-                        .get_pdu_count(&in_reply_to.event_id)?
+                    if let Some(related_pducount) =
+                        self.get_pdu_count(&in_reply_to.event_id)?
                     {
                         services().rooms.pdu_metadata.add_relation(
                             PduCount::Normal(count2),
@@ -1134,11 +1130,8 @@ impl Service {
             return Ok(None);
         }
 
-        let pdu_id = services()
-            .rooms
-            .timeline
-            .append_pdu(pdu, pdu_json, new_room_leaves, room_id)
-            .await?;
+        let pdu_id =
+            self.append_pdu(pdu, pdu_json, new_room_leaves, room_id).await?;
 
         Ok(Some(pdu_id))
     }
@@ -1314,7 +1307,7 @@ impl Service {
             .await;
 
         // Skip the PDU if we already have it as a timeline event
-        if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(&event_id)? {
+        if let Some(pdu_id) = self.get_pdu_id(&event_id)? {
            info!(%event_id, ?pdu_id, "We already know this event");
            return Ok(());
        }