
on_select floater removal

19 removals (61 lines) · 76 additions (115 lines)

-def on_select(self, click: ViewerDragbox):
+def on_select(self, click: ViewerRectSelect):
+    self.selected_gaussian_indices.clear()
+
     def update_colors():
         """
         Updates the colors of dragboxes so that it colors their intersecting gaussians red, and other gaussians
         their original colors
         """
-        # if self.was_selected and self.original_colors is not None:
-        #     with torch.no_grad():
-        #         self.gauss_params["features_dc"] = self.original_colors.clone()
         if self.original_colors is None:
             self.original_colors = self.features_dc.clone()
         with torch.no_grad():
             self.gauss_params["features_dc"] = self.original_colors.clone()
 
         with torch.no_grad():
             intersection_mask = torch.ones(self.features_dc.shape[0], dtype=torch.bool, device=self.device)
 
             for indices in self.selected_gaussian_indices:
                 current_mask = torch.zeros_like(intersection_mask)
                 current_mask[list(indices)] = True
                 intersection_mask &= current_mask
 
+            print("Number of selected gaussians: ", intersection_mask.sum().item())
+
             intersection_indices = intersection_mask.nonzero(as_tuple=True)[0]
             self.gauss_params["features_dc"][intersection_indices] = RGB2SH(
                 torch.tensor([1, 0, 0], device=self.device)
             )
 
     with torch.no_grad():
-        camera = self.viewer_control.get_camera(300, 400)
+        # 0 is for the first client, not the best solution, but the one that works for now
+        client = self.viewer_control.viser_server.get_clients()[0]
+        camera_state = self.viewer_control.viewer.get_camera_state(client)
+        aspect_ratio = camera_state.aspect
+
+        # 0 is for the first client, not the best solution, but the one that works for now
+        (H, W) = self.viewer_control.viewer.render_statemachines[0]._calculate_image_res(aspect_ratio)
+        image_dim = (H, W)
+
+        camera = self.viewer_control.get_camera(img_height=image_dim[0],
+                                                img_width=image_dim[1])
         assert camera is not None
         camera = camera.to(self.device)
 
     self.eval()
-    self.get_outputs_for_camera(camera)
+
+    with torch.no_grad():
+        camera_scale_fac = self._get_downscale_factor()
+        camera.rescale_output_resolution(1 / camera_scale_fac)
+        K = camera.get_intrinsics_matrices().cuda()
+
+        W, H = int(camera.width.item()), int(camera.height.item())
+
+        camera.rescale_output_resolution(camera_scale_fac)  # type: ignore
+
+        viewmat = get_viewmat(camera.camera_to_worlds)
+        means2d = self.example_world_to_cam(self.means, viewmat)
+        means2d = self.example_persp_proj(
+            means2d,  # [1, N, 3]
+            K,  # [1, 3, 3]
+            image_dim[0],
+            image_dim[1]
+        )
+
     self.train()
 
-    # pdb.set_trace()
-
     # Convert dragbox coordinates from screen to model space
-    box_min = np.array(click.box_min)
-    box_max = np.array(click.box_max)
+    box_min = np.array(click.min_bounds)
+    box_max = np.array(click.max_bounds)
 
     # Flip the y-axis coordinates due to screen space convention
     box_min[1], box_max[1] = -box_max[1], -box_min[1]
 
     # Adjust the coordinates to the model space
-    box_min[0] = 200 * box_min[0] + 200
-    box_max[0] = 200 * box_max[0] + 200
-    box_min[1] = 150 * box_min[1] + 150
-    box_max[1] = 150 * box_max[1] + 150
+    box_min[0] = image_dim[0] // 2 * box_min[0] + image_dim[0] // 2
+    box_max[0] = image_dim[0] // 2 * box_max[0] + image_dim[0] // 2
+    box_min[1] = image_dim[1] // 2 * box_min[1] + image_dim[1] // 2
+    box_max[1] = image_dim[1] // 2 * box_max[1] + image_dim[1] // 2
 
-    # Create a mask for the Gaussians inside the dragbox
+    print("Chosen dragbox coordinates: ", box_min, box_max)
+
     mask = (
-        (self.xys[:, 0] >= box_min[0])
-        & (self.xys[:, 0] <= box_max[0])
-        & (self.xys[:, 1] >= box_min[1])
-        & (self.xys[:, 1] <= box_max[1])
+        (means2d[0, :, 0] >= box_min[0])
+        & (means2d[0, :, 0] <= box_max[0])
+        & (means2d[0, :, 1] >= box_min[1])
+        & (means2d[0, :, 1] <= box_max[1])
     )
 
     # Track the indices of modified (selected) Gaussians
     selected_indices = torch.where(mask)
     self.selected_gaussian_indices.append(selected_indices)  # Add a new set to the list
 
     update_colors()
 
-    # self.was_selected = True
-
     self.button.set_disabled(False)
-    self.viewer_control.unregister_pointer_cb(self.on_select)
+    # self.viewer_control.unregister_pointer_cb(self.on_select)
+    self.viewer_control.unregister_pointer_cb()
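
The new code projects the Gaussian means into screen space through self.example_world_to_cam and self.example_persp_proj, neither of which appears in this diff. Below is a minimal sketch of what such helpers might look like, assuming viewmat is a batched [C, 4, 4] world-to-camera matrix (as returned by get_viewmat) and K a batched [C, 3, 3] pinhole intrinsics matrix; the free-function form and the decision to ignore height/width (no clipping to the image bounds) are illustrative assumptions, not the project's actual implementation:

import torch

def example_world_to_cam(means: torch.Tensor, viewmat: torch.Tensor) -> torch.Tensor:
    """Transform world-space means [N, 3] into camera space [C, N, 3],
    given world-to-camera matrices viewmat [C, 4, 4]."""
    R = viewmat[:, :3, :3]  # [C, 3, 3]
    t = viewmat[:, :3, 3]   # [C, 3]
    # p_cam = R @ p_world + t, broadcast over the C cameras
    return torch.einsum("cij,nj->cni", R, means) + t[:, None, :]

def example_persp_proj(means_cam: torch.Tensor, K: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """Pinhole-project camera-space points [C, N, 3] to pixel coordinates [C, N, 2]
    using intrinsics K [C, 3, 3]. height/width are accepted to match the call site
    but unused in this simplified sketch (no image-bounds clipping)."""
    x, y, z = means_cam[..., 0], means_cam[..., 1], means_cam[..., 2]
    fx = K[:, 0, 0].unsqueeze(-1)
    fy = K[:, 1, 1].unsqueeze(-1)
    cx = K[:, 0, 2].unsqueeze(-1)
    cy = K[:, 1, 2].unsqueeze(-1)
    u = fx * x / z + cx  # pixel column
    v = fy * y / z + cy  # pixel row
    return torch.stack([u, v], dim=-1)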
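
The coordinate adjustment above follows the pattern dim // 2 * ndc + dim // 2, which maps a normalized coordinate in roughly [-1, 1] onto [0, dim]. A tiny worked example, assuming a hypothetical 400-pixel extent along one image axis (which image dimension pairs with which box axis depends on the viewer's convention):

dim = 400  # hypothetical render extent in pixels along one axis
for ndc in (-1.0, -0.5, 0.0, 1.0):
    # dim // 2 * ndc + dim // 2 maps [-1, 1] to [0, dim]
    print(ndc, dim // 2 * ndc + dim // 2)  # -> 0.0, 100.0, 200.0, 400.0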
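
update_colors recolors only the Gaussians present in every recorded selection: each index set is expanded into a boolean mask and ANDed into intersection_mask. A self-contained toy illustration of that pattern (the index values are made up):

import torch

num_gaussians = 5
selections = [torch.tensor([0, 1, 3]), torch.tensor([1, 3, 4])]

# Start with everything selected, then intersect with each selection's mask.
intersection_mask = torch.ones(num_gaussians, dtype=torch.bool)
for indices in selections:
    current_mask = torch.zeros_like(intersection_mask)
    current_mask[indices] = True
    intersection_mask &= current_mask

print(intersection_mask.nonzero(as_tuple=True)[0])  # tensor([1, 3])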