	Kernel/Threads: Dynamically allocate the TLS region for threads in the BASE region of the linear heap.
Each thread gets a 0x200-byte slot out of a 0x1000-byte page; when all 8 thread slots in a single page are used up, the kernel allocates a new page to hold another 8 entries. This is consistent with what the real kernel does.
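For reference, here is a minimal standalone sketch (not part of the diff below) of the page/slot arithmetic this scheme relies on. The constants come from the memory.h hunk and the commit message; the helper names TlsSlotAddress, TlsPage and TlsSlot are illustrative only and do not exist in the codebase.

#include <cstdint>

// Values taken from the diff below (memory.h) and the commit message.
constexpr std::uint32_t TLS_AREA_VADDR = 0x1FF82000; // base of the TLS mapping area
constexpr std::uint32_t TLS_ENTRY_SIZE = 0x200;      // one 0x200-byte slot per thread
constexpr std::uint32_t PAGE_SIZE      = 0x1000;     // 8 slots per page

// Virtual address of slot `slot` (0-7) inside TLS page `page`,
// mirroring how thread->tls_address is computed in Thread::Create.
constexpr std::uint32_t TlsSlotAddress(std::uint32_t page, std::uint32_t slot) {
    return TLS_AREA_VADDR + page * PAGE_SIZE + slot * TLS_ENTRY_SIZE;
}

// Recover the page and slot from a TLS address, mirroring the bookkeeping in Thread::Stop.
constexpr std::uint32_t TlsPage(std::uint32_t address) {
    return (address - TLS_AREA_VADDR) / PAGE_SIZE;
}
constexpr std::uint32_t TlsSlot(std::uint32_t address) {
    return ((address - TLS_AREA_VADDR) % PAGE_SIZE) / TLS_ENTRY_SIZE;
}

static_assert(TlsSlotAddress(1, 3) == 0x1FF83600, "page 1, slot 3");
static_assert(TlsPage(0x1FF83600) == 1 && TlsSlot(0x1FF83600) == 3, "round-trips back");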
This commit is contained in:
parent 3e7e8daf59
commit d192fb066d
@@ -109,7 +109,6 @@ struct MemoryArea {
static MemoryArea memory_areas[] = {
    {SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE,     "Shared Memory"}, // Shared memory
    {VRAM_VADDR,          VRAM_SIZE,              "VRAM"},          // Video memory (VRAM)
    {TLS_AREA_VADDR,      TLS_AREA_SIZE,          "TLS Area"},      // TLS memory
};

}
@@ -140,8 +140,11 @@ public:

    MemoryRegionInfo* memory_region = nullptr;

    /// Bitmask of the used TLS slots
    std::bitset<300> used_tls_slots;
    /// The Thread Local Storage area is allocated as processes create threads,
    /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
    /// holds the TLS for a specific thread. This vector contains which parts are in use for each page as a bitmask.
    /// This vector will grow as more pages are allocated for new threads.
    std::vector<std::bitset<8>> tls_slots;

    VAddr GetLinearHeapAreaAddress() const;
    VAddr GetLinearHeapBase() const;
@@ -117,9 +117,10 @@ void Thread::Stop() {
    }
    wait_objects.clear();

    Kernel::g_current_process->used_tls_slots[tls_index] = false;
    g_current_process->misc_memory_used -= Memory::TLS_ENTRY_SIZE;
    g_current_process->memory_region->used -= Memory::TLS_ENTRY_SIZE;
    // Mark the TLS slot in the thread's page as free.
    u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
    u32 tls_slot = ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
    Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot);

    HLE::Reschedule(__func__);
}
@@ -366,6 +367,31 @@ static void DebugThreadQueue() {
    }
}

/**
 * Finds a free location for the TLS section of a thread.
 * @param tls_slots The TLS page array of the thread's owner process.
 * Returns a tuple of (page, slot, alloc_needed) where:
 * page: The index of the first allocated TLS page that has free slots.
 * slot: The index of the first free slot in the indicated page.
 * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
 */
std::tuple<u32, u32, bool> GetFreeThreadLocalSlot(std::vector<std::bitset<8>>& tls_slots) {
    // Iterate over all the allocated pages, and try to find one where not all slots are used.
    for (unsigned page = 0; page < tls_slots.size(); ++page) {
        const auto& page_tls_slots = tls_slots[page];
        if (!page_tls_slots.all()) {
            // We found a page with at least one free slot, find which slot it is
            for (unsigned slot = 0; slot < page_tls_slots.size(); ++slot) {
                if (!page_tls_slots.test(slot)) {
                    return std::make_tuple(page, slot, false);
                }
            }
        }
    }

    return std::make_tuple(0, 0, true);
}

ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, s32 priority,
        u32 arg, s32 processor_id, VAddr stack_top) {
    if (priority < THREADPRIO_HIGHEST || priority > THREADPRIO_LOWEST) {
@@ -403,22 +429,50 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
    thread->name = std::move(name);
    thread->callback_handle = wakeup_callback_handle_table.Create(thread).MoveFrom();
    thread->owner_process = g_current_process;
    thread->tls_index = -1;
    thread->waitsynch_waited = false;

    // Find the next available TLS index, and mark it as used
    auto& used_tls_slots = Kernel::g_current_process->used_tls_slots;
    for (unsigned int i = 0; i < used_tls_slots.size(); ++i) {
        if (used_tls_slots[i] == false) {
            thread->tls_index = i;
            used_tls_slots[i] = true;
            break;
    auto& tls_slots = Kernel::g_current_process->tls_slots;
    bool needs_allocation = true;
    u32 available_page; // Which allocated page has free space
    u32 available_slot; // Which slot within the page is free

    std::tie(available_page, available_slot, needs_allocation) = GetFreeThreadLocalSlot(tls_slots);

    if (needs_allocation) {
        // There are no already-allocated pages with free slots, lets allocate a new one.
        // TLS pages are allocated from the BASE region in the linear heap.
        MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::BASE);
        auto& linheap_memory = memory_region->linear_heap_memory;

        if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
            LOG_ERROR(Kernel_SVC, "Not enough space in region to allocate a new TLS page for thread");
            return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel, ErrorSummary::OutOfResource, ErrorLevel::Permanent);
        }

        u32 offset = linheap_memory->size();

        // Allocate some memory from the end of the linear heap for this region.
        linheap_memory->insert(linheap_memory->end(), Memory::PAGE_SIZE, 0);
        memory_region->used += Memory::PAGE_SIZE;
        Kernel::g_current_process->linear_heap_used += Memory::PAGE_SIZE;

        tls_slots.emplace_back(0); // The page is completely available at the start
        available_page = tls_slots.size() - 1;
        available_slot = 0; // Use the first slot in the new page

        auto& vm_manager = Kernel::g_current_process->vm_manager;
        vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());

        // Map the page to the current process' address space.
        // TODO(Subv): Find the correct MemoryState for this region.
        vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
                                  linheap_memory, offset, Memory::PAGE_SIZE, MemoryState::Private);
    }

    ASSERT_MSG(thread->tls_index != -1, "Out of TLS space");
    g_current_process->misc_memory_used += Memory::TLS_ENTRY_SIZE;
    g_current_process->memory_region->used += Memory::TLS_ENTRY_SIZE;
    // Mark the slot as used
    tls_slots[available_page].set(available_slot);
    thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;

    // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
    // to initialize the context
@@ -508,10 +562,6 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
    context.cpu_registers[1] = output;
}

VAddr Thread::GetTLSAddress() const {
    return Memory::TLS_AREA_VADDR + tls_index * Memory::TLS_ENTRY_SIZE;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

void ThreadingInit() {
@@ -127,7 +127,7 @@ public:
     * Returns the Thread Local Storage address of the current thread
     * @returns VAddr of the thread's TLS
     */
    VAddr GetTLSAddress() const;
    VAddr GetTLSAddress() const { return tls_address; }

    Core::ThreadContext context;

@@ -144,7 +144,7 @@ public:

    s32 processor_id;

    s32 tls_index; ///< Index of the Thread Local Storage of the thread
    VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread

    bool waitsynch_waited; ///< Set to true if the last svcWaitSynch call caused the thread to wait
@@ -100,15 +100,9 @@ enum : VAddr {
    SHARED_PAGE_SIZE      = 0x00001000,
    SHARED_PAGE_VADDR_END = SHARED_PAGE_VADDR + SHARED_PAGE_SIZE,

    // TODO(yuriks): The size of this area is dynamic, the kernel grows
    // it as more and more threads are created. For now we'll just use a
    // hardcoded value.
    /// Area where TLS (Thread-Local Storage) buffers are allocated.
    TLS_AREA_VADDR     = 0x1FF82000,
    TLS_ENTRY_SIZE     = 0x200,
    TLS_AREA_SIZE      = 300 * TLS_ENTRY_SIZE + 0x800, // Space for up to 300 threads + round to page size
    TLS_AREA_VADDR_END = TLS_AREA_VADDR + TLS_AREA_SIZE,

    /// Equivalent to LINEAR_HEAP_VADDR, but expanded to cover the extra memory in the New 3DS.
    NEW_LINEAR_HEAP_VADDR     = 0x30000000,