Skip to content

Commit ec206a0

Browse files
committed
Split replay commands into start-middle-end.
This allows us to prime the state on the replay device while doing other things in the server. It also means we do not have to process the initial commands every time we want to generate anything. It is not enabled for OpenGL ES yet, which retains its previous behavior. There are some dependencies between the compat layer and DCE that I have not figured out yet; nothing there should regress at all, however.
1 parent c6eaca1 commit ec206a0

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed

+1298
-419
lines changed

cmd/gapir/cc/main.cpp

+157-16
Original file line number | Diff line number | Diff line change
@@ -92,6 +92,14 @@ std::string getTempOnDiskCachePath() {
9292
}
9393
#endif
9494

95+
struct PrewarmData {
96+
GrpcReplayService* prewarm_service = nullptr;
97+
Context* prewarm_context = nullptr;
98+
std::string prewarm_id;
99+
std::string cleanup_id;
100+
std::string current_state;
101+
};
102+
95103
// Setup creates and starts a replay server at the given URI port. Returns the
96104
// created and started server.
97105
// Note the given memory manager and the crash handler, they may be used for
@@ -101,15 +109,14 @@ std::string getTempOnDiskCachePath() {
101109
std::unique_ptr<Server> Setup(const char* uri, const char* authToken,
102110
ResourceCache* cache, int idleTimeoutSec,
103111
core::CrashHandler* crashHandler,
104-
MemoryManager* memMgr, std::mutex* lock) {
112+
MemoryManager* memMgr, PrewarmData* prewarm,
113+
std::mutex* lock) {
105114
// Return a replay server with the following replay ID handler. The first
106115
// package for a replay must be the ID of the replay.
107116
return Server::createAndStart(
108117
uri, authToken, idleTimeoutSec,
109-
[cache, memMgr, crashHandler, lock](GrpcReplayService* replayConn,
110-
const std::string& replayId) {
111-
std::lock_guard<std::mutex> mem_mgr_crash_hdl_lock_guard(*lock);
112-
118+
[cache, memMgr, crashHandler, lock,
119+
prewarm](GrpcReplayService* replayConn) {
113120
std::unique_ptr<ResourceLoader> resLoader;
114121
if (cache == nullptr) {
115122
resLoader = PassThroughResourceLoader::create(replayConn);
@@ -126,16 +133,134 @@ std::unique_ptr<Server> Setup(const char* uri, const char* authToken,
126133
Context::create(replayConn, *crashHandler, resLoader.get(), memMgr);
127134

128135
if (context == nullptr) {
129-
GAPID_WARNING("Loading Context failed!");
136+
GAPID_ERROR("Loading Context failed!");
130137
return;
131138
}
132-
if (cache != nullptr) {
133-
context->prefetch(cache);
134-
}
135139

136-
GAPID_INFO("Replay started");
137-
bool ok = context->interpret();
138-
GAPID_INFO("Replay %s", ok ? "finished successfully" : "failed");
140+
auto cleanup_state = [&]() {
141+
if (!prewarm->prewarm_context->initialize(prewarm->cleanup_id)) {
142+
return false;
143+
}
144+
if (cache != nullptr) {
145+
prewarm->prewarm_context->prefetch(cache);
146+
}
147+
bool ok = prewarm->prewarm_context->interpret();
148+
if (!ok) {
149+
return false;
150+
}
151+
if (!prewarm->prewarm_context->cleanup()) {
152+
return false;
153+
}
154+
prewarm->prewarm_id = "";
155+
prewarm->cleanup_id = "";
156+
prewarm->current_state = "";
157+
prewarm->prewarm_context = nullptr;
158+
prewarm->prewarm_service = nullptr;
159+
return true;
160+
};
161+
162+
auto prime_state = [&](std::string state, std::string cleanup) {
163+
GAPID_INFO("Priming %s", state.c_str());
164+
if (context->initialize(state)) {
165+
GAPID_INFO("Replay context initialized successfully");
166+
} else {
167+
GAPID_ERROR("Replay context initialization failed");
168+
return false;
169+
}
170+
if (cache != nullptr) {
171+
context->prefetch(cache);
172+
}
173+
GAPID_INFO("Replay started");
174+
bool ok = context->interpret(false);
175+
GAPID_INFO("Priming %s", ok ? "finished successfully" : "failed");
176+
if (!ok) {
177+
return false;
178+
}
179+
180+
if (!cleanup.empty()) {
181+
prewarm->current_state = state;
182+
prewarm->cleanup_id = cleanup;
183+
prewarm->prewarm_id = state;
184+
prewarm->prewarm_service = replayConn;
185+
prewarm->prewarm_context = context.get();
186+
}
187+
return true;
188+
};
189+
190+
do {
191+
auto req = replayConn->getReplayRequest();
192+
if (!req) {
193+
GAPID_INFO("No more requests!");
194+
break;
195+
}
196+
GAPID_INFO("Got request %d", req->req_case());
197+
switch (req->req_case()) {
198+
case replay_service::ReplayRequest::kReplay: {
199+
std::lock_guard<std::mutex> mem_mgr_crash_hdl_lock_guard(*lock);
200+
201+
if (prewarm->current_state != req->replay().dependent_id()) {
202+
GAPID_INFO("Trying to get into the correct state");
203+
cleanup_state();
204+
if (req->replay().dependent_id() != "") {
205+
prime_state(req->replay().dependent_id(), "");
206+
}
207+
} else {
208+
GAPID_INFO("Already in the correct state");
209+
}
210+
GAPID_INFO("Running %s", req->replay().replay_id().c_str());
211+
if (context->initialize(req->replay().replay_id())) {
212+
GAPID_INFO("Replay context initialized successfully");
213+
} else {
214+
GAPID_ERROR("Replay context initialization failed");
215+
continue;
216+
}
217+
if (cache != nullptr) {
218+
context->prefetch(cache);
219+
}
220+
221+
GAPID_INFO("Replay started");
222+
bool ok = context->interpret();
223+
GAPID_INFO("Replay %s", ok ? "finished successfully" : "failed");
224+
replayConn->sendReplayFinished();
225+
if (!context->cleanup()) {
226+
return;
227+
}
228+
prewarm->current_state = "";
229+
if (prewarm->prewarm_service && !prewarm->prewarm_id.empty() &&
230+
!prewarm->cleanup_id.empty()) {
231+
prewarm->prewarm_service->primeState(prewarm->prewarm_id,
232+
prewarm->cleanup_id);
233+
}
234+
break;
235+
}
236+
case replay_service::ReplayRequest::kPrewarm: {
237+
std::lock_guard<std::mutex> mem_mgr_crash_hdl_lock_guard(*lock);
238+
// We want to pre-warm into the existing state, good deal.
239+
if (prewarm->current_state == req->prewarm().prerun_id()) {
240+
GAPID_INFO(
241+
"Already primed in the correct state, no more work is "
242+
"needed");
243+
prewarm->cleanup_id = req->prewarm().cleanup_id();
244+
break;
245+
}
246+
if (prewarm->current_state != "") {
247+
if (!cleanup_state()) {
248+
GAPID_ERROR(
249+
"Could not clean up after previous replay, in a bad "
250+
"state now");
251+
return;
252+
}
253+
}
254+
if (!prime_state(std::move(req->prewarm().prerun_id()),
255+
std::move(req->prewarm().cleanup_id()))) {
256+
GAPID_ERROR("Could not prime state: in a bad state now");
257+
return;
258+
}
259+
break;
260+
}
261+
default: { break; }
262+
}
263+
} while (true);
139264
});
140265
}
141266

@@ -234,9 +359,10 @@ void android_main(struct android_app* app) {
234359
auto opts = Options::Parse(app);
235360
auto cache = InMemoryResourceCache::create(memoryManager.getTopAddress());
236361
std::mutex lock;
362+
PrewarmData data;
237363
std::unique_ptr<Server> server =
238364
Setup(uri.c_str(), opts.authToken.c_str(), cache.get(),
239-
opts.idleTimeoutSec, &crashHandler, &memoryManager, &lock);
365+
opts.idleTimeoutSec, &crashHandler, &memoryManager, &data, &lock);
240366
std::atomic<bool> serverIsDone(false);
241367
std::thread waiting_thread([&]() {
242368
server.get()->wait();
@@ -498,11 +624,24 @@ static int replayArchive(Options opts) {
498624
auto onDiskCache = OnDiskResourceCache::create(opts.replayArchive, false);
499625
std::unique_ptr<ResourceLoader> resLoader =
500626
CachedResourceLoader::create(onDiskCache.get(), nullptr);
627+
501628
std::unique_ptr<Context> context = Context::create(
502629
&replayArchive, crashHandler, resLoader.get(), &memoryManager);
503630

631+
if (context->initialize("payload")) {
632+
GAPID_DEBUG("Replay context initialized successfully");
633+
} else {
634+
GAPID_ERROR("Replay context initialization failed");
635+
return EXIT_FAILURE;
636+
}
637+
504638
GAPID_INFO("Replay started");
505639
bool ok = context->interpret();
640+
replayArchive.sendReplayFinished();
641+
if (!context->cleanup()) {
642+
GAPID_ERROR("Replay cleanup failed");
643+
return EXIT_FAILURE;
644+
}
506645
GAPID_INFO("Replay %s", ok ? "finished successfully" : "failed");
507646

508647
return ok ? EXIT_SUCCESS : EXIT_FAILURE;
@@ -554,9 +693,11 @@ static int startServer(Options opts) {
554693
auto cache = createCache(opts.onDiskCacheOptions, &memoryManager);
555694

556695
std::mutex lock;
557-
std::unique_ptr<Server> server = Setup(
558-
uri.c_str(), (authToken.size() > 0) ? authToken.data() : nullptr,
559-
cache.get(), opts.idleTimeoutSec, &crashHandler, &memoryManager, &lock);
696+
PrewarmData data;
697+
std::unique_ptr<Server> server =
698+
Setup(uri.c_str(), (authToken.size() > 0) ? authToken.data() : nullptr,
699+
cache.get(), opts.idleTimeoutSec, &crashHandler, &memoryManager,
700+
&data, &lock);
560701
// The following message is parsed by launchers to detect the selected port.
561702
// DO NOT CHANGE!
562703
printf("Bound on port '%s'\n", portStr.c_str());

cmd/gapit/benchmark.go

+32-28
Original file line number | Diff line number | Diff line change
@@ -371,12 +371,44 @@ func (verb *benchmarkVerb) Run(ctx context.Context, flags flag.FlagSet) error {
371371

372372
status.Event(ctx, status.GlobalScope, "Load done, interaction starting %+v", verb.traceSizeInBytes)
373373

374+
// Sleep for 20 seconds
375+
time.Sleep(20 * time.Second)
376+
374377
ctx = status.Start(oldCtx, "Interacting with frame")
375378
// One interaction done
376379
verb.interactionStartTime = time.Now()
380+
377381
interactionWG := sync.WaitGroup{}
382+
// Get the framebuffer
378383
interactionWG.Add(1)
384+
go func() {
385+
ctx = status.Start(oldCtx, "Getting Framebuffer")
386+
defer status.Finish(ctx)
387+
defer interactionWG.Done()
388+
hints := &service.UsageHints{Primary: true}
389+
settings := &service.RenderSettings{MaxWidth: uint32(0xFFFFFFFF), MaxHeight: uint32(0xFFFFFFFF)}
390+
iip, err := client.GetFramebufferAttachment(ctx,
391+
&service.ReplaySettings{
392+
Device: device,
393+
DisableReplayOptimization: verb.NoOpt,
394+
DisplayToSurface: false,
395+
},
396+
commandToClick, api.FramebufferAttachment_Color0, settings, hints)
397+
398+
iio, err := client.Get(ctx, iip.Path(), resolveConfig)
399+
if err != nil {
400+
return
401+
}
402+
ii := iio.(*img.Info)
403+
dataO, err := client.Get(ctx, path.NewBlob(ii.Bytes.ID()).Path(), resolveConfig)
404+
if err != nil {
405+
panic(log.Errf(ctx, err, "Get frame image data failed"))
406+
}
407+
_, _, _ = int(ii.Width), int(ii.Height), dataO.([]byte)
408+
}()
409+
379410
// Get state tree
411+
interactionWG.Add(1)
380412
go func() {
381413
ctx = status.Start(oldCtx, "Resolving State Tree")
382414
defer status.Finish(ctx)
@@ -419,34 +451,6 @@ func (verb *benchmarkVerb) Run(ctx context.Context, flags flag.FlagSet) error {
419451
gotNodes.Wait()
420452
}()
421453

422-
// Get the framebuffer
423-
interactionWG.Add(1)
424-
go func() {
425-
ctx = status.Start(oldCtx, "Getting Framebuffer")
426-
defer status.Finish(ctx)
427-
defer interactionWG.Done()
428-
hints := &service.UsageHints{Primary: true}
429-
settings := &service.RenderSettings{MaxWidth: uint32(0xFFFFFFFF), MaxHeight: uint32(0xFFFFFFFF)}
430-
iip, err := client.GetFramebufferAttachment(ctx,
431-
&service.ReplaySettings{
432-
Device: device,
433-
DisableReplayOptimization: verb.NoOpt,
434-
DisplayToSurface: false,
435-
},
436-
commandToClick, api.FramebufferAttachment_Color0, settings, hints)
437-
438-
iio, err := client.Get(ctx, iip.Path(), resolveConfig)
439-
if err != nil {
440-
return
441-
}
442-
ii := iio.(*img.Info)
443-
dataO, err := client.Get(ctx, path.NewBlob(ii.Bytes.ID()).Path(), resolveConfig)
444-
if err != nil {
445-
panic(log.Errf(ctx, err, "Get frame image data failed"))
446-
}
447-
_, _, _ = int(ii.Width), int(ii.Height), dataO.([]byte)
448-
}()
449-
450454
// Get the mesh
451455
interactionWG.Add(1)
452456
go func() {

cmd/gapit/report.go

+12
Original file line number | Diff line number | Diff line change
@@ -15,10 +15,12 @@
1515
package main
1616

1717
import (
18+
"bytes"
1819
"context"
1920
"flag"
2021
"fmt"
2122
"io"
23+
"io/ioutil"
2224
"os"
2325

2426
"github.com/google/gapid/core/app"
@@ -51,6 +53,16 @@ func (verb *reportVerb) Run(ctx context.Context, flags flag.FlagSet) error {
5153
}
5254

5355
client, capturePath, err := getGapisAndLoadCapture(ctx, verb.Gapis, verb.Gapir, flags.Arg(0), verb.CaptureFileFlags)
56+
gapisTrace := &bytes.Buffer{}
57+
stopGapisTrace, err := client.Profile(ctx, nil, gapisTrace, 1)
58+
if err != nil {
59+
return err
60+
}
61+
defer func() {
62+
stopGapisTrace()
63+
ioutil.WriteFile("report.out", gapisTrace.Bytes(), 0644)
64+
}()
65+
5466
if err != nil {
5567
return err
5668
}

core/memory_tracker/cc/windows/memory_tracker.cpp

+3
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,6 @@
11
#include "core/memory_tracker/cc/memory_tracker.h"
2+
3+
#if COHERENT_TRACKING_ENABLED
24
#include <Windows.h>
35
#include <atomic>
46
#include <functional>
@@ -78,3 +80,4 @@ uint32_t GetPageSize() {
7880

7981
} // namespace track_memory
8082
} // namespace gapii
83+
#endif  // COHERENT_TRACKING_ENABLED

gapir/cc/archive_replay_service.cpp

+2-1
Original file line number | Diff line number | Diff line change
@@ -22,7 +22,8 @@
2222

2323
namespace gapir {
2424

25-
std::unique_ptr<ReplayService::Payload> ArchiveReplayService::getPayload() {
25+
std::unique_ptr<ReplayService::Payload> ArchiveReplayService::getPayload(
26+
const std::string&) {
2627
std::fstream input(mFilePrefix, std::ios::in | std::ios::binary);
2728
std::unique_ptr<replay_service::Payload> payload(new replay_service::Payload);
2829
payload->ParseFromIstream(&input);

0 commit comments

Comments
 (0)