BufferLayer.cpp

/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
//#define LOG_NDEBUG 0
#undef LOG_TAG
#define LOG_TAG "BufferLayer"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include "BufferLayer.h"

#include <compositionengine/CompositionEngine.h>
#include <compositionengine/Display.h>
#include <compositionengine/Layer.h>
#include <compositionengine/LayerCreationArgs.h>
#include <compositionengine/OutputLayer.h>
#include <compositionengine/impl/LayerCompositionState.h>
#include <compositionengine/impl/OutputLayerCompositionState.h>
#include <cutils/compiler.h>
#include <cutils/native_handle.h>
#include <cutils/properties.h>
#include <gui/BufferItem.h>
#include <gui/BufferQueue.h>
#include <gui/LayerDebugInfo.h>
#include <gui/Surface.h>
#include <renderengine/RenderEngine.h>
#include <ui/DebugUtils.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/NativeHandle.h>
#include <utils/StopWatch.h>
#include <utils/Trace.h>

#include <cmath>
#include <cstdlib>
#include <mutex>
#include <sstream>

#include "Colorizer.h"
#include "DisplayDevice.h"
#include "LayerRejecter.h"
#include "TimeStats/TimeStats.h"
namespace android {

BufferLayer::BufferLayer(const LayerCreationArgs& args)
      : Layer(args),
        mTextureName(args.flinger->getNewTexture()),
        mCompositionLayer{mFlinger->getCompositionEngine().createLayer(
                compositionengine::LayerCreationArgs{this})} {
    ALOGV("Creating Layer %s", args.name.string());

    mPremultipliedAlpha = !(args.flags & ISurfaceComposerClient::eNonPremultiplied);
    mPotentialCursor = args.flags & ISurfaceComposerClient::eCursorWindow;
    mProtectedByApp = args.flags & ISurfaceComposerClient::eProtectedByApp;
}
BufferLayer::~BufferLayer() {
    mFlinger->deleteTextureAsync(mTextureName);
    mFlinger->mTimeStats->onDestroy(getSequence());
}
void BufferLayer::useSurfaceDamage() {
    if (mFlinger->mForceFullDamage) {
        surfaceDamageRegion = Region::INVALID_REGION;
    } else {
        surfaceDamageRegion = getDrawingSurfaceDamage();
    }
}

void BufferLayer::useEmptyDamage() {
    surfaceDamageRegion.clear();
}
bool BufferLayer::isOpaque(const Layer::State& s) const {
    // if we don't have a buffer or sidebandStream yet, we're translucent regardless of the
    // layer's opaque flag.
    if ((mSidebandStream == nullptr) && (mActiveBuffer == nullptr)) {
        return false;
    }

    // if the layer has the opaque flag, then we're always opaque,
    // otherwise we use the current buffer's format.
    return ((s.flags & layer_state_t::eLayerOpaque) != 0) || getOpacityForFormat(getPixelFormat());
}

bool BufferLayer::isVisible() const {
    bool visible = !(isHiddenByPolicy()) && getAlpha() > 0.0f &&
            (mActiveBuffer != nullptr || mSidebandStream != nullptr);
    mFlinger->mScheduler->setLayerVisibility(mSchedulerLayerHandle, visible);

    return visible;
}

bool BufferLayer::isFixedSize() const {
    return getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE;
}

bool BufferLayer::usesSourceCrop() const {
    return true;
}
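
// Builds the inverse of a buffer/display orientation expressed as
// NATIVE_WINDOW_TRANSFORM_* flags by composing the corresponding 4x4
// rotation/flip matrices; used below to undo the primary display's
// orientation when computing the texture transform.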
static constexpr mat4 inverseOrientation(uint32_t transform) {
    const mat4 flipH(-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1);
    const mat4 flipV(1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1);
    const mat4 rot90(0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1);
    mat4 tr;

    if (transform & NATIVE_WINDOW_TRANSFORM_ROT_90) {
        tr = tr * rot90;
    }
    if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_H) {
        tr = tr * flipH;
    }
    if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_V) {
        tr = tr * flipV;
    }
    return inverse(tr);
}
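
// Fills in the renderengine::LayerSettings used for GPU (client) composition.
// A layer that has never been drawn into only contributes to clearRegion (the
// uncovered area below it is painted black) and returns false. Protected or
// secure content that cannot be shown is blacked out by clearing the buffer
// and forcing alpha to 1. Otherwise the active buffer, acquire fence and
// texture transform are set up, optionally undoing the primary display's
// orientation.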
bool BufferLayer::prepareClientLayer(const RenderArea& renderArea, const Region& clip,
                                     bool useIdentityTransform, Region& clearRegion,
                                     const bool supportProtectedContent,
                                     renderengine::LayerSettings& layer) {
    ATRACE_CALL();
    Layer::prepareClientLayer(renderArea, clip, useIdentityTransform, clearRegion,
                              supportProtectedContent, layer);
    if (CC_UNLIKELY(mActiveBuffer == 0)) {
        // the texture has not been created yet, this Layer has
        // in fact never been drawn into. This happens frequently with
        // SurfaceView because the WindowManager can't know when the client
        // has drawn the first time.

        // If there is nothing under us, we paint the screen in black, otherwise
        // we just skip this update.

        // figure out if there is something below us
        Region under;
        bool finished = false;
        mFlinger->mDrawingState.traverseInZOrder([&](Layer* layer) {
            if (finished || layer == static_cast<BufferLayer const*>(this)) {
                finished = true;
                return;
            }
            under.orSelf(layer->visibleRegion);
        });
        // if not everything below us is covered, we plug the holes!
        Region holes(clip.subtract(under));
        if (!holes.isEmpty()) {
            clearRegion.orSelf(holes);
        }
        return false;
    }
    bool blackOutLayer =
            (isProtected() && !supportProtectedContent) || (isSecure() && !renderArea.isSecure());
    const State& s(getDrawingState());
    if (!blackOutLayer) {
        layer.source.buffer.buffer = mActiveBuffer;
        layer.source.buffer.isOpaque = isOpaque(s);
        layer.source.buffer.fence = mActiveBufferFence;
        layer.source.buffer.textureName = mTextureName;
        layer.source.buffer.usePremultipliedAlpha = getPremultipledAlpha();
        layer.source.buffer.isY410BT2020 = isHdrY410();
        // TODO: we could be more subtle with isFixedSize()
        const bool useFiltering = needsFiltering(renderArea.getDisplayDevice()) ||
                renderArea.needsFiltering() || isFixedSize();

        // Query the texture matrix given our current filtering mode.
        float textureMatrix[16];
        setFilteringEnabled(useFiltering);
        getDrawingTransformMatrix(textureMatrix);

        if (getTransformToDisplayInverse()) {
            /*
             * the code below applies the primary display's inverse transform to
             * the texture transform
             */
            uint32_t transform = DisplayDevice::getPrimaryDisplayOrientationTransform();
            mat4 tr = inverseOrientation(transform);

            /**
             * TODO(b/36727915): This is basically a hack.
             *
             * Ensure that regardless of the parent transformation,
             * this buffer is always transformed from native display
             * orientation to display orientation. For example, in the case
             * of a camera where the buffer remains in native orientation,
             * we want the pixels to always be upright.
             */
            sp<Layer> p = mDrawingParent.promote();
            if (p != nullptr) {
                const auto parentTransform = p->getTransform();
                tr = tr * inverseOrientation(parentTransform.getOrientation());
            }

            // and finally apply it to the original texture matrix
            const mat4 texTransform(mat4(static_cast<const float*>(textureMatrix)) * tr);
            memcpy(textureMatrix, texTransform.asArray(), sizeof(textureMatrix));
        }

        const Rect win{getBounds()};
        float bufferWidth = getBufferSize(s).getWidth();
        float bufferHeight = getBufferSize(s).getHeight();

        // BufferStateLayers can have a "buffer size" of [0, 0, -1, -1] when no display frame has
        // been set and there is no parent layer bounds. In that case, the scale is meaningless so
        // ignore them.
        if (!getBufferSize(s).isValid()) {
            bufferWidth = float(win.right) - float(win.left);
            bufferHeight = float(win.bottom) - float(win.top);
        }

        const float scaleHeight = (float(win.bottom) - float(win.top)) / bufferHeight;
        const float scaleWidth = (float(win.right) - float(win.left)) / bufferWidth;
        const float translateY = float(win.top) / bufferHeight;
        const float translateX = float(win.left) / bufferWidth;

        // Flip y-coordinates because GLConsumer expects OpenGL convention.
        mat4 tr = mat4::translate(vec4(.5, .5, 0, 1)) * mat4::scale(vec4(1, -1, 1, 1)) *
                mat4::translate(vec4(-.5, -.5, 0, 1)) *
                mat4::translate(vec4(translateX, translateY, 0, 1)) *
                mat4::scale(vec4(scaleWidth, scaleHeight, 1.0, 1.0));

        layer.source.buffer.useTextureFiltering = useFiltering;
        layer.source.buffer.textureTransform = mat4(static_cast<const float*>(textureMatrix)) * tr;
    } else {
        // If layer is blacked out, force alpha to 1 so that we draw a black color
        // layer.
        layer.source.buffer.buffer = nullptr;
        layer.alpha = 1.0;
    }

    return true;
}
bool BufferLayer::isHdrY410() const {
    // pixel format is HDR Y410 masquerading as RGBA_1010102
    return (mCurrentDataSpace == ui::Dataspace::BT2020_ITU_PQ &&
            getDrawingApi() == NATIVE_WINDOW_API_MEDIA &&
            mActiveBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102);
}
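
// Pushes this layer's per-frame state to its HWC2 layer: the visible region
// (clipped to the display viewport and transformed to display space), surface
// damage, requested composition type (SIDEBAND, CURSOR or DEVICE), dataspace,
// HDR metadata and per-layer color transform. If the color transform is not
// supported, composition falls back to CLIENT. The buffer itself is set last,
// via setHwcLayerBuffer().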
void BufferLayer::setPerFrameData(const sp<const DisplayDevice>& displayDevice,
                                  const ui::Transform& transform, const Rect& viewport,
                                  int32_t supportedPerFrameMetadata,
                                  const ui::Dataspace targetDataspace) {
    RETURN_IF_NO_HWC_LAYER(displayDevice);

    // Apply this display's projection's viewport to the visible region
    // before giving it to the HWC HAL.
    Region visible = transform.transform(visibleRegion.intersect(viewport));

    const auto outputLayer = findOutputLayerForDisplay(displayDevice);
    LOG_FATAL_IF(!outputLayer || !outputLayer->getState().hwc);
    auto& hwcLayer = (*outputLayer->getState().hwc).hwcLayer;
    auto error = hwcLayer->setVisibleRegion(visible);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set visible region: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
        visible.dump(LOG_TAG);
    }
    outputLayer->editState().visibleRegion = visible;

    auto& layerCompositionState = getCompositionLayer()->editState().frontEnd;

    error = hwcLayer->setSurfaceDamage(surfaceDamageRegion);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set surface damage: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
        surfaceDamageRegion.dump(LOG_TAG);
    }
    layerCompositionState.surfaceDamage = surfaceDamageRegion;

    // Sideband layers
    if (layerCompositionState.sidebandStream.get()) {
        setCompositionType(displayDevice, Hwc2::IComposerClient::Composition::SIDEBAND);
        ALOGV("[%s] Requesting Sideband composition", mName.string());
        error = hwcLayer->setSidebandStream(layerCompositionState.sidebandStream->handle());
        if (error != HWC2::Error::None) {
            ALOGE("[%s] Failed to set sideband stream %p: %s (%d)", mName.string(),
                  layerCompositionState.sidebandStream->handle(), to_string(error).c_str(),
                  static_cast<int32_t>(error));
        }
        layerCompositionState.compositionType = Hwc2::IComposerClient::Composition::SIDEBAND;
        return;
    }

    // Device or Cursor layers
    if (mPotentialCursor) {
        ALOGV("[%s] Requesting Cursor composition", mName.string());
        setCompositionType(displayDevice, Hwc2::IComposerClient::Composition::CURSOR);
    } else {
        ALOGV("[%s] Requesting Device composition", mName.string());
        setCompositionType(displayDevice, Hwc2::IComposerClient::Composition::DEVICE);
    }

    ui::Dataspace dataspace = isColorSpaceAgnostic() && targetDataspace != ui::Dataspace::UNKNOWN
            ? targetDataspace
            : mCurrentDataSpace;
    error = hwcLayer->setDataspace(dataspace);
    if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to set dataspace %d: %s (%d)", mName.string(), dataspace,
              to_string(error).c_str(), static_cast<int32_t>(error));
    }

    const HdrMetadata& metadata = getDrawingHdrMetadata();
    error = hwcLayer->setPerFrameMetadata(supportedPerFrameMetadata, metadata);
    if (error != HWC2::Error::None && error != HWC2::Error::Unsupported) {
        ALOGE("[%s] Failed to set hdrMetadata: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
    }

    error = hwcLayer->setColorTransform(getColorTransform());
    if (error == HWC2::Error::Unsupported) {
        // If per layer color transform is not supported, we use GPU composition.
        setCompositionType(displayDevice, Hwc2::IComposerClient::Composition::CLIENT);
    } else if (error != HWC2::Error::None) {
        ALOGE("[%s] Failed to setColorTransform: %s (%d)", mName.string(),
              to_string(error).c_str(), static_cast<int32_t>(error));
    }
    layerCompositionState.dataspace = mCurrentDataSpace;
    layerCompositionState.colorTransform = getColorTransform();
    layerCompositionState.hdrMetadata = metadata;

    setHwcLayerBuffer(displayDevice);
}
bool BufferLayer::onPreComposition(nsecs_t refreshStartTime) {
    if (mBufferLatched) {
        Mutex::Autolock lock(mFrameEventHistoryMutex);
        mFrameEventHistory.addPreComposition(mCurrentFrameNumber, refreshStartTime);
    }
    mRefreshPending = false;
    return hasReadyFrame();
}
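
// Called after each composited frame. Does nothing unless a new frame was
// latched for this composition; otherwise it records post-composition frame
// events, feeds the desired present time and the present fence into
// FrameTracker and TimeStats, and falls back to the HWC refresh timestamp
// when no present fence is available.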
bool BufferLayer::onPostComposition(const std::optional<DisplayId>& displayId,
                                    const std::shared_ptr<FenceTime>& glDoneFence,
                                    const std::shared_ptr<FenceTime>& presentFence,
                                    const CompositorTiming& compositorTiming) {
    // mFrameLatencyNeeded is true when a new frame was latched for the
    // composition.
    if (!mFrameLatencyNeeded) return false;

    // Update mFrameEventHistory.
    {
        Mutex::Autolock lock(mFrameEventHistoryMutex);
        mFrameEventHistory.addPostComposition(mCurrentFrameNumber, glDoneFence, presentFence,
                                              compositorTiming);
    }

    // Update mFrameTracker.
    nsecs_t desiredPresentTime = getDesiredPresentTime();
    mFrameTracker.setDesiredPresentTime(desiredPresentTime);

    const int32_t layerID = getSequence();
    mFlinger->mTimeStats->setDesiredTime(layerID, mCurrentFrameNumber, desiredPresentTime);

    std::shared_ptr<FenceTime> frameReadyFence = getCurrentFenceTime();
    if (frameReadyFence->isValid()) {
        mFrameTracker.setFrameReadyFence(std::move(frameReadyFence));
    } else {
        // There was no fence for this frame, so assume that it was ready
        // to be presented at the desired present time.
        mFrameTracker.setFrameReadyTime(desiredPresentTime);
    }

    if (presentFence->isValid()) {
        mFlinger->mTimeStats->setPresentFence(layerID, mCurrentFrameNumber, presentFence);
        mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
    } else if (displayId && mFlinger->getHwComposer().isConnected(*displayId)) {
        // The HWC doesn't support present fences, so use the refresh
        // timestamp instead.
        const nsecs_t actualPresentTime = mFlinger->getHwComposer().getRefreshTimestamp(*displayId);
        mFlinger->mTimeStats->setPresentTime(layerID, mCurrentFrameNumber, actualPresentTime);
        mFrameTracker.setActualPresentTime(actualPresentTime);
    }

    mFrameTracker.advanceFrame();
    mFrameLatencyNeeded = false;
    return true;
}
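
// Latches the next ready buffer for this refresh. The sequence is: latch any
// sideband stream; bail out if there is no ready frame, a refresh is already
// pending, the acquire fence has not signaled, or barrier transactions have
// not been applied; then update the texture image, active buffer and frame
// number, translate legacy dataspaces, flag recomputeVisibleRegions when
// geometry-affecting state changed, and drop local sync points that the
// latched frame has satisfied.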
bool BufferLayer::latchBuffer(bool& recomputeVisibleRegions, nsecs_t latchTime) {
    ATRACE_CALL();

    bool refreshRequired = latchSidebandStream(recomputeVisibleRegions);
    if (refreshRequired) {
        return refreshRequired;
    }

    if (!hasReadyFrame()) {
        return false;
    }

    // if we've already called updateTexImage() without going through
    // a composition step, we have to skip this layer at this point
    // because we cannot call updateTeximage() without a corresponding
    // compositionComplete() call.
    // we'll trigger an update in onPreComposition().
    if (mRefreshPending) {
        return false;
    }

    // If the head buffer's acquire fence hasn't signaled yet, return and
    // try again later
    if (!fenceHasSignaled()) {
        ATRACE_NAME("!fenceHasSignaled()");
        mFlinger->signalLayerUpdate();
        return false;
    }

    // Capture the old state of the layer for comparisons later
    const State& s(getDrawingState());
    const bool oldOpacity = isOpaque(s);
    sp<GraphicBuffer> oldBuffer = mActiveBuffer;

    if (!allTransactionsSignaled()) {
        mFlinger->setTransactionFlags(eTraversalNeeded);
        return false;
    }

    status_t err = updateTexImage(recomputeVisibleRegions, latchTime);
    if (err != NO_ERROR) {
        return false;
    }

    err = updateActiveBuffer();
    if (err != NO_ERROR) {
        return false;
    }

    mBufferLatched = true;

    err = updateFrameNumber(latchTime);
    if (err != NO_ERROR) {
        return false;
    }

    mRefreshPending = true;
    mFrameLatencyNeeded = true;
    if (oldBuffer == nullptr) {
        // the first time we receive a buffer, we need to trigger a
        // geometry invalidation.
        recomputeVisibleRegions = true;
    }

    ui::Dataspace dataSpace = getDrawingDataSpace();
    // translate legacy dataspaces to modern dataspaces
    switch (dataSpace) {
        case ui::Dataspace::SRGB:
            dataSpace = ui::Dataspace::V0_SRGB;
            break;
        case ui::Dataspace::SRGB_LINEAR:
            dataSpace = ui::Dataspace::V0_SRGB_LINEAR;
            break;
        case ui::Dataspace::JFIF:
            dataSpace = ui::Dataspace::V0_JFIF;
            break;
        case ui::Dataspace::BT601_625:
            dataSpace = ui::Dataspace::V0_BT601_625;
            break;
        case ui::Dataspace::BT601_525:
            dataSpace = ui::Dataspace::V0_BT601_525;
            break;
        case ui::Dataspace::BT709:
            dataSpace = ui::Dataspace::V0_BT709;
            break;
        default:
            break;
    }
    mCurrentDataSpace = dataSpace;

    Rect crop(getDrawingCrop());
    const uint32_t transform(getDrawingTransform());
    const uint32_t scalingMode(getDrawingScalingMode());
    const bool transformToDisplayInverse(getTransformToDisplayInverse());
    if ((crop != mCurrentCrop) || (transform != mCurrentTransform) ||
        (scalingMode != mCurrentScalingMode) ||
        (transformToDisplayInverse != mTransformToDisplayInverse)) {
        mCurrentCrop = crop;
        mCurrentTransform = transform;
        mCurrentScalingMode = scalingMode;
        mTransformToDisplayInverse = transformToDisplayInverse;
        recomputeVisibleRegions = true;
    }

    if (oldBuffer != nullptr) {
        uint32_t bufWidth = mActiveBuffer->getWidth();
        uint32_t bufHeight = mActiveBuffer->getHeight();
        if (bufWidth != uint32_t(oldBuffer->width) || bufHeight != uint32_t(oldBuffer->height)) {
            recomputeVisibleRegions = true;
        }
    }

    if (oldOpacity != isOpaque(s)) {
        recomputeVisibleRegions = true;
    }

    // Remove any sync points corresponding to the buffer which was just
    // latched
    {
        Mutex::Autolock lock(mLocalSyncPointMutex);
        auto point = mLocalSyncPoints.begin();
        while (point != mLocalSyncPoints.end()) {
            if (!(*point)->frameIsAvailable() || !(*point)->transactionIsApplied()) {
                // This sync point must have been added since we started
                // latching. Don't drop it yet.
                ++point;
                continue;
            }

            if ((*point)->getFrameNumber() <= mCurrentFrameNumber) {
                std::stringstream ss;
                ss << "Dropping sync point " << (*point)->getFrameNumber();
                ATRACE_NAME(ss.str().c_str());
                point = mLocalSyncPoints.erase(point);
            } else {
                ++point;
            }
        }
    }

    return true;
}
// transaction
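
// Marks local sync points whose frame number has been reached by the head
// buffer as available (once the acquire fence has signaled and the present
// time is current), and pokes the layer waiting on each barrier so its
// pending transaction gets applied.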
void BufferLayer::notifyAvailableFrames() {
    const auto headFrameNumber = getHeadFrameNumber();
    const bool headFenceSignaled = fenceHasSignaled();
    const bool presentTimeIsCurrent = framePresentTimeIsCurrent();
    Mutex::Autolock lock(mLocalSyncPointMutex);
    for (auto& point : mLocalSyncPoints) {
        if (headFrameNumber >= point->getFrameNumber() && headFenceSignaled &&
            presentTimeIsCurrent) {
            point->setFrameAvailable();
            sp<Layer> requestedSyncLayer = point->getRequestedSyncLayer();
            if (requestedSyncLayer) {
                // Need to update the transaction flag to ensure the layer's pending transaction
                // gets applied.
                requestedSyncLayer->setTransactionFlags(eTransactionNeeded);
            }
        }
    }
}
bool BufferLayer::hasReadyFrame() const {
    return hasFrameUpdate() || getSidebandStreamChanged() || getAutoRefresh();
}

uint32_t BufferLayer::getEffectiveScalingMode() const {
    if (mOverrideScalingMode >= 0) {
        return mOverrideScalingMode;
    }

    return mCurrentScalingMode;
}

bool BufferLayer::isProtected() const {
    const sp<GraphicBuffer>& buffer(mActiveBuffer);
    return (buffer != 0) && (buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
}
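
// Returns whether buffers may be latched before their acquire fences have
// signaled. The debug.sf.latch_unsignaled property is read once, under a
// mutex, and the result is cached for subsequent calls.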
bool BufferLayer::latchUnsignaledBuffers() {
    static bool propertyLoaded = false;
    static bool latch = false;
    static std::mutex mutex;
    std::lock_guard<std::mutex> lock(mutex);
    if (!propertyLoaded) {
        char value[PROPERTY_VALUE_MAX] = {};
        property_get("debug.sf.latch_unsignaled", value, "0");
        latch = atoi(value);
        propertyLoaded = true;
    }
    return latch;
}
// h/w composer set-up
bool BufferLayer::allTransactionsSignaled() {
    auto headFrameNumber = getHeadFrameNumber();
    bool matchingFramesFound = false;
    bool allTransactionsApplied = true;
    Mutex::Autolock lock(mLocalSyncPointMutex);

    for (auto& point : mLocalSyncPoints) {
        if (point->getFrameNumber() > headFrameNumber) {
            break;
        }
        matchingFramesFound = true;

        if (!point->frameIsAvailable()) {
            // We haven't notified the remote layer that the frame for
            // this point is available yet. Notify it now, and then
            // abort this attempt to latch.
            point->setFrameAvailable();
            allTransactionsApplied = false;
            break;
        }

        allTransactionsApplied = allTransactionsApplied && point->transactionIsApplied();
    }
    return !matchingFramesFound || allTransactionsApplied;
}
// As documented in libhardware header, formats in the range
// 0x100 - 0x1FF are specific to the HAL implementation, and
// are known to have no alpha channel
// TODO: move definition for device-specific range into
// hardware.h, instead of using hard-coded values here.
#define HARDWARE_IS_DEVICE_FORMAT(f) ((f) >= 0x100 && (f) <= 0x1FF)

bool BufferLayer::getOpacityForFormat(uint32_t format) {
    if (HARDWARE_IS_DEVICE_FORMAT(format)) {
        return true;
    }
    switch (format) {
        case HAL_PIXEL_FORMAT_RGBA_8888:
        case HAL_PIXEL_FORMAT_BGRA_8888:
        case HAL_PIXEL_FORMAT_RGBA_FP16:
        case HAL_PIXEL_FORMAT_RGBA_1010102:
            return false;
    }
    // in all other case, we have no blending (also for unknown formats)
    return true;
}
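
// Decides whether this layer needs filtering for the given display. Without
// a display (or without an output layer on it) only the cached
// mNeedsFiltering flag is used; otherwise filtering is also required when the
// output layer's source crop and display frame differ in size.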
bool BufferLayer::needsFiltering(const sp<const DisplayDevice>& displayDevice) const {
    // If we are not capturing based on the state of a known display device, we
    // only return mNeedsFiltering
    if (displayDevice == nullptr) {
        return mNeedsFiltering;
    }

    const auto outputLayer = findOutputLayerForDisplay(displayDevice);
    if (outputLayer == nullptr) {
        return mNeedsFiltering;
    }

    const auto& compositionState = outputLayer->getState();
    const auto displayFrame = compositionState.displayFrame;
    const auto sourceCrop = compositionState.sourceCrop;
    return mNeedsFiltering || sourceCrop.getHeight() != displayFrame.getHeight() ||
            sourceCrop.getWidth() != displayFrame.getWidth();
}
uint64_t BufferLayer::getHeadFrameNumber() const {
    if (hasFrameUpdate()) {
        return getFrameNumber();
    } else {
        return mCurrentFrameNumber;
    }
}
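
// Returns the size of the latched buffer in layer space: the active layer
// size for sideband streams or scaled buffers, INVALID_RECT when no buffer
// has been latched, and otherwise the buffer dimensions with the buffer
// transform (and, if requested, the primary display's inverse transform)
// undone.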
Rect BufferLayer::getBufferSize(const State& s) const {
    // If we have a sideband stream, or we are scaling the buffer then return the layer size since
    // we cannot determine the buffer size.
    if ((s.sidebandStream != nullptr) ||
        (getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE)) {
        return Rect(getActiveWidth(s), getActiveHeight(s));
    }

    if (mActiveBuffer == nullptr) {
        return Rect::INVALID_RECT;
    }

    uint32_t bufWidth = mActiveBuffer->getWidth();
    uint32_t bufHeight = mActiveBuffer->getHeight();

    // Undo any transformations on the buffer and return the result.
    if (mCurrentTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }

    if (getTransformToDisplayInverse()) {
        uint32_t invTransform = DisplayDevice::getPrimaryDisplayOrientationTransform();
        if (invTransform & ui::Transform::ROT_90) {
            std::swap(bufWidth, bufHeight);
        }
    }

    return Rect(bufWidth, bufHeight);
}
std::shared_ptr<compositionengine::Layer> BufferLayer::getCompositionLayer() const {
    return mCompositionLayer;
}
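
// FloatRect analogue of getBufferSize(): same sideband/scaling and transform
// handling, but falls back to the parent bounds when no buffer has been
// latched.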
FloatRect BufferLayer::computeSourceBounds(const FloatRect& parentBounds) const {
    const State& s(getDrawingState());
    // If we have a sideband stream, or we are scaling the buffer then return the layer size since
    // we cannot determine the buffer size.
    if ((s.sidebandStream != nullptr) ||
        (getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE)) {
        return FloatRect(0, 0, getActiveWidth(s), getActiveHeight(s));
    }

    if (mActiveBuffer == nullptr) {
        return parentBounds;
    }

    uint32_t bufWidth = mActiveBuffer->getWidth();
    uint32_t bufHeight = mActiveBuffer->getHeight();

    // Undo any transformations on the buffer and return the result.
    if (mCurrentTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }

    if (getTransformToDisplayInverse()) {
        uint32_t invTransform = DisplayDevice::getPrimaryDisplayOrientationTransform();
        if (invTransform & ui::Transform::ROT_90) {
            std::swap(bufWidth, bufHeight);
        }
    }

    return FloatRect(0, 0, bufWidth, bufHeight);
}
} // namespace android

#if defined(__gl_h_)
#error "don't include gl/gl.h in this file"
#endif

#if defined(__gl2_h_)
#error "don't include gl2/gl2.h in this file"
#endif